-rw-r--r--  .gitignore | 1
-rw-r--r--  Documentation/ABI/testing/sysfs-driver-xen-blkback | 17
-rw-r--r--  Documentation/ABI/testing/sysfs-driver-xen-blkfront | 10
-rw-r--r--  Documentation/DocBook/device-drivers.tmpl | 2
-rw-r--r--  Documentation/DocBook/drm.tmpl | 138
-rw-r--r--  Documentation/DocBook/media_api.tmpl | 4
-rw-r--r--  Documentation/bcache.txt | 37
-rw-r--r--  Documentation/devicetree/bindings/clock/imx27-clock.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/regulator/palmas-pmic.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.txt | 2
-rw-r--r--  Documentation/ja_JP/HOWTO | 44
-rw-r--r--  Documentation/kernel-parameters.txt | 2
-rw-r--r--  Documentation/sysctl/net.txt | 4
-rw-r--r--  MAINTAINERS | 76
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/Kconfig | 6
-rw-r--r--  arch/alpha/Kconfig | 1
-rw-r--r--  arch/alpha/include/asm/atomic.h | 88
-rw-r--r--  arch/alpha/include/asm/param.h | 8
-rw-r--r--  arch/alpha/include/asm/spinlock.h | 4
-rw-r--r--  arch/alpha/include/asm/unistd.h | 3
-rw-r--r--  arch/alpha/include/uapi/asm/param.h | 7
-rw-r--r--  arch/alpha/include/uapi/asm/unistd.h | 2
-rw-r--r--  arch/alpha/kernel/entry.S | 399
-rw-r--r--  arch/alpha/kernel/irq_alpha.c | 2
-rw-r--r--  arch/alpha/kernel/smp.c | 5
-rw-r--r--  arch/alpha/kernel/sys_dp264.c | 8
-rw-r--r--  arch/alpha/kernel/sys_marvel.c | 3
-rw-r--r--  arch/alpha/kernel/systbls.S | 2
-rw-r--r--  arch/alpha/kernel/time.c | 4
-rw-r--r--  arch/alpha/kernel/traps.c | 8
-rw-r--r--  arch/arc/include/asm/entry.h | 1
-rw-r--r--  arch/arc/lib/strchr-700.S | 10
-rw-r--r--  arch/arm/Kconfig | 7
-rw-r--r--  arch/arm/Kconfig.debug | 14
-rw-r--r--  arch/arm/Makefile | 18
-rw-r--r--  arch/arm/boot/dts/at91sam9n12ek.dts | 4
-rw-r--r--  arch/arm/boot/dts/at91sam9x5ek.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/atlas6.dtsi | 22
-rw-r--r--  arch/arm/boot/dts/imx28-apx4devkit.dts | 2
-rw-r--r--  arch/arm/boot/dts/imx28-evk.dts | 2
-rw-r--r--  arch/arm/boot/dts/imx28-m28evk.dts | 2
-rw-r--r--  arch/arm/boot/dts/imx28.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/imx51-babbage.dts | 13
-rw-r--r--  arch/arm/boot/dts/imx53-mba53.dts | 2
-rw-r--r--  arch/arm/boot/dts/imx53.dtsi | 32
-rw-r--r--  arch/arm/boot/dts/msm8960-cdp.dts | 4
-rw-r--r--  arch/arm/boot/dts/omap5-uevm.dts | 78
-rw-r--r--  arch/arm/boot/dts/prima2.dtsi | 16
-rw-r--r--  arch/arm/boot/dts/stih416-pinctrl.dtsi | 10
-rw-r--r--  arch/arm/boot/dts/stih416.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/stih41x.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/tegra20-colibri-512.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/tegra20-seaboard.dts | 2
-rw-r--r--  arch/arm/boot/dts/tegra20-trimslice.dts | 2
-rw-r--r--  arch/arm/boot/dts/tegra20-whistler.dts | 4
-rw-r--r--  arch/arm/boot/dts/twl4030.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/vf610.dtsi | 8
-rw-r--r--  arch/arm/common/edma.c | 1
-rw-r--r--  arch/arm/configs/da8xx_omapl_defconfig | 2
-rw-r--r--  arch/arm/configs/davinci_all_defconfig | 2
-rw-r--r--  arch/arm/configs/multi_v7_defconfig | 6
-rw-r--r--  arch/arm/configs/nhk8815_defconfig | 7
-rw-r--r--  arch/arm/include/asm/a.out-core.h | 45
-rw-r--r--  arch/arm/include/asm/cputype.h | 7
-rw-r--r--  arch/arm/include/asm/elf.h | 6
-rw-r--r--  arch/arm/include/asm/mmu.h | 3
-rw-r--r--  arch/arm/include/asm/mmu_context.h | 20
-rw-r--r--  arch/arm/include/asm/page.h | 2
-rw-r--r--  arch/arm/include/asm/processor.h | 4
-rw-r--r--  arch/arm/include/asm/smp_plat.h | 3
-rw-r--r--  arch/arm/include/asm/spinlock.h | 51
-rw-r--r--  arch/arm/include/asm/thread_info.h | 1
-rw-r--r--  arch/arm/include/asm/tlb.h | 7
-rw-r--r--  arch/arm/include/asm/tlbflush.h | 16
-rw-r--r--  arch/arm/include/asm/virt.h | 12
-rw-r--r--  arch/arm/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/arm/include/uapi/asm/a.out.h | 34
-rw-r--r--  arch/arm/kernel/entry-armv.S | 106
-rw-r--r--  arch/arm/kernel/entry-v7m.S | 2
-rw-r--r--  arch/arm/kernel/fiq.c | 24
-rw-r--r--  arch/arm/kernel/head-nommu.S | 1
-rw-r--r--  arch/arm/kernel/head.S | 1
-rw-r--r--  arch/arm/kernel/hyp-stub.S | 4
-rw-r--r--  arch/arm/kernel/machine_kexec.c | 21
-rw-r--r--  arch/arm/kernel/perf_event.c | 10
-rw-r--r--  arch/arm/kernel/process.c | 49
-rw-r--r--  arch/arm/kernel/setup.c | 3
-rw-r--r--  arch/arm/kernel/signal.c | 56
-rw-r--r--  arch/arm/kernel/signal.h | 12
-rw-r--r--  arch/arm/kernel/smp.c | 10
-rw-r--r--  arch/arm/kernel/smp_tlb.c | 17
-rw-r--r--  arch/arm/kernel/traps.c | 46
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S | 17
-rw-r--r--  arch/arm/kvm/coproc.c | 26
-rw-r--r--  arch/arm/kvm/coproc.h | 3
-rw-r--r--  arch/arm/kvm/coproc_a15.c | 6
-rw-r--r--  arch/arm/kvm/mmio.c | 3
-rw-r--r--  arch/arm/kvm/mmu.c | 36
-rw-r--r--  arch/arm/mach-at91/at91sam9x5.c | 2
-rw-r--r--  arch/arm/mach-davinci/board-dm355-leopard.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-dm365-evm.c | 2
-rw-r--r--  arch/arm/mach-davinci/board-dm644x-evm.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-dm646x-evm.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-neuros-osd2.c | 1
-rw-r--r--  arch/arm/mach-davinci/dm355.c | 2
-rw-r--r--  arch/arm/mach-davinci/dm365.c | 2
-rw-r--r--  arch/arm/mach-exynos/Kconfig | 1
-rw-r--r--  arch/arm/mach-exynos/Makefile | 2
-rw-r--r--  arch/arm/mach-exynos/common.c | 26
-rw-r--r--  arch/arm/mach-exynos/common.h | 1
-rw-r--r--  arch/arm/mach-exynos/cpuidle.c | 1
-rw-r--r--  arch/arm/mach-exynos/include/mach/memory.h | 5
-rw-r--r--  arch/arm/mach-exynos/pm.c | 6
-rw-r--r--  arch/arm/mach-footbridge/dc21285.c | 2
-rw-r--r--  arch/arm/mach-highbank/highbank.c | 7
-rw-r--r--  arch/arm/mach-imx/clk-imx6q.c | 5
-rw-r--r--  arch/arm/mach-imx/clk-vf610.c | 2
-rw-r--r--  arch/arm/mach-imx/mx27.h | 2
-rw-r--r--  arch/arm/mach-keystone/keystone.c | 2
-rw-r--r--  arch/arm/mach-msm/Kconfig | 3
-rw-r--r--  arch/arm/mach-msm/gpiomux-v1.c | 33
-rw-r--r--  arch/arm/mach-msm/gpiomux.h | 10
-rw-r--r--  arch/arm/mach-omap2/Kconfig | 2
-rw-r--r--  arch/arm/mach-omap2/board-generic.c | 23
-rw-r--r--  arch/arm/mach-omap2/board-n8x0.c | 4
-rw-r--r--  arch/arm/mach-omap2/board-rx51.c | 2
-rw-r--r--  arch/arm/mach-omap2/dss-common.c | 2
-rw-r--r--  arch/arm/mach-omap2/omap_device.c | 18
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.c | 2
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.h | 50
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c | 6
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_33xx_data.c | 3
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 9
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 5
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_54xx_data.c | 3
-rw-r--r--  arch/arm/mach-omap2/serial.c | 11
-rw-r--r--  arch/arm/mach-omap2/usb-musb.c | 5
-rw-r--r--  arch/arm/mach-pxa/em-x270.c | 17
-rw-r--r--  arch/arm/mach-pxa/mainstone.c | 3
-rw-r--r--  arch/arm/mach-pxa/pcm990-baseboard.c | 3
-rw-r--r--  arch/arm/mach-pxa/poodle.c | 4
-rw-r--r--  arch/arm/mach-pxa/spitz.c | 4
-rw-r--r--  arch/arm/mach-pxa/stargate2.c | 3
-rw-r--r--  arch/arm/mach-s3c24xx/clock-s3c2410.c | 161
-rw-r--r--  arch/arm/mach-s3c24xx/clock-s3c2440.c | 3
-rw-r--r--  arch/arm/mach-shmobile/board-armadillo800eva.c | 3
-rw-r--r--  arch/arm/mach-shmobile/board-bockw.c | 8
-rw-r--r--  arch/arm/mach-shmobile/board-lager.c | 2
-rw-r--r--  arch/arm/mach-sti/Kconfig | 3
-rw-r--r--  arch/arm/mach-sti/headsmp.S | 2
-rw-r--r--  arch/arm/mach-zynq/common.c | 2
-rw-r--r--  arch/arm/mm/Kconfig | 37
-rw-r--r--  arch/arm/mm/context.c | 3
-rw-r--r--  arch/arm/mm/mmu.c | 57
-rw-r--r--  arch/arm/mm/proc-v7-2level.S | 2
-rw-r--r--  arch/arm/mm/proc-v7-3level.S | 2
-rw-r--r--  arch/arm/mm/proc-v7.S | 11
-rw-r--r--  arch/arm/plat-samsung/Kconfig | 7
-rw-r--r--  arch/arm/plat-samsung/Makefile | 2
-rw-r--r--  arch/arm/plat-samsung/include/plat/clock.h | 5
-rw-r--r--  arch/arm/plat-samsung/include/plat/pm.h | 8
-rw-r--r--  arch/arm/plat-samsung/init.c | 5
-rw-r--r--  arch/arm/plat-samsung/pm.c | 14
-rw-r--r--  arch/arm/xen/enlighten.c | 3
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 17
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 2
-rw-r--r--  arch/arm64/include/asm/thread_info.h | 4
-rw-r--r--  arch/arm64/include/asm/tlb.h | 7
-rw-r--r--  arch/arm64/include/asm/virt.h | 13
-rw-r--r--  arch/arm64/kernel/entry.S | 2
-rw-r--r--  arch/arm64/kernel/perf_event.c | 10
-rw-r--r--  arch/arm64/kernel/process.c | 2
-rw-r--r--  arch/arm64/kvm/hyp.S | 13
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 3
-rw-r--r--  arch/avr32/boards/atngw100/mrmt.c | 1
-rw-r--r--  arch/hexagon/Kconfig | 1
-rw-r--r--  arch/ia64/configs/generic_defconfig | 2
-rw-r--r--  arch/ia64/configs/gensparse_defconfig | 2
-rw-r--r--  arch/ia64/configs/tiger_defconfig | 2
-rw-r--r--  arch/ia64/configs/xen_domu_defconfig | 2
-rw-r--r--  arch/ia64/include/asm/tlb.h | 9
-rw-r--r--  arch/m68k/emu/natfeat.c | 23
-rw-r--r--  arch/m68k/include/asm/div64.h | 9
-rw-r--r--  arch/microblaze/Kconfig | 2
-rw-r--r--  arch/mips/Kconfig | 1
-rw-r--r--  arch/mips/bcm47xx/Kconfig | 1
-rw-r--r--  arch/mips/include/asm/cpu-features.h | 2
-rw-r--r--  arch/mips/include/asm/mach-generic/spaces.h | 4
-rw-r--r--  arch/mips/include/uapi/asm/siginfo.h | 7
-rw-r--r--  arch/mips/kernel/bmips_vec.S | 6
-rw-r--r--  arch/mips/kernel/smp-bmips.c | 22
-rw-r--r--  arch/mips/math-emu/cp1emu.c | 26
-rw-r--r--  arch/mips/oprofile/op_model_mipsxx.c | 2
-rw-r--r--  arch/mips/pnx833x/common/platform.c | 2
-rw-r--r--  arch/mips/powertv/asic/asic_devices.c | 3
-rw-r--r--  arch/openrisc/Kconfig | 1
-rw-r--r--  arch/parisc/configs/c8000_defconfig | 279
-rw-r--r--  arch/parisc/include/asm/parisc-device.h | 3
-rw-r--r--  arch/parisc/kernel/cache.c | 135
-rw-r--r--  arch/parisc/kernel/inventory.c | 1
-rw-r--r--  arch/parisc/kernel/signal.c | 7
-rw-r--r--  arch/parisc/kernel/signal32.c | 1
-rw-r--r--  arch/parisc/kernel/sys32.h | 36
-rw-r--r--  arch/parisc/kernel/sys_parisc32.c | 2
-rw-r--r--  arch/powerpc/Kconfig | 2
-rw-r--r--  arch/powerpc/configs/ppc64_defconfig | 2
-rw-r--r--  arch/powerpc/configs/ppc64e_defconfig | 2
-rw-r--r--  arch/powerpc/configs/pseries_defconfig | 2
-rw-r--r--  arch/powerpc/include/asm/eeh.h | 30
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 7
-rw-r--r--  arch/powerpc/include/asm/module.h | 5
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h | 1
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h | 6
-rw-r--r--  arch/powerpc/include/asm/processor.h | 4
-rw-r--r--  arch/powerpc/include/asm/reg.h | 34
-rw-r--r--  arch/powerpc/include/asm/smp.h | 4
-rw-r--r--  arch/powerpc/include/asm/switch_to.h | 9
-rw-r--r--  arch/powerpc/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/powerpc/include/uapi/asm/perf_event.h | 18
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 3
-rw-r--r--  arch/powerpc/kernel/cputable.c | 20
-rw-r--r--  arch/powerpc/kernel/eeh.c | 72
-rw-r--r--  arch/powerpc/kernel/eeh_cache.c | 18
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c | 77
-rw-r--r--  arch/powerpc/kernel/eeh_pe.c | 58
-rw-r--r--  arch/powerpc/kernel/eeh_sysfs.c | 21
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 36
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 5
-rw-r--r--  arch/powerpc/kernel/irq.c | 2
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 2
-rw-r--r--  arch/powerpc/kernel/pci-hotplug.c | 49
-rw-r--r--  arch/powerpc/kernel/pci_of_scan.c | 56
-rw-r--r--  arch/powerpc/kernel/process.c | 10
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 5
-rw-r--r--  arch/powerpc/kernel/tm.S | 20
-rw-r--r--  arch/powerpc/kernel/traps.c | 58
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 3
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 4
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c | 5
-rw-r--r--  arch/powerpc/mm/hash_native_64.c | 12
-rw-r--r--  arch/powerpc/mm/numa.c | 59
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 7
-rw-r--r--  arch/powerpc/perf/power8-pmu.c | 30
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-powernv.c | 17
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c | 67
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/nvram.c | 80
-rw-r--r--  arch/powerpc/platforms/pseries/ras.c | 3
-rw-r--r--  arch/s390/Kconfig | 9
-rw-r--r--  arch/s390/boot/compressed/Makefile | 9
-rw-r--r--  arch/s390/boot/compressed/misc.c | 4
-rw-r--r--  arch/s390/include/asm/bitops.h | 2
-rw-r--r--  arch/s390/include/asm/tlb.h | 8
-rw-r--r--  arch/s390/kernel/perf_event.c | 9
-rw-r--r--  arch/s390/kernel/setup.c | 1
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 21
-rw-r--r--  arch/s390/kvm/priv.c | 4
-rw-r--r--  arch/s390/mm/init.c | 1
-rw-r--r--  arch/s390/oprofile/init.c | 2
-rw-r--r--  arch/score/Kconfig | 2
-rw-r--r--  arch/sh/configs/sh03_defconfig | 2
-rw-r--r--  arch/sh/include/asm/tlb.h | 6
-rw-r--r--  arch/um/include/asm/tlb.h | 6
-rw-r--r--  arch/x86/boot/compressed/eboot.c | 2
-rw-r--r--  arch/x86/crypto/Makefile | 2
-rw-r--r--  arch/x86/crypto/crct10dif-pcl-asm_64.S | 643
-rw-r--r--  arch/x86/crypto/crct10dif-pclmul_glue.c | 151
-rw-r--r--  arch/x86/include/asm/bootparam_utils.h | 4
-rw-r--r--  arch/x86/include/asm/microcode_amd.h | 2
-rw-r--r--  arch/x86/include/asm/pgtable-2level.h | 48
-rw-r--r--  arch/x86/include/asm/pgtable-3level.h | 3
-rw-r--r--  arch/x86/include/asm/pgtable.h | 30
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 17
-rw-r--r--  arch/x86/include/asm/spinlock.h | 4
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 20
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-severity.c | 4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 1
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c | 4
-rw-r--r--  arch/x86/kernel/early-quirks.c | 14
-rw-r--r--  arch/x86/kernel/i387.c | 2
-rw-r--r--  arch/x86/kernel/microcode_amd.c | 36
-rw-r--r--  arch/x86/kernel/microcode_amd_early.c | 27
-rw-r--r--  arch/x86/kernel/sys_x86_64.c | 2
-rw-r--r--  arch/x86/mm/mmap.c | 6
-rw-r--r--  arch/x86/platform/ce4100/ce4100.c | 1
-rw-r--r--  arch/x86/xen/setup.c | 22
-rw-r--r--  arch/x86/xen/smp.c | 11
-rw-r--r--  crypto/Kconfig | 19
-rw-r--r--  crypto/Makefile | 1
-rw-r--r--  crypto/crct10dif.c | 178
-rw-r--r--  crypto/tcrypt.c | 8
-rw-r--r--  crypto/testmgr.c | 10
-rw-r--r--  crypto/testmgr.h | 33
-rw-r--r--  drivers/accessibility/braille/braille_console.c | 9
-rw-r--r--  drivers/acpi/acpi_processor.c | 3
-rw-r--r--  drivers/acpi/battery.c | 2
-rw-r--r--  drivers/acpi/glue.c | 133
-rw-r--r--  drivers/acpi/internal.h | 2
-rw-r--r--  drivers/acpi/proc.c | 8
-rw-r--r--  drivers/acpi/video.c | 80
-rw-r--r--  drivers/acpi/video_detect.c | 15
-rw-r--r--  drivers/ata/Kconfig | 11
-rw-r--r--  drivers/ata/Makefile | 1
-rw-r--r--  drivers/ata/ahci.c | 9
-rw-r--r--  drivers/ata/ahci_imx.c | 236
-rw-r--r--  drivers/ata/ata_piix.c | 2
-rw-r--r--  drivers/ata/libata-pmp.c | 12
-rw-r--r--  drivers/ata/libata-scsi.c | 6
-rw-r--r--  drivers/ata/pata_imx.c | 1
-rw-r--r--  drivers/ata/sata_fsl.c | 5
-rw-r--r--  drivers/ata/sata_highbank.c | 4
-rw-r--r--  drivers/ata/sata_inic162x.c | 14
-rw-r--r--  drivers/base/regmap/regcache.c | 3
-rw-r--r--  drivers/block/Kconfig | 4
-rw-r--r--  drivers/block/aoe/aoecmd.c | 17
-rw-r--r--  drivers/block/drbd/drbd_actlog.c | 21
-rw-r--r--  drivers/block/drbd/drbd_int.h | 15
-rw-r--r--  drivers/block/drbd/drbd_main.c | 61
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 185
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 12
-rw-r--r--  drivers/block/drbd/drbd_state.c | 4
-rw-r--r--  drivers/block/rsxx/core.c | 359
-rw-r--r--  drivers/block/rsxx/cregs.c | 14
-rw-r--r--  drivers/block/rsxx/dev.c | 33
-rw-r--r--  drivers/block/rsxx/dma.c | 185
-rw-r--r--  drivers/block/rsxx/rsxx_priv.h | 10
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 872
-rw-r--r--  drivers/block/xen-blkback/common.h | 147
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 85
-rw-r--r--  drivers/block/xen-blkfront.c | 532
-rw-r--r--  drivers/bluetooth/ath3k.c | 46
-rw-r--r--  drivers/bluetooth/btusb.c | 18
-rw-r--r--  drivers/char/agp/parisc-agp.c | 6
-rw-r--r--  drivers/char/virtio_console.c | 70
-rw-r--r--  drivers/clk/samsung/clk-exynos4.c | 64
-rw-r--r--  drivers/clk/zynq/clkc.c | 13
-rw-r--r--  drivers/cpufreq/cpufreq.c | 19
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 20
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 8
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h | 4
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 20
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 12
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c | 11
-rw-r--r--  drivers/cpuidle/governors/menu.c | 106
-rw-r--r--  drivers/crypto/caam/caamhash.c | 2
-rw-r--r--  drivers/dma/pch_dma.c | 1
-rw-r--r--  drivers/dma/pl330.c | 93
-rw-r--r--  drivers/dma/sh/shdma.c | 4
-rw-r--r--  drivers/edac/edac_mc.c | 9
-rw-r--r--  drivers/edac/edac_mc_sysfs.c | 28
-rw-r--r--  drivers/edac/i5100_edac.c | 2
-rw-r--r--  drivers/firewire/core-cdev.c | 3
-rw-r--r--  drivers/firewire/ohci.c | 10
-rw-r--r--  drivers/firmware/dmi_scan.c | 14
-rw-r--r--  drivers/gpio/gpio-msm-v1.c | 1
-rw-r--r--  drivers/gpio/gpio-msm-v2.c | 2
-rw-r--r--  drivers/gpu/drm/Kconfig | 15
-rw-r--r--  drivers/gpu/drm/Makefile | 5
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 5
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 9
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 6
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 5
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 3
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 9
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 6
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 51
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 236
-rw-r--r--  drivers/gpu/drm/drm_context.c | 81
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 173
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 116
-rw-r--r--  drivers/gpu/drm/drm_dma.c | 17
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 106
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 306
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 5
-rw-r--r--  drivers/gpu/drm/drm_flip_work.c | 124
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 98
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 440
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 26
-rw-r--r--  drivers/gpu/drm/drm_info.c | 6
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 62
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 5
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 2
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 229
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 58
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 35
-rw-r--r--  drivers/gpu/drm/drm_platform.c | 16
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 190
-rw-r--r--  drivers/gpu/drm/drm_proc.c | 209
-rw-r--r--  drivers/gpu/drm/drm_scatter.c | 29
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 73
-rw-r--r--  drivers/gpu/drm/drm_usb.c | 9
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 3
-rw-r--r--  drivers/gpu/drm/drm_vma_manager.c | 436
-rw-r--r--  drivers/gpu/drm/exynos/exynos_ddc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 35
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 19
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 36
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 9
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 13
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmiphy.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/Makefile | 1
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.h | 12
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_crt.c | 57
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c | 920
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 154
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 89
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 71
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 31
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 39
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.c | 776
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.h | 103
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 38
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.c | 15
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.h | 16
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_intel_display.c | 65
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c | 63
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi.c | 43
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c | 48
-rw-r--r--  drivers/gpu/drm/gma500/psb_device.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/psb_device.h (renamed from drivers/gpu/drm/gma500/psb_intel_display.h) | 13
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 21
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 7
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 944
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_drv.h | 44
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 75
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 56
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 481
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 3
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c | 3
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 986
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 157
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 319
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 604
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 777
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 73
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 43
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 93
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 191
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 313
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 208
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 1019
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 1523
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 166
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 71
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 38
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 88
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1660
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 518
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 150
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 61
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 305
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 1167
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 91
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 94
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 59
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 595
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.c | 3
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.h | 2
-rw-r--r--  drivers/gpu/drm/mga/mga_state.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 3
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 9
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 46
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 7
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 34
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 30
-rw-r--r--  drivers/gpu/drm/msm/NOTES | 69
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h | 1438
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h | 2193
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 502
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.h | 30
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 432
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 370
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 141
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 254
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h | 502
-rw-r--r--  drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 114
-rw-r--r--  drivers/gpu/drm/msm/dsi/sfpb.xml.h | 48
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 272
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 131
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 508
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 167
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 367
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_i2c.c | 281
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c | 141
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c | 214
-rw-r--r--  drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 50
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4.xml.h | 1061
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_crtc.c | 685
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c | 305
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_format.c | 56
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_irq.c | 203
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_kms.c | 365
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_kms.h | 194
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_plane.c | 243
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 776
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 213
-rw-r--r--  drivers/gpu/drm/msm/msm_fb.c | 202
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 258
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 597
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 99
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 412
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 463
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 124
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 61
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.h | 43
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/mm.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/printk.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/dport.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/falcon.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/vp/nve0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/xtensa.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/printk.h | 13
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/falcon.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/mc.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/vm.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/priv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/base.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/base.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c | 107
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 96
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 297
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ioc32.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ioctl.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vga.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_pm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 48
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c | 14
-rw-r--r--  drivers/gpu/drm/omapdrm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 12
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 7
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.c | 74
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 52
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_helpers.c | 169
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_plane.c | 51
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c | 42
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 70
-rw-r--r--  drivers/gpu/drm/qxl/qxl_draw.c | 263
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 84
-rw-r--r--  drivers/gpu/drm/qxl/qxl_dumb.c | 7
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 184
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fence.c | 10
-rw-r--r--  drivers/gpu/drm/qxl/qxl_gem.c | 28
-rw-r--r--  drivers/gpu/drm/qxl/qxl_image.c | 111
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ioctl.c | 321
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c | 71
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h | 8
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 212
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 6
-rw-r--r--  drivers/gpu/drm/r128/r128_cce.c | 2
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 3
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.h | 2
-rw-r--r--  drivers/gpu/drm/r128/r128_state.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 24
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 615
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 45
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/atombios_i2c.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/btc_dpm.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/cayman_blit_shaders.c | 54
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 5239
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.h | 332
-rw-r--r--  drivers/gpu/drm/radeon/ci_smc.c | 262
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 3139
-rw-r--r--  drivers/gpu/drm/radeon/cik_reg.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 785
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 594
-rw-r--r--  drivers/gpu/drm/radeon/clearstate_cayman.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/clearstate_ci.h | 944
-rw-r--r--  drivers/gpu/drm/radeon/clearstate_evergreen.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/cypress_dpm.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 278
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 538
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 729
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_shaders.c | 54
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_dma.c | 190
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 98
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 14
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 2645
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.h | 199
-rw-r--r--  drivers/gpu/drm/radeon/kv_smc.c | 207
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 385
-rw-r--r--  drivers/gpu/drm/radeon/ni_dma.c | 338
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 38
-rw-r--r--  drivers/gpu/drm/radeon/ppsmc.h | 57
-rw-r--r--  drivers/gpu/drm/radeon/pptable.h | 682
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 811
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 60
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 785
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_shaders.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600_dma.c | 497
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c | 304
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 150
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 39
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 281
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 1263
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 121
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 188
-rw-r--r--  drivers/gpu/drm/radeon/radeon_blit_common.h | 44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 159
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 67
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 97
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ucode.h | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 159
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/rv6xx_dpm.c | 68
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 217
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dma.c | 101
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 16
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 857
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 235
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 180
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 71
-rw-r--r--  drivers/gpu/drm/radeon/smu7.h | 170
-rw-r--r--  drivers/gpu/drm/radeon/smu7_discrete.h | 486
-rw-r--r--  drivers/gpu/drm/radeon/smu7_fusion.h | 300
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v1_0.c | 436
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v2_2.c | 165
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v3_1.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v4_2.c | 68
-rw-r--r--  drivers/gpu/drm/rcar-du/Kconfig | 7
-rw-r--r--  drivers/gpu/drm/rcar-du/Makefile | 10
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 258
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 13
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 176
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.h | 63
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 202
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_encoder.h | 49
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_group.c | 187
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_group.h | 50
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.c | 165
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.h | 29
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c (renamed from drivers/gpu/drm/rcar-du/rcar_du_lvds.c) | 101
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h (renamed from drivers/gpu/drm/rcar-du/rcar_du_lvds.h) | 17
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c | 196
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h | 46
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_plane.c | 170
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_plane.h | 26
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_regs.h | 94
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vgacon.c (renamed from drivers/gpu/drm/rcar-du/rcar_du_vga.c) | 65
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vgacon.h (renamed from drivers/gpu/drm/rcar-du/rcar_du_vga.h) | 15
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_lvds_regs.h | 69
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c | 2
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.c | 3
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.h | 2
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 3
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.h | 2
-rw-r--r--  drivers/gpu/drm/sis/sis_mm.c | 8
-rw-r--r--  drivers/gpu/drm/tdfx/tdfx_drv.c | 2
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 43
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 3
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_slave.c | 27
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 102
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c | 41
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 231
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c | 3
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 63
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c | 4
-rw-r--r--  drivers/gpu/drm/via/via_dma.c | 2
-rw-r--r--  drivers/gpu/drm/via/via_drv.c | 3
-rw-r--r--  drivers/gpu/drm/via/via_drv.h | 2
-rw-r--r--  drivers/gpu/drm/via/via_mm.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 28
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | 58
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 4
-rw-r--r--  drivers/gpu/host1x/drm/dc.c | 2
-rw-r--r--  drivers/gpu/host1x/drm/drm.c | 7
-rw-r--r--  drivers/gpu/host1x/drm/gem.c | 16
-rw-r--r--  drivers/gpu/host1x/drm/gem.h | 3
-rw-r--r--  drivers/gpu/host1x/drm/hdmi.c | 20
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 147
-rw-r--r--  drivers/hid/hid-logitech-dj.c | 45
-rw-r--r--  drivers/hid/hid-logitech-dj.h | 1
-rw-r--r--  drivers/hid/hid-sony.c | 3
-rw-r--r--  drivers/hid/hidraw.c | 2
-rw-r--r--  drivers/hv/hv_balloon.c | 21
-rw-r--r--  drivers/hv/vmbus_drv.c | 8
-rw-r--r--  drivers/hwmon/adt7470.c | 2
-rw-r--r--  drivers/hwmon/max6697.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-kempld.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-mxs.c | 2
-rw-r--r--  drivers/iio/adc/ti_am335x_adc.c | 30
-rw-r--r--  drivers/iio/industrialio-trigger.c | 34
-rw-r--r--  drivers/iio/light/adjd_s311.c | 3
-rw-r--r--  drivers/infiniband/core/cma.c | 29
-rw-r--r--  drivers/infiniband/core/mad.c | 8
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 10
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 11
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 4
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 3
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.c | 1
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 5
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_sdma.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 76
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_netlink.c | 9
-rw-r--r--  drivers/macintosh/windfarm_rm31.c | 18
-rw-r--r--  drivers/md/bcache/alloc.c | 46
-rw-r--r--  drivers/md/bcache/bcache.h | 61
-rw-r--r--  drivers/md/bcache/bset.c | 56
-rw-r--r--  drivers/md/bcache/bset.h | 4
-rw-r--r--  drivers/md/bcache/btree.c | 451
-rw-r--r--  drivers/md/bcache/btree.h | 35
-rw-r--r--  drivers/md/bcache/closure.c | 6
-rw-r--r--  drivers/md/bcache/debug.c | 178
-rw-r--r--  drivers/md/bcache/debug.h | 11
-rw-r--r--  drivers/md/bcache/io.c | 68
-rw-r--r--  drivers/md/bcache/journal.c | 25
-rw-r--r--  drivers/md/bcache/movinggc.c | 24
-rw-r--r--  drivers/md/bcache/request.c | 197
-rw-r--r--  drivers/md/bcache/request.h | 2
-rw-r--r--  drivers/md/bcache/super.c | 171
-rw-r--r--  drivers/md/bcache/sysfs.c | 68
-rw-r--r--  drivers/md/bcache/trace.c | 47
-rw-r--r--  drivers/md/bcache/util.c | 17
-rw-r--r--  drivers/md/bcache/util.h | 6
-rw-r--r--  drivers/md/bcache/writeback.c | 133
-rw-r--r--  drivers/md/bcache/writeback.h | 64
-rw-r--r--  drivers/md/dm-cache-policy-mq.c | 16
-rw-r--r--  drivers/md/raid10.c | 8
-rw-r--r--  drivers/md/raid5.c | 15
-rw-r--r--  drivers/md/raid5.h | 1
-rw-r--r--  drivers/media/i2c/ml86v7667.c | 4
-rw-r--r--  drivers/media/platform/coda.c | 2
-rw-r--r--  drivers/media/platform/s5p-g2d/g2d.c | 1
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc_dec.c | 79
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc_enc.c | 46
-rw-r--r--  drivers/media/usb/em28xx/em28xx-i2c.c | 2
-rw-r--r--  drivers/media/usb/hdpvr/hdpvr-core.c | 11
-rw-r--r--  drivers/media/usb/usbtv/Kconfig | 2
-rw-r--r--  drivers/media/usb/usbtv/usbtv.c | 51
-rw-r--r--  drivers/misc/atmel-ssc.c | 11
-rw-r--r--  drivers/misc/mei/hbm.c | 2
-rw-r--r--  drivers/misc/mei/hw-me.c | 14
-rw-r--r--  drivers/misc/mei/init.c | 3
-rw-r--r--  drivers/mmc/host/pxamci.c | 2
-rw-r--r--  drivers/net/arcnet/arcnet.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 8
-rw-r--r--  drivers/net/can/usb/esd_usb2.c | 10
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb.c | 2
-rw-r--r--  drivers/net/can/usb/usb_8dev.c | 1
-rw-r--r--  drivers/net/ethernet/allwinner/Kconfig | 26
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c.h | 3
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 40
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 15
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 53
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 90
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 58
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 66
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 19
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c | 107
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 6
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 38
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 31
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 68
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 69
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c | 1
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 135
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 98
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 85
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 62
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 27
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 101
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 29
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 46
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 6
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 49
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 8
-rw-r--r--  drivers/net/ethernet/sfc/filter.c | 6
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 111
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 3
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c | 4
-rw-r--r--  drivers/net/irda/via-ircc.c | 6
-rw-r--r--  drivers/net/macvlan.c | 27
-rw-r--r--  drivers/net/macvtap.c | 30
-rw-r--r--  drivers/net/phy/mdio-sun4i.c | 14
-rw-r--r--  drivers/net/phy/realtek.c | 4
-rw-r--r--  drivers/net/tun.c | 6
-rw-r--r--  drivers/net/usb/ax88179_178a.c | 9
-rw-r--r--  drivers/net/usb/hso.c | 15
-rw-r--r--  drivers/net/usb/r8152.c | 126
-rw-r--r--  drivers/net/usb/r815x.c | 62
-rw-r--r--  drivers/net/usb/smsc75xx.c | 12
-rw-r--r--  drivers/net/veth.c | 1
-rw-r--r--  drivers/net/vxlan.c | 59
-rw-r--r--  drivers/net/wireless/ath/ath10k/Kconfig | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/mac80211-ops.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar5008_phy.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/hif_usb.c | 13
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 44
-rw-r--r--  drivers/net/wireless/ath/wil6210/debugfs.c | 4
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c | 8
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 5
-rw-r--r--  drivers/net/wireless/cw1200/sta.c | 7
-rw-r--r--  drivers/net/wireless/cw1200/txrx.c | 2
-rw-r--r--  drivers/net/wireless/hostap/hostap_ioctl.c | 4
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-mac.c | 16
-rw-r--r--  drivers/net/wireless/iwlegacy/common.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/main.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/d3.c | 15
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/debugfs.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 65
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.c | 34
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/time-event.c | 33
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 15
-rw-r--r--  drivers/net/wireless/mwifiex/cfg80211.c | 4
-rw-r--r--  drivers/net/wireless/mwifiex/cfp.c | 3
-rw-r--r--  drivers/net/wireless/mwifiex/init.c | 10
-rw-r--r--  drivers/net/wireless/mwifiex/join.c | 6
-rw-r--r--  drivers/net/wireless/mwifiex/main.c | 13
-rw-r--r--  drivers/net/wireless/mwifiex/main.h | 1
-rw-r--r--  drivers/net/wireless/mwifiex/sdio.c | 95
-rw-r--r--  drivers/net/wireless/mwifiex/sdio.h | 3
-rw-r--r--  drivers/net/wireless/mwifiex/sta_ioctl.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/Kconfig | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 18
-rw-r--r--  drivers/net/wireless/rtlwifi/Kconfig | 72
-rw-r--r--  drivers/net/wireless/rtlwifi/Makefile | 10
-rw-r--r--  drivers/net/wireless/rtlwifi/base.c | 19
-rw-r--r--  drivers/net/wireless/rtlwifi/base.h | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/core.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/debug.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/efuse.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 22
-rw-r--r--  drivers/net/wireless/rtlwifi/ps.c | 16
-rw-r--r--  drivers/net/wireless/rtlwifi/ps.h | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/usb.c | 9
-rw-r--r--  drivers/net/wireless/zd1201.c | 4
-rw-r--r--  drivers/of/fdt.c | 2
-rw-r--r--  drivers/of/irq.c | 6
-rw-r--r--  drivers/parisc/iosapic.c | 38
-rw-r--r--  drivers/pci/host/pci-mvebu.c | 27
-rw-r--r--  drivers/pci/hotplug/Kconfig | 5
-rw-r--r--  drivers/pci/hotplug/pciehp_pci.c | 9
-rw-r--r--  drivers/pci/hotplug/rpadlpar_core.c | 1
-rw-r--r--  drivers/pci/pci-acpi.c | 15
-rw-r--r--  drivers/pci/pcie/Kconfig | 5
-rw-r--r--  drivers/pci/setup-bus.c | 69
-rw-r--r--  drivers/pinctrl/core.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-single.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-sunxi.c | 66
-rw-r--r--  drivers/pinctrl/pinctrl-sunxi.h | 2
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-sh73a0.c | 1
-rw-r--r--  drivers/pinctrl/sirf/pinctrl-atlas6.c | 24
-rw-r--r--  drivers/platform/olpc/olpc-ec.c | 2
-rw-r--r--  drivers/platform/x86/hp-wmi.c | 16
-rw-r--r--  drivers/platform/x86/sony-laptop.c | 8
-rw-r--r--  drivers/rapidio/rio.c | 4
-rw-r--r--  drivers/rtc/rtc-stmp3xxx.c | 35
-rw-r--r--  drivers/rtc/rtc-twl.c | 3
-rw-r--r--  drivers/s390/block/dasd.c | 6
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 29
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 8
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 14
-rw-r--r--  drivers/scsi/Kconfig | 1
-rw-r--r--  drivers/scsi/fnic/fnic.h | 2
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 22
-rw-r--r--  drivers/scsi/isci/request.c | 2
-rw-r--r--  drivers/scsi/isci/task.c | 9
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 20
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 11
-rw-r--r--  drivers/scsi/mvsas/mv_sas.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 11
-rw-r--r--  drivers/scsi/scsi.c | 3
-rw-r--r--  drivers/scsi/sd.c | 22
-rw-r--r--  drivers/scsi/virtio_scsi.c | 2
-rw-r--r--  drivers/spi/spi-davinci.c | 2
-rw-r--r--  drivers/staging/android/logger.c | 4
-rw-r--r--  drivers/staging/comedi/TODO | 2
-rw-r--r--  drivers/staging/comedi/comedi_fops.c | 32
-rw-r--r--  drivers/staging/comedi/drivers.c | 2
-rw-r--r--  drivers/staging/frontier/alphatrack.c | 2
-rw-r--r--  drivers/staging/gdm72xx/gdm_qos.c | 2
-rw-r--r--  drivers/staging/imx-drm/Kconfig | 1
-rw-r--r--  drivers/staging/imx-drm/imx-drm-core.c | 25
-rw-r--r--  drivers/staging/imx-drm/ipuv3-crtc.c | 3
-rw-r--r--  drivers/staging/tidspbridge/pmgr/dbll.c | 7
-rw-r--r--  drivers/staging/zcache/zcache-main.c | 6
-rw-r--r--  drivers/staging/zram/zram_drv.c | 6
-rw-r--r--  drivers/thermal/x86_pkg_temp_thermal.c | 18
-rw-r--r--  drivers/tty/serial/8250/8250_early.c | 3
-rw-r--r--  drivers/tty/serial/8250/8250_gsc.c | 3
-rw-r--r--  drivers/tty/serial/Kconfig | 2
-rw-r--r--  drivers/tty/serial/arc_uart.c | 2
-rw-r--r--  drivers/tty/serial/mxs-auart.c | 38
-rw-r--r--  drivers/tty/synclinkmp.c | 2
-rw-r--r--  drivers/tty/tty_port.c | 5
-rw-r--r--  drivers/usb/chipidea/Kconfig | 4
-rw-r--r--  drivers/usb/chipidea/bits.h | 4
-rw-r--r--  drivers/usb/class/usbtmc.c | 8
-rw-r--r--  drivers/usb/core/hub.c | 53
-rw-r--r--  drivers/usb/core/hub.h | 3
-rw-r--r--  drivers/usb/core/quirks.c | 6
-rw-r--r--  drivers/usb/dwc3/Kconfig | 2
-rw-r--r--  drivers/usb/dwc3/core.c | 2
-rw-r--r--  drivers/usb/dwc3/core.h | 4
-rw-r--r--  drivers/usb/dwc3/gadget.c | 1
-rw-r--r--  drivers/usb/gadget/Kconfig | 5
-rw-r--r--  drivers/usb/gadget/at91_udc.c | 16
-rw-r--r--  drivers/usb/gadget/ether.c | 14
-rw-r--r--  drivers/usb/gadget/f_ecm.c | 7
-rw-r--r--  drivers/usb/gadget/f_eem.c | 7
-rw-r--r--  drivers/usb/gadget/f_ncm.c | 7
-rw-r--r--  drivers/usb/gadget/f_phonet.c | 9
-rw-r--r--  drivers/usb/gadget/f_rndis.c | 7
-rw-r--r--  drivers/usb/gadget/f_subset.c | 7
-rw-r--r--  drivers/usb/gadget/fotg210-udc.c | 4
-rw-r--r--  drivers/usb/gadget/multi.c | 10
-rw-r--r--  drivers/usb/gadget/mv_u3d_core.c | 4
-rw-r--r--  drivers/usb/gadget/udc-core.c | 8
-rw-r--r--  drivers/usb/host/ehci-hub.c | 1
-rw-r--r--  drivers/usb/host/ehci-sched.c | 13
-rw-r--r--  drivers/usb/host/ohci-pci.c | 5
-rw-r--r--  drivers/usb/host/pci-quirks.h | 1
-rw-r--r--  drivers/usb/host/xhci-mem.c | 1
-rw-r--r--  drivers/usb/host/xhci-pci.c | 1
-rw-r--r--  drivers/usb/host/xhci-ring.c | 2
-rw-r--r--  drivers/usb/host/xhci.c | 18
-rw-r--r--  drivers/usb/misc/adutux.c | 2
-rw-r--r--  drivers/usb/misc/sisusbvga/sisusb.c | 1
-rw-r--r--  drivers/usb/musb/omap2430.c | 7
-rw-r--r--  drivers/usb/musb/tusb6010.c | 7
-rw-r--r--  drivers/usb/phy/phy-fsl-usb.h | 2
-rw-r--r--  drivers/usb/phy/phy-fsm-usb.c | 2
-rw-r--r--  drivers/usb/phy/phy-omap-usb3.c | 2
-rw-r--r--  drivers/usb/phy/phy-samsung-usb2.c | 2
-rw-r--r--  drivers/usb/renesas_usbhs/mod_gadget.c | 4
-rw-r--r--  drivers/usb/serial/Kconfig | 7
-rw-r--r--  drivers/usb/serial/Makefile | 1
-rw-r--r--  drivers/usb/serial/cp210x.c | 4
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 31
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 34
-rw-r--r--  drivers/usb/serial/keyspan.c | 2
-rw-r--r--  drivers/usb/serial/mos7720.c | 21
-rw-r--r--  drivers/usb/serial/mos7840.c | 175
-rw-r--r--  drivers/usb/serial/option.c | 23
-rw-r--r--  drivers/usb/serial/suunto.c | 41
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 11
-rw-r--r--  drivers/usb/serial/usb_wwan.c | 20
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 7
-rw-r--r--  drivers/usb/wusbcore/wa-xfer.c | 9
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 23
-rw-r--r--  drivers/vfio/vfio.c | 37
-rw-r--r--  drivers/vhost/net.c | 37
-rw-r--r--  drivers/vhost/scsi.c | 17
-rw-r--r--  drivers/vhost/test.c | 6
-rw-r--r--  drivers/vhost/vhost.h | 10
-rw-r--r--  drivers/video/aty/atyfb_base.c | 4
-rw-r--r--  drivers/video/backlight/max8925_bl.c | 41
-rw-r--r--  drivers/video/hdmi.c | 141
-rw-r--r--  drivers/video/mxsfb.c | 26
-rw-r--r--  drivers/video/nuc900fb.c | 3
-rw-r--r--  drivers/video/omap2/displays-new/connector-analog-tv.c | 18
-rw-r--r--  drivers/video/sgivwfb.c | 2
-rw-r--r--  drivers/video/sh7760fb.c | 2
-rw-r--r--  drivers/video/vga16fb.c | 1
-rw-r--r--  drivers/video/xilinxfb.c | 4
-rw-r--r--  drivers/xen/Kconfig | 2
-rw-r--r--  drivers/xen/Makefile | 5
-rw-r--r--  drivers/xen/events.c | 13
-rw-r--r--  drivers/xen/evtchn.c | 21
-rw-r--r--  drivers/xen/xenbus/xenbus_probe_frontend.c | 19
-rw-r--r--  fs/bfs/inode.c | 2
-rw-r--r--  fs/bio.c | 20
-rw-r--r--  fs/btrfs/backref.c | 48
-rw-r--r--  fs/btrfs/ctree.c | 1
-rw-r--r--  fs/btrfs/extent_io.c | 9
-rw-r--r--  fs/btrfs/file.c | 62
-rw-r--r--  fs/btrfs/inode.c | 52
-rw-r--r--  fs/btrfs/transaction.c | 8
-rw-r--r--  fs/btrfs/transaction.h | 2
-rw-r--r--  fs/btrfs/tree-log.c | 5
-rw-r--r--  fs/cifs/cifsencrypt.c | 14
-rw-r--r--  fs/cifs/cifsfs.c | 11
-rw-r--r--  fs/cifs/cifsglob.h | 4
-rw-r--r--  fs/cifs/cifsproto.h | 4
-rw-r--r--  fs/cifs/connect.c | 7
-rw-r--r--  fs/cifs/file.c | 1
-rw-r--r--  fs/cifs/link.c | 84
-rw-r--r--  fs/cifs/readdir.c | 8
-rw-r--r--  fs/cifs/sess.c | 6
-rw-r--r--  fs/cifs/smb1ops.c | 1
-rw-r--r--  fs/cifs/smb2transport.c | 9
-rw-r--r--  fs/dcache.c | 11
-rw-r--r--  fs/debugfs/inode.c | 69
-rw-r--r--  fs/dlm/user.c | 1
-rw-r--r--  fs/efs/inode.c | 2
-rw-r--r--  fs/exec.c | 4
-rw-r--r--  fs/ext4/ext4.h | 1
-rw-r--r--  fs/ext4/ext4_jbd2.c | 8
-rw-r--r--  fs/ext4/extents.c | 2
-rw-r--r--  fs/ext4/file.c | 21
-rw-r--r--  fs/ext4/ialloc.c | 10
-rw-r--r--  fs/ext4/inode.c | 82
-rw-r--r--  fs/ext4/ioctl.c | 6
-rw-r--r--  fs/ext4/super.c | 20
-rw-r--r--  fs/fcntl.c | 4
-rw-r--r--  fs/fuse/dir.c | 51
-rw-r--r--  fs/gfs2/glock.c | 8
-rw-r--r--  fs/gfs2/glops.c | 18
-rw-r--r--  fs/gfs2/inode.c | 6
-rw-r--r--  fs/gfs2/main.c | 2
-rw-r--r--  fs/hugetlbfs/inode.c | 18
-rw-r--r--  fs/lockd/clntlock.c | 13
-rw-r--r--  fs/lockd/clntproc.c | 5
-rw-r--r--  fs/namei.c | 10
-rw-r--r--  fs/namespace.c | 2
-rw-r--r--  fs/nfs/inode.c | 11
-rw-r--r--  fs/nfs/nfs4proc.c | 8
-rw-r--r--  fs/nfs/nfs4xdr.c | 2
-rw-r--r--  fs/nfs/super.c | 4
-rw-r--r--  fs/nfsd/nfs4proc.c | 2
-rw-r--r--  fs/nfsd/nfs4state.c | 2
-rw-r--r--  fs/nfsd/nfs4xdr.c | 5
-rw-r--r--  fs/nfsd/vfs.c | 5
-rw-r--r--  fs/nilfs2/segbuf.c | 5
-rw-r--r--  fs/ocfs2/aops.c | 2
-rw-r--r--  fs/ocfs2/dir.c | 4
-rw-r--r--  fs/ocfs2/file.c | 6
-rw-r--r--  fs/ocfs2/journal.h | 2
-rw-r--r--  fs/ocfs2/move_extents.c | 2
-rw-r--r--  fs/ocfs2/refcounttree.c | 58
-rw-r--r--  fs/ocfs2/refcounttree.h | 6
-rw-r--r--  fs/open.c | 2
-rw-r--r--  fs/proc/fd.c | 2
-rw-r--r--  fs/proc/generic.c | 2
-rw-r--r--  fs/proc/root.c | 4
-rw-r--r--  fs/proc/task_mmu.c | 31
-rw-r--r--  fs/reiserfs/procfs.c | 99
-rw-r--r--  fs/reiserfs/super.c | 3
-rw-r--r--  fs/xfs/xfs_dinode.h | 3
-rw-r--r--  fs/xfs/xfs_inode.c | 31
-rw-r--r--  fs/xfs/xfs_log_recover.c | 13
-rw-r--r--  include/acpi/acpi_bus.h | 14
-rw-r--r--  include/acpi/video.h | 11
-rw-r--r--  include/asm-generic/pgtable.h | 30
-rw-r--r--  include/asm-generic/tlb.h | 2
-rw-r--r--  include/drm/drmP.h | 254
-rw-r--r--  include/drm/drm_agpsupport.h | 194
-rw-r--r--  include/drm/drm_crtc.h | 85
-rw-r--r--  include/drm/drm_dp_helper.h | 31
-rw-r--r--  include/drm/drm_edid.h | 5
-rw-r--r--  include/drm/drm_fb_cma_helper.h | 1
-rw-r--r--  include/drm/drm_fixed.h | 14
-rw-r--r--  include/drm/drm_flip_work.h | 76
-rw-r--r--  include/drm/drm_gem_cma_helper.h | 8
-rw-r--r--  include/drm/drm_mm.h | 142
-rw-r--r--  include/drm/drm_pciids.h | 48
-rw-r--r--  include/drm/drm_vma_manager.h | 257
-rw-r--r--  include/drm/i2c/tda998x.h | 30
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 15
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 10
-rw-r--r--  include/dt-bindings/clock/vf610-clock.h | 4
-rw-r--r--  include/dt-bindings/pinctrl/am33xx.h | 2
-rw-r--r--  include/linux/acpi.h | 1
-rw-r--r--  include/linux/cgroup.h | 3
-rw-r--r--  include/linux/cgroup_subsys.h | 45
-rw-r--r--  include/linux/crc-t10dif.h | 4
-rw-r--r--  include/linux/dcache.h | 1
-rw-r--r--  include/linux/drbd.h | 6
-rw-r--r--  include/linux/drbd_genl.h | 2
-rw-r--r--  include/linux/drbd_limits.h | 9
-rw-r--r--  include/linux/edac.h | 7
-rw-r--r--  include/linux/firewire.h | 1
-rw-r--r--  include/linux/ftrace_event.h | 12
-rw-r--r--  include/linux/hdmi.h | 53
-rw-r--r--  include/linux/iio/trigger.h | 3
-rw-r--r--  include/linux/inetdevice.h | 34
-rw-r--r--  include/linux/ipv6.h | 1
-rw-r--r--  include/linux/kernel.h | 2
-rw-r--r--  include/linux/mfd/syscon/imx6q-iomuxc-gpr.h | 137
-rw-r--r--  include/linux/mfd/ti_am335x_tscadc.h | 16
-rw-r--r--  include/linux/mlx5/device.h | 42
-rw-r--r--  include/linux/mlx5/driver.h | 11
-rw-r--r--  include/linux/mm_types.h | 1
-rw-r--r--  include/linux/mod_devicetable.h | 5
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/linux/platform_data/mmc-pxamci.h | 2
-rw-r--r--  include/linux/platform_data/rcar-du.h | 34
-rw-r--r--  include/linux/regmap.h | 1
-rw-r--r--  include/linux/sched.h | 7
-rw-r--r--  include/linux/shdma-base.h | 4
-rw-r--r--  include/linux/skbuff.h | 2
-rw-r--r--  include/linux/spinlock.h | 14
-rw-r--r--  include/linux/sunrpc/sched.h | 1
-rw-r--r--  include/linux/swapops.h | 2
-rw-r--r--  include/linux/syscalls.h | 5
-rw-r--r--  include/linux/tick.h | 6
-rw-r--r--  include/linux/usb.h | 11
-rw-r--r--  include/linux/user_namespace.h | 1
-rw-r--r--  include/linux/vga_switcheroo.h | 13
-rw-r--r--  include/linux/vmpressure.h | 3
-rw-r--r--  include/linux/wait.h | 57
-rw-r--r--  include/media/v4l2-ctrls.h | 1
-rw-r--r--  include/net/busy_poll.h | 18
-rw-r--r--  include/net/ip6_fib.h | 2
-rw-r--r--  include/net/ip6_route.h | 2
-rw-r--r--  include/net/ip_tunnels.h | 14
-rw-r--r--  include/net/ndisc.h | 2
-rw-r--r--  include/net/nfc/hci.h | 2
-rw-r--r--  include/net/nfc/nfc.h | 4
-rw-r--r--  include/net/sch_generic.h | 9
-rw-r--r--  include/net/sock.h | 2
-rw-r--r--  include/trace/events/bcache.h | 381
-rw-r--r--  include/trace/ftrace.h | 4
-rw-r--r--  include/uapi/drm/Kbuild | 1
-rw-r--r--  include/uapi/drm/drm.h | 3
-rw-r--r--  include/uapi/drm/drm_mode.h | 16
-rw-r--r--  include/uapi/drm/i915_drm.h | 49
-rw-r--r--  include/uapi/drm/msm_drm.h | 207
-rw-r--r--  include/uapi/drm/radeon_drm.h | 2
-rw-r--r--  include/uapi/linux/firewire-cdev.h | 4
-rw-r--r--  include/uapi/linux/ip.h | 34
-rw-r--r--  include/uapi/linux/nfc.h | 6
-rw-r--r--  include/uapi/linux/pkt_sched.h | 10
-rw-r--r--  include/uapi/linux/snmp.h | 2
-rw-r--r--  include/uapi/linux/usb/ch11.h | 11
-rw-r--r--  include/xen/interface/io/blkif.h | 53
-rw-r--r--  include/xen/interface/io/ring.h | 5
-rw-r--r--  init/Kconfig | 2
-rw-r--r--  kernel/Makefile | 3
-rw-r--r--  kernel/cgroup.c | 35
-rw-r--r--  kernel/cpuset.c | 20
-rw-r--r--  kernel/fork.c | 6
-rw-r--r--  kernel/freezer.c | 2
-rw-r--r--  kernel/mutex.c | 4
-rw-r--r--  kernel/power/process.c | 11
-rw-r--r--  kernel/power/qos.c | 20
-rw-r--r--  kernel/printk/Makefile | 2
-rw-r--r--  kernel/printk/braille.c | 49
-rw-r--r--  kernel/printk/braille.h | 48
-rw-r--r--  kernel/printk/console_cmdline.h | 14
-rw-r--r--  kernel/printk/printk.c (renamed from kernel/printk.c) | 183
-rw-r--r--  kernel/ptrace.c | 1
-rw-r--r--  kernel/sched/core.c | 96
-rw-r--r--  kernel/sched/cpupri.c | 4
-rw-r--r--  kernel/sched/fair.c | 14
-rw-r--r--  kernel/sysctl.c | 6
-rw-r--r--  kernel/time/sched_clock.c | 2
-rw-r--r--  kernel/time/tick-sched.c | 14
-rw-r--r--  kernel/trace/ftrace.c | 105
-rw-r--r--  kernel/trace/ring_buffer.c | 26
-rw-r--r--  kernel/trace/trace.c | 254
-rw-r--r--  kernel/trace/trace.h | 18
-rw-r--r--  kernel/trace/trace_event_perf.c | 10
-rw-r--r--  kernel/trace/trace_events.c | 292
-rw-r--r--  kernel/trace/trace_events_filter.c | 21
-rw-r--r--  kernel/trace/trace_functions.c | 2
-rw-r--r--  kernel/trace/trace_functions_graph.c | 54
-rw-r--r--  kernel/trace/trace_kprobe.c | 50
-rw-r--r--  kernel/trace/trace_mmiotrace.c | 8
-rw-r--r--  kernel/trace/trace_output.c | 14
-rw-r--r--  kernel/trace/trace_syscalls.c | 26
-rw-r--r--  kernel/trace/trace_uprobe.c | 53
-rw-r--r--  kernel/user_namespace.c | 17
-rw-r--r--  kernel/wait.c | 6
-rw-r--r--  kernel/workqueue.c | 44
-rw-r--r--  lib/Kconfig | 2
-rw-r--r--  lib/crc-t10dif.c | 73
-rw-r--r--  lib/lz4/lz4_compress.c | 4
-rw-r--r--  lib/lz4/lz4_decompress.c | 6
-rw-r--r--  lib/lz4/lz4hc_compress.c | 4
-rw-r--r--  lib/mpi/longlong.h | 17
-rw-r--r--  mm/fremap.c | 11
-rw-r--r--  mm/huge_memory.c | 4
-rw-r--r--  mm/hugetlb.c | 2
-rw-r--r--  mm/memcontrol.c | 6
-rw-r--r--  mm/memory.c | 49
-rw-r--r--  mm/mempolicy.c | 6
-rw-r--r--  mm/mmap.c | 6
-rw-r--r--  mm/rmap.c | 14
-rw-r--r--  mm/shmem.c | 11
-rw-r--r--  mm/slub.c | 3
-rw-r--r--  mm/swap.c | 29
-rw-r--r--  mm/swapfile.c | 19
-rw-r--r--  mm/vmpressure.c | 28
-rw-r--r--  mm/zbud.c | 2
-rw-r--r--  net/8021q/vlan_core.c | 7
-rw-r--r--  net/Kconfig | 2
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 2
-rw-r--r--  net/batman-adv/gateway_client.c | 13
-rw-r--r--  net/batman-adv/gateway_client.h | 3
-rw-r--r--  net/batman-adv/soft-interface.c | 9
-rw-r--r--  net/batman-adv/unicast.c | 23
-rw-r--r--  net/bluetooth/hci_core.c | 26
-rw-r--r--  net/bridge/br_device.c | 3
-rw-r--r--  net/bridge/br_fdb.c | 10
-rw-r--r--  net/bridge/br_input.c | 3
-rw-r--r--  net/bridge/br_multicast.c | 46
-rw-r--r--  net/bridge/br_netlink.c | 4
-rw-r--r--  net/bridge/br_private.h | 12
-rw-r--r--  net/bridge/br_sysfs_br.c | 2
-rw-r--r--  net/bridge/br_vlan.c | 4
-rw-r--r--  net/core/flow_dissector.c | 1
-rw-r--r--  net/core/neighbour.c | 39
-rw-r--r--  net/core/rtnetlink.c | 4
-rw-r--r--  net/core/skbuff.c | 5
-rw-r--r--  net/core/sock.c | 6
-rw-r--r--  net/core/sysctl_net_core.c | 8
-rw-r--r--  net/ipv4/devinet.c | 4
-rw-r--r--  net/ipv4/esp4.c | 2
-rw-r--r--  net/ipv4/fib_trie.c | 7
-rw-r--r--  net/ipv4/ip_gre.c | 2
-rw-r--r--  net/ipv4/ip_tunnel_core.c | 4
-rw-r--r--  net/ipv4/proc.c | 2
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 6
-rw-r--r--  net/ipv4/tcp.c | 7
-rw-r--r--  net/ipv4/tcp_cubic.c | 12
-rw-r--r--  net/ipv6/addrconf.c | 47
-rw-r--r--  net/ipv6/esp6.c | 2
-rw-r--r--  net/ipv6/ip6_fib.c | 41
-rw-r--r--  net/ipv6/ip6mr.c | 5
-rw-r--r--  net/ipv6/ndisc.c | 8
-rw-r--r--  net/ipv6/reassembly.c | 5
-rw-r--r--  net/ipv6/route.c | 29
-rw-r--r--  net/key/af_key.c | 4
-rw-r--r--  net/mac80211/cfg.c | 2
-rw-r--r--  net/mac80211/mesh_ps.c | 4
-rw-r--r--  net/mac80211/mlme.c | 54
-rw-r--r--  net/mac80211/pm.c | 7
-rw-r--r--  net/mac80211/rc80211_minstrel.c | 3
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 10
-rw-r--r--  net/mac80211/rx.c | 10
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 5
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 12
-rw-r--r--  net/netfilter/nfnetlink_log.c | 6
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c | 5
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 28
-rw-r--r--  net/netfilter/xt_TCPOPTSTRIP.c | 10
-rw-r--r--  net/netfilter/xt_socket.c | 10
-rw-r--r--  net/netlabel/netlabel_cipso_v4.c | 4
-rw-r--r--  net/netlabel/netlabel_domainhash.c | 104
-rw-r--r--  net/netlabel/netlabel_domainhash.h | 46
-rw-r--r--  net/netlabel/netlabel_kapi.c | 88
-rw-r--r--  net/netlabel/netlabel_mgmt.c | 44
-rw-r--r--  net/netlabel/netlabel_unlabeled.c | 2
-rw-r--r--  net/netlink/genetlink.c | 4
-rw-r--r--  net/nfc/core.c | 20
-rw-r--r--  net/nfc/hci/core.c | 8
-rw-r--r--  net/nfc/nci/Kconfig | 1
-rw-r--r--  net/nfc/netlink.c | 12
-rw-r--r--  net/nfc/nfc.h | 6
-rw-r--r--  net/openvswitch/actions.c | 1
-rw-r--r--  net/openvswitch/datapath.c | 3
-rw-r--r--  net/openvswitch/flow.c | 2
-rw-r--r--  net/packet/af_packet.c | 2
-rw-r--r--  net/sched/sch_api.c | 41
-rw-r--r--  net/sched/sch_atm.c | 1
-rw-r--r--  net/sched/sch_cbq.c | 1
-rw-r--r--  net/sched/sch_generic.c | 8
-rw-r--r--  net/sched/sch_htb.c | 15
-rw-r--r--  net/sctp/associola.c | 4
-rw-r--r--  net/sctp/transport.c | 4
-rw-r--r--  net/socket.c | 2
-rw-r--r--  net/sunrpc/auth_gss/gss_rpc_upcall.c | 3
-rw-r--r--  net/sunrpc/auth_gss/gss_rpc_xdr.c | 9
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 2
-rw-r--r--  net/sunrpc/clnt.c | 4
-rw-r--r--  net/sunrpc/netns.h | 1
-rw-r--r--  net/sunrpc/rpcb_clnt.c | 48
-rw-r--r--  net/sunrpc/svcsock.c | 4
-rw-r--r--  net/tipc/bearer.c | 9
-rw-r--r--net/tipc/server.c15
-rw-r--r--net/vmw_vsock/af_vsock.c2
-rw-r--r--net/wireless/core.c1
-rw-r--r--net/wireless/nl80211.c39
-rw-r--r--net/wireless/reg.c7
-rw-r--r--net/wireless/sme.c39
-rw-r--r--security/smack/smack_lsm.c24
-rw-r--r--sound/core/compress_offload.c2
-rw-r--r--sound/pci/hda/hda_auto_parser.c2
-rw-r--r--sound/pci/hda/hda_generic.c6
-rw-r--r--sound/pci/hda/hda_intel.c36
-rw-r--r--sound/pci/hda/patch_realtek.c11
-rw-r--r--sound/pci/hda/patch_sigmatel.c14
-rw-r--r--sound/soc/au1x/ac97c.c2
-rw-r--r--sound/soc/blackfin/bf5xx-ac97.c5
-rw-r--r--sound/soc/blackfin/bf5xx-ac97.h1
-rw-r--r--sound/soc/cirrus/ep93xx-ac97.c4
-rw-r--r--sound/soc/codecs/cs42l52.c5
-rw-r--r--sound/soc/codecs/max98088.c2
-rw-r--r--sound/soc/codecs/sgtl5000.c21
-rw-r--r--sound/soc/codecs/wm0010.c24
-rw-r--r--sound/soc/soc-core.c4
-rw-r--r--sound/soc/soc-dapm.c12
-rw-r--r--sound/soc/tegra/tegra20_ac97.c6
-rw-r--r--sound/soc/tegra/tegra20_spdif.c4
-rw-r--r--sound/soc/tegra/tegra30_i2s.c2
-rw-r--r--sound/usb/6fire/comm.c38
-rw-r--r--sound/usb/6fire/comm.h2
-rw-r--r--sound/usb/6fire/midi.c16
-rw-r--r--sound/usb/6fire/midi.h6
-rw-r--r--sound/usb/6fire/pcm.c43
-rw-r--r--sound/usb/6fire/pcm.h2
-rw-r--r--sound/usb/endpoint.c13
-rw-r--r--sound/usb/hiface/pcm.c2
-rw-r--r--sound/usb/mixer.c1
-rw-r--r--sound/usb/quirks.c6
-rw-r--r--tools/hv/hv_kvp_daemon.c5
1439 files changed, 64132 insertions, 25756 deletions
diff --git a/.gitignore b/.gitignore
index 3b8b9b33be38..7e9932e55475 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,6 +29,7 @@ modules.builtin
 *.bz2
 *.lzma
 *.xz
+*.lz4
 *.lzo
 *.patch
 *.gcno
diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkback b/Documentation/ABI/testing/sysfs-driver-xen-blkback
new file mode 100644
index 000000000000..8bb43b66eb55
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-xen-blkback
@@ -0,0 +1,17 @@
+What:		/sys/module/xen_blkback/parameters/max_buffer_pages
+Date:		March 2013
+KernelVersion:	3.11
+Contact:	Roger Pau Monné <roger.pau@citrix.com>
+Description:
+                Maximum number of free pages to keep in each block
+                backend buffer.
+
+What:		/sys/module/xen_blkback/parameters/max_persistent_grants
+Date:		March 2013
+KernelVersion:	3.11
+Contact:	Roger Pau Monné <roger.pau@citrix.com>
+Description:
+                Maximum number of grants to map persistently in
+                blkback. If the frontend tries to use more than
+                max_persistent_grants, the LRU kicks in and starts
+                removing 5% of max_persistent_grants every 100ms.
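
These parameters can also be inspected and tuned at runtime through sysfs. A minimal userspace sketch, assuming xen-blkback is loaded and the documented paths above exist (the value 1024 is just an illustrative tuning choice):

/* Read, then (as root) update, a xen-blkback module parameter. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/module/xen_blkback/parameters/max_buffer_pages";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("max_buffer_pages = %s", buf);
	fclose(f);

	/* Writing requires root; the backend picks the value up live. */
	f = fopen(path, "w");
	if (f) {
		fputs("1024\n", f);
		fclose(f);
	}
	return 0;
}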
diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkfront b/Documentation/ABI/testing/sysfs-driver-xen-blkfront
new file mode 100644
index 000000000000..c0a6cb7eb314
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-xen-blkfront
@@ -0,0 +1,10 @@
+What:		/sys/module/xen_blkfront/parameters/max
+Date:		June 2013
+KernelVersion:	3.11
+Contact:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+                Maximum number of segments that the frontend will negotiate
+                with the backend for indirect descriptors. The default value
+                is 32 - a higher value means more potential throughput but
+                more memory usage. The backend picks the minimum of the
+                frontend value and its default backend value.
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index cbfdf5486639..fe397f90a34f 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -84,7 +84,7 @@ X!Iinclude/linux/kobject.h
 
      <sect1><title>Kernel utility functions</title>
 !Iinclude/linux/kernel.h
-!Ekernel/printk.c
+!Ekernel/printk/printk.c
 !Ekernel/panic.c
 !Ekernel/sys.c
 !Ekernel/rcupdate.c
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 7d1278e7a434..ed1d6d289022 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -156,13 +156,6 @@
 			</para></listitem>
 		</varlistentry>
 		<varlistentry>
-			<term>DRIVER_USE_MTRR</term>
-			<listitem><para>
-			Driver uses MTRR interface for mapping memory, the DRM core will
-			manage MTRR resources. Deprecated.
-			</para></listitem>
-		</varlistentry>
-		<varlistentry>
 			<term>DRIVER_PCI_DMA</term>
 			<listitem><para>
 			Driver is capable of PCI DMA, mapping of PCI DMA buffers to
@@ -195,28 +188,6 @@
 			</para></listitem>
 		</varlistentry>
 		<varlistentry>
-			<term>DRIVER_IRQ_VBL</term>
-			<listitem><para>Unused. Deprecated.</para></listitem>
-		</varlistentry>
-		<varlistentry>
-			<term>DRIVER_DMA_QUEUE</term>
-			<listitem><para>
-			Should be set if the driver queues DMA requests and completes them
-			asynchronously. Deprecated.
-			</para></listitem>
-		</varlistentry>
-		<varlistentry>
-			<term>DRIVER_FB_DMA</term>
-			<listitem><para>
-			Driver supports DMA to/from the framebuffer, mapping of frambuffer
-			DMA buffers to userspace will be supported. Deprecated.
-			</para></listitem>
-		</varlistentry>
-		<varlistentry>
-			<term>DRIVER_IRQ_VBL2</term>
-			<listitem><para>Unused. Deprecated.</para></listitem>
-		</varlistentry>
-		<varlistentry>
 			<term>DRIVER_GEM</term>
 			<listitem><para>
 			Driver use the GEM memory manager.
@@ -234,6 +205,12 @@
 			Driver implements DRM PRIME buffer sharing.
 			</para></listitem>
 		</varlistentry>
+		<varlistentry>
+			<term>DRIVER_RENDER</term>
+			<listitem><para>
+			Driver supports dedicated render nodes.
+			</para></listitem>
+		</varlistentry>
 	</variablelist>
 	</sect3>
 	<sect3>
@@ -2212,6 +2189,18 @@ void intel_crt_init(struct drm_device *dev)
 !Iinclude/drm/drm_rect.h
 !Edrivers/gpu/drm/drm_rect.c
     </sect2>
+    <sect2>
+      <title>Flip-work Helper Reference</title>
+!Pinclude/drm/drm_flip_work.h flip utils
+!Iinclude/drm/drm_flip_work.h
+!Edrivers/gpu/drm/drm_flip_work.c
+    </sect2>
+    <sect2>
+      <title>VMA Offset Manager</title>
+!Pdrivers/gpu/drm/drm_vma_manager.c vma offset manager
+!Edrivers/gpu/drm/drm_vma_manager.c
+!Iinclude/drm/drm_vma_manager.h
+    </sect2>
   </sect1>
 
   <!-- Internals: kms properties -->
@@ -2422,18 +2411,18 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
     </abstract>
     <para>
       The <methodname>firstopen</methodname> method is called by the DRM core
-      when an application opens a device that has no other opened file handle.
-      Similarly the <methodname>lastclose</methodname> method is called when
-      the last application holding a file handle opened on the device closes
-      it. Both methods are mostly used for UMS (User Mode Setting) drivers to
-      acquire and release device resources which should be done in the
-      <methodname>load</methodname> and <methodname>unload</methodname>
-      methods for KMS drivers.
+      for legacy UMS (User Mode Setting) drivers only when an application
+      opens a device that has no other opened file handle. UMS drivers can
+      implement it to acquire device resources. KMS drivers can't use the
+      method and must acquire resources in the <methodname>load</methodname>
+      method instead.
     </para>
     <para>
-      Note that the <methodname>lastclose</methodname> method is also called
-      at module unload time or, for hot-pluggable devices, when the device is
-      unplugged. The <methodname>firstopen</methodname> and
+      Similarly the <methodname>lastclose</methodname> method is called when
+      the last application holding a file handle opened on the device closes
+      it, for both UMS and KMS drivers. Additionally, the method is also
+      called at module unload time or, for hot-pluggable devices, when the
+      device is unplugged. The <methodname>firstopen</methodname> and
       <methodname>lastclose</methodname> calls can thus be unbalanced.
     </para>
    <para>
@@ -2462,7 +2451,12 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
     <para>
       The <methodname>lastclose</methodname> method should restore CRTC and
       plane properties to default value, so that a subsequent open of the
-      device will not inherit state from the previous user.
+      device will not inherit state from the previous user. It can also be
+      used to execute delayed power switching state changes, e.g. in
+      conjunction with the vga-switcheroo infrastructure. Beyond that KMS
+      drivers should not do any further cleanup. Only legacy UMS drivers might
+      need to clean up device state so that the vga console or an independent
+      fbdev driver could take over.
     </para>
   </sect2>
   <sect2>
@@ -2498,7 +2492,6 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
       <programlisting>
 	.poll = drm_poll,
 	.read = drm_read,
-	.fasync = drm_fasync,
 	.llseek = no_llseek,
       </programlisting>
     </para>
@@ -2657,6 +2650,69 @@ int (*resume) (struct drm_device *);</synopsis>
     info, since man pages should cover the rest.
   </para>
 
+  <!-- External: render nodes -->
+
+  <sect1>
+    <title>Render nodes</title>
+    <para>
+      DRM core provides multiple character-devices for user-space to use.
+      Depending on which device is opened, user-space can perform a different
+      set of operations (mainly ioctls). The primary node is always created
+      and called <term>card&lt;num&gt;</term>. Additionally, a currently
+      unused control node, called <term>controlD&lt;num&gt;</term> is also
+      created. The primary node provides all legacy operations and
+      historically was the only interface used by userspace. With KMS, the
+      control node was introduced. However, the planned KMS control interface
+      has never been written and so the control node stays unused to date.
+    </para>
+    <para>
+      With the increased use of offscreen renderers and GPGPU applications,
+      clients no longer require running compositors or graphics servers to
+      make use of a GPU. But the DRM API required unprivileged clients to
+      authenticate to a DRM-Master prior to getting GPU access. To avoid this
+      step and to grant clients GPU access without authenticating, render
+      nodes were introduced. Render nodes solely serve render clients, that
+      is, no modesetting or privileged ioctls can be issued on render nodes.
+      Only non-global rendering commands are allowed. If a driver supports
+      render nodes, it must advertise it via the <term>DRIVER_RENDER</term>
+      DRM driver capability. If not supported, the primary node must be used
+      for render clients together with the legacy drmAuth authentication
+      procedure.
+    </para>
+    <para>
+      If a driver advertises render node support, DRM core will create a
+      separate render node called <term>renderD&lt;num&gt;</term>. There will
+      be one render node per device. No ioctls except PRIME-related ioctls
+      will be allowed on this node. Especially <term>GEM_OPEN</term> will be
+      explicitly prohibited. Render nodes are designed to avoid the
+      buffer-leaks, which occur if clients guess the flink names or mmap
+      offsets on the legacy interface. Additionally to this basic interface,
+      drivers must mark their driver-dependent render-only ioctls as
+      <term>DRM_RENDER_ALLOW</term> so render clients can use them. Driver
+      authors must be careful not to allow any privileged ioctls on render
+      nodes.
+    </para>
+    <para>
+      With render nodes, user-space can now control access to the render node
+      via basic file-system access-modes. A running graphics server which
+      authenticates clients on the privileged primary/legacy node is no longer
+      required. Instead, a client can open the render node and is immediately
+      granted GPU access. Communication between clients (or servers) is done
+      via PRIME. FLINK from render node to legacy node is not supported. New
+      clients must not use the insecure FLINK interface.
+    </para>
+    <para>
+      Besides dropping all modeset/global ioctls, render nodes also drop the
+      DRM-Master concept. There is no reason to associate render clients with
+      a DRM-Master as they are independent of any graphics server. Besides,
+      they must work without any running master, anyway.
+      Drivers must be able to run without a master object if they support
+      render nodes. If, on the other hand, a driver requires shared state
+      between clients which is visible to user-space and accessible beyond
+      open-file boundaries, they cannot support render nodes.
+    </para>
+  </sect1>
+
   <!-- External: vblank handling -->
 
   <sect1>
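
To make the render-node flags described above concrete, here is a minimal sketch of how a driver could expose a render-safe ioctl. The FOO driver name, its ioctl, and its handler are hypothetical; DRM_IOCTL_DEF_DRV(), DRM_RENDER_ALLOW and DRIVER_RENDER are the interfaces this documentation covers:

#include <drm/drmP.h>

struct drm_foo_gem_new {
	__u32 size;	/* hypothetical: requested buffer size */
	__u32 handle;	/* hypothetical: returned GEM handle */
};

#define DRM_FOO_GEM_NEW		0x00
#define DRM_IOCTL_FOO_GEM_NEW \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_FOO_GEM_NEW, struct drm_foo_gem_new)

static int foo_gem_new_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	return 0;	/* a real driver would allocate a buffer object here */
}

static struct drm_ioctl_desc foo_ioctls[] = {
	/* Render-only command: safe to expose on renderD<num>. */
	DRM_IOCTL_DEF_DRV(FOO_GEM_NEW, foo_gem_new_ioctl,
			  DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
};

static struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_RENDER,
	.ioctls          = foo_ioctls,
	.num_ioctls      = ARRAY_SIZE(foo_ioctls),
};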
diff --git a/Documentation/DocBook/media_api.tmpl b/Documentation/DocBook/media_api.tmpl
index 6a8b7158697f..9c92bb879b6d 100644
--- a/Documentation/DocBook/media_api.tmpl
+++ b/Documentation/DocBook/media_api.tmpl
@@ -1,6 +1,6 @@
1<?xml version="1.0"?> 1<?xml version="1.0"?>
2<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN" 2<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
3 "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [ 3 "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [
4<!ENTITY % media-entities SYSTEM "./media-entities.tmpl"> %media-entities; 4<!ENTITY % media-entities SYSTEM "./media-entities.tmpl"> %media-entities;
5<!ENTITY media-indices SYSTEM "./media-indices.tmpl"> 5<!ENTITY media-indices SYSTEM "./media-indices.tmpl">
6 6
diff --git a/Documentation/bcache.txt b/Documentation/bcache.txt
index c3365f26b2d9..32b6c3189d98 100644
--- a/Documentation/bcache.txt
+++ b/Documentation/bcache.txt
@@ -46,29 +46,33 @@ you format your backing devices and cache device at the same time, you won't
 have to manually attach:
   make-bcache -B /dev/sda /dev/sdb -C /dev/sdc
 
-To make bcache devices known to the kernel, echo them to /sys/fs/bcache/register:
+bcache-tools now ships udev rules, and bcache devices are known to the kernel
+immediately. Without udev, you can manually register devices like this:
 
   echo /dev/sdb > /sys/fs/bcache/register
   echo /dev/sdc > /sys/fs/bcache/register
 
-To register your bcache devices automatically, you could add something like
-this to an init script:
-
-  echo /dev/sd* > /sys/fs/bcache/register_quiet
-
-It'll look for bcache superblocks and ignore everything that doesn't have one.
-
-Registering the backing device makes the bcache show up in /dev; you can now
-format it and use it as normal. But the first time using a new bcache device,
-it'll be running in passthrough mode until you attach it to a cache. See the
-section on attaching.
-
-The devices show up at /dev/bcacheN, and can be controlled via sysfs from
-/sys/block/bcacheN/bcache:
+Registering the backing device makes the bcache device show up in /dev; you can
+now format it and use it as normal. But the first time using a new bcache
+device, it'll be running in passthrough mode until you attach it to a cache.
+See the section on attaching.
+
+The devices show up as:
+
+  /dev/bcache<N>
+
+As well as (with udev):
+
+  /dev/bcache/by-uuid/<uuid>
+  /dev/bcache/by-label/<label>
+
+To get started:
 
   mkfs.ext4 /dev/bcache0
   mount /dev/bcache0 /mnt
 
+You can control bcache devices through sysfs at /sys/block/bcache<N>/bcache .
+
 Cache devices are managed as sets; multiple caches per set isn't supported yet
 but will allow for mirroring of metadata and dirty data in the future. Your new
 cache set shows up as /sys/fs/bcache/<UUID>
@@ -80,11 +84,11 @@ must be attached to your cache set to enable caching. Attaching a backing
 device to a cache set is done thusly, with the UUID of the cache set in
 /sys/fs/bcache:
 
-  echo <UUID> > /sys/block/bcache0/bcache/attach
+  echo <CSET-UUID> > /sys/block/bcache0/bcache/attach
 
 This only has to be done once. The next time you reboot, just reregister all
 your bcache devices. If a backing device has data in a cache somewhere, the
-/dev/bcache# device won't be created until the cache shows up - particularly
+/dev/bcache<N> device won't be created until the cache shows up - particularly
 important if you have writeback caching turned on.
 
 If you're booting up and your cache device is gone and never coming back, you
@@ -191,6 +195,9 @@ want for getting the best possible numbers when benchmarking.
 
 SYSFS - BACKING DEVICE:
 
+Available at /sys/block/<bdev>/bcache, /sys/block/bcache*/bcache and
+(if attached) /sys/fs/bcache/<cset-uuid>/bdev*
+
 attach
   Echo the UUID of a cache set to this file to enable caching.
 
@@ -300,6 +307,8 @@ cache_readaheads
 
 SYSFS - CACHE SET:
 
+Available at /sys/fs/bcache/<cset-uuid>
+
 average_key_size
   Average data per key in the btree.
 
@@ -390,6 +399,8 @@ trigger_gc
 
 SYSFS - CACHE DEVICE:
 
+Available at /sys/block/<cdev>/bcache
+
 block_size
   Minimum granularity of writes - should match hardware sector size.
 
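
The attach step can also be driven programmatically. A sketch in C, with a placeholder cache-set UUID (substitute your own, as shown by /sys/fs/bcache):

/* Attach a bcache backing device to a cache set, mirroring
 * "echo <CSET-UUID> > /sys/block/bcache0/bcache/attach". */
#include <stdio.h>

int main(void)
{
	/* Placeholder UUID: replace with your cache set's UUID. */
	const char *uuid = "00000000-0000-0000-0000-000000000000";
	FILE *f = fopen("/sys/block/bcache0/bcache/attach", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%s\n", uuid);
	fclose(f);
	return 0;
}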
diff --git a/Documentation/devicetree/bindings/clock/imx27-clock.txt b/Documentation/devicetree/bindings/clock/imx27-clock.txt
index ab1a56e9de9d..7a2070393732 100644
--- a/Documentation/devicetree/bindings/clock/imx27-clock.txt
+++ b/Documentation/devicetree/bindings/clock/imx27-clock.txt
@@ -98,6 +98,7 @@ clocks and IDs.
 	fpm			83
 	mpll_osc_sel		84
 	mpll_sel		85
+	spll_gate		86
 
 Examples:
 
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
index a1ee681942cc..6113f9275f42 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
@@ -4,7 +4,7 @@
4Required properties : 4Required properties :
5 5
6 - reg : Offset and length of the register set for the device 6 - reg : Offset and length of the register set for the device
7 - compatible : Should be "marvell,mv64xxx-i2c" 7 - compatible : Should be "marvell,mv64xxx-i2c" or "allwinner,sun4i-i2c"
8 - interrupts : The interrupt number 8 - interrupts : The interrupt number
9 9
10Optional properties : 10Optional properties :
diff --git a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
index d5a308629c57..30b0581bb1ce 100644
--- a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
+++ b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
@@ -31,9 +31,8 @@ Optional nodes:
   Optional sub-node properties:
   ti,warm-reset - maintain voltage during warm reset(boolean)
   ti,roof-floor - control voltage selection by pin(boolean)
-  ti,sleep-mode - mode to adopt in pmic sleep 0 - off, 1 - auto,
+  ti,mode-sleep - mode to adopt in pmic sleep 0 - off, 1 - auto,
   2 - eco, 3 - forced pwm
-  ti,tstep - slope control 0 - Jump, 1 10mV/us, 2 5mV/us, 3 2.5mV/us
   ti,smps-range - OTP has the wrong range set for the hardware so override
   0 - low range, 1 - high range.
 
@@ -59,7 +58,6 @@ pmic {
 		ti,warm-reset;
 		ti,roof-floor;
 		ti,mode-sleep = <0>;
-		ti,tstep = <0>;
 		ti,smps-range = <1>;
 	};
 
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index d5a79caec147..366ce9b87240 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -26,6 +26,7 @@ est	ESTeem Wireless Modems
 fsl	Freescale Semiconductor
 GEFanuc	GE Fanuc Intelligent Platforms Embedded Systems, Inc.
 gef	GE Fanuc Intelligent Platforms Embedded Systems, Inc.
+hisilicon	Hisilicon Limited.
 hp	Hewlett Packard
 ibm	International Business Machines (IBM)
 idt	Integrated Device Technologies, Inc.
@@ -43,6 +44,7 @@ nxp	NXP Semiconductors
 onnn	ON Semiconductor Corp.
 picochip	Picochip Ltd
 powervr	PowerVR (deprecated, use img)
+qca	Qualcomm Atheros, Inc.
 qcom	Qualcomm, Inc.
 ralink	Mediatek/Ralink Technology Corp.
 ramtron	Ramtron International
diff --git a/Documentation/ja_JP/HOWTO b/Documentation/ja_JP/HOWTO
index 050d37fe6d40..8148a47fc70e 100644
--- a/Documentation/ja_JP/HOWTO
+++ b/Documentation/ja_JP/HOWTO
@@ -11,14 +11,14 @@
 fork. So if you have any comments or updates for this file, please try
 to update the original English file first.
 
-Last Updated: 2011/03/31
+Last Updated: 2013/07/19
 ==================================
 This is a translation of
-linux-2.6.38/Documentation/HOWTO
+linux-3.10/Documentation/HOWTO
 into Japanese.
 
-Translation group: JF Project < http://www.linux.or.jp/JF/ >
-Translation date: 2011/3/28
+Translation group: JF Project < http://linuxjf.sourceforge.jp/ >
+Translation date: 2013/7/19
 Translator: Tsugikazu Shibata <tshibata at ab dot jp dot nec dot com>
 Proofreaders: Matsukura-san <nbh--mats at nifty dot com>
               Masanori Kobayasi <zap03216 at nifty dot ne dot jp>
@@ -245,7 +245,7 @@
 lets you browse the source code in a self-referential, indexed web
 format. This up-to-date repository of splendid kernel code can be
 found at:
-	http://sosdg.org/~qiyong/lxr/
+	http://lxr.linux.no/+trees
 
 The development process
 -----------------------
@@ -253,24 +253,24 @@
 The Linux kernel development process currently consists of a few
 different main kernel "branches" and many subsystem-specific kernel
 branches. These branches are:
-  - the main 2.6.x kernel tree
-  - the 2.6.x.y -stable kernel tree
-  - the 2.6.x -git kernel patches
+  - the main 3.x kernel tree
+  - the 3.x.y -stable kernel tree
+  - the 3.x -git kernel patches
   - subsystem-specific kernel trees and patches
-  - the 2.6.x -next kernel tree for integration tests
+  - the 3.x -next kernel tree for integration tests
 
-2.6.x kernel tree
+3.x kernel tree
 -----------------
 
-The 2.6.x kernel is maintained by Linus Torvalds and lives in the
-pub/linux/kernel/v2.6/ directory on kernel.org. Its development
+The 3.x kernel is maintained by Linus Torvalds and lives in the
+pub/linux/kernel/v3.x/ directory on kernel.org. Its development
 process is as follows:
 
   - As soon as a new kernel is released, a two-week window opens during
     which maintainers can send large diffs to Linus. Such diffs are
     usually patches that have already spent a few weeks in the -next
     kernel. Sending large changes with git (the kernel's source
     management tool; see http://git-scm.com/) is preferred, but plain
     patch files are also fine.
 
   - After two weeks a -rc1 kernel is released, and from then on the
@@ -302,20 +302,20 @@
   released according to the status of actually-identified bugs, not
   according to a plan fixed in advance."
 
-2.6.x.y -stable kernel tree
+3.x.y -stable kernel tree
 ---------------------------
 
-Kernels whose version numbers consist of four parts are -stable
+Kernels whose version numbers consist of three parts are -stable
 kernels. They contain comparatively small, important fixes for
-security problems and serious regressions found in the 2.6.x kernels.
+security problems and serious regressions found in the 3.x kernels.
 
 This is the recommended branch for users who are not interested in
 helping to test development/experimental versions and who want to use
 the most recent stable kernel.
 
-If no 2.6.x.y kernel is available, the highest-numbered 2.6.x kernel
+If no 3.x.y kernel is available, the highest-numbered 3.x kernel
 is the current stable kernel.
 
-2.6.x.y kernels are maintained by the "stable" team <stable@kernel.org>
+3.x.y kernels are maintained by the "stable" team <stable@kernel.org>
 and are released as needed. The usual release cycle is about every two
 weeks, but it can be longer if there are no pressing problems.
 Security-related problems are usually released immediately.
@@ -324,7 +324,7 @@
 file documents which kinds of changes are acceptable for the -stable
 tree and how the release process works.
 
-2.6.x -git patches
+3.x -git patches
 ------------------
 
 Daily snapshots of Linus's kernel tree, managed in a git repository
@@ -358,14 +358,14 @@
 can be tracked. Most of these patchwork sites are listed at
 http://patchwork.kernel.org/.
 
-The 2.6.x -next kernel tree for integration tests
+The 3.x -next kernel tree for integration tests
 ---------------------------------------------
 
-Before updates from subsystem trees are merged into the mainline 2.6.x
+Before updates from subsystem trees are merged into the mainline 3.x
 tree, they need to be integration-tested. For this purpose a special
 testing repository exists, into which virtually all subsystem trees
 are pulled on an almost daily basis:
-	http://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git
+	http://git.kernel.org/?p=linux/kernel/git/next/linux-next.git
 	http://linux.f-seidel.de/linux-next/pmwiki/
 
 In this way, the -next kernel gives a summary view of what is expected
 to go into the mainline at the next merge window.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 15356aca938c..7f9d4f53882c 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2953,7 +2953,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			improve throughput, but will also increase the
 			amount of memory reserved for use by the client.
 
-	swapaccount[=0|1]
+	swapaccount=[0|1]
 			[KNL] Enable accounting of swap in memory resource
 			controller if no parameter or 1 is given or disable
 			it if 0 is given (See Documentation/cgroups/memory.txt)
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 1c15043aaee4..d569f2a424d5 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -52,7 +52,7 @@ Default: 64
 
 busy_read
 ----------------
-Low latency busy poll timeout for socket reads. (needs CONFIG_NET_LL_RX_POLL)
+Low latency busy poll timeout for socket reads. (needs CONFIG_NET_RX_BUSY_POLL)
 Approximate time in us to busy loop waiting for packets on the device queue.
 This sets the default value of the SO_BUSY_POLL socket option.
 Can be set or overridden per socket by setting socket option SO_BUSY_POLL,
@@ -63,7 +63,7 @@ Default: 0 (off)
 
 busy_poll
 ----------------
-Low latency busy poll timeout for poll and select. (needs CONFIG_NET_LL_RX_POLL)
+Low latency busy poll timeout for poll and select. (needs CONFIG_NET_RX_BUSY_POLL)
 Approximate time in us to busy loop waiting for events.
 Recommended value depends on the number of sockets you poll on.
 For several sockets 50, for several hundreds 100.
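
The per-socket override mentioned above is done with setsockopt(). A minimal userspace sketch; the fallback constant matches include/uapi/asm-generic/socket.h (architectures with their own socket.h use a different number), and 50 follows the several-sockets guidance above:

/* Opt one socket into busy polling, overriding the busy_read default. */
#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_BUSY_POLL
#define SO_BUSY_POLL 46		/* asm-generic value; may predate libc headers */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	unsigned int busy_usecs = 50;	/* busy-loop budget in microseconds */

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL,
				 &busy_usecs, sizeof(busy_usecs)) < 0) {
		perror("SO_BUSY_POLL");
		return 1;
	}
	return 0;
}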
diff --git a/MAINTAINERS b/MAINTAINERS
index bf61e04291ab..8197fbd70a3e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -965,6 +965,12 @@ M: Lennert Buytenhek <kernel@wantstofly.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
+ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
+M:	Santosh Shilimkar <santosh.shilimkar@ti.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	arch/arm/mach-keystone/
+
 ARM/LOGICPD PXA270 MACHINE SUPPORT
 M:	Lennert Buytenhek <kernel@wantstofly.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1259,7 +1265,6 @@ F: drivers/rtc/rtc-coh901331.c
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
 
 ARM/Ux500 ARM ARCHITECTURE
-M:	Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
 M:	Linus Walleij <linus.walleij@linaro.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
@@ -1406,7 +1411,7 @@ ATHEROS ATH6KL WIRELESS DRIVER
 M:	Kalle Valo <kvalo@qca.qualcomm.com>
 L:	linux-wireless@vger.kernel.org
 W:	http://wireless.kernel.org/en/users/Drivers/ath6kl
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath6kl.git
+T:	git git://github.com/kvalo/ath.git
 S:	Supported
 F:	drivers/net/wireless/ath/ath6kl/
 
@@ -1642,7 +1647,7 @@ S: Maintained
 F:	drivers/net/hamradio/baycom*
 
 BCACHE (BLOCK LAYER CACHE)
-M:	Kent Overstreet <koverstreet@google.com>
+M:	Kent Overstreet <kmo@daterainc.com>
 L:	linux-bcache@vger.kernel.org
 W:	http://bcache.evilpiepirate.org
 S:	Maintained:
@@ -2871,7 +2876,7 @@ F: drivers/media/usb/dvb-usb-v2/dvb_usb*
 F:	drivers/media/usb/dvb-usb-v2/usb_urb.c
 
 DYNAMIC DEBUG
-M:	Jason Baron <jbaron@redhat.com>
+M:	Jason Baron <jbaron@akamai.com>
 S:	Maintained
 F:	lib/dynamic_debug.c
 F:	include/linux/dynamic_debug.h
@@ -3346,7 +3351,7 @@ F: Documentation/firmware_class/
 F:	drivers/base/firmware*.c
 F:	include/linux/firmware.h
 
-FLASHSYSTEM DRIVER (IBM FlashSystem 70/80 PCI SSD Flash Card)
+FLASH ADAPTER DRIVER (IBM Flash Adapter 900GB Full Height PCI Flash Card)
 M:	Joshua Morris <josh.h.morris@us.ibm.com>
 M:	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 S:	Maintained
@@ -3622,11 +3627,9 @@ F: drivers/isdn/gigaset/
 F:	include/uapi/linux/gigaset_dev.h
 
 GPIO SUBSYSTEM
-M:	Grant Likely <grant.likely@linaro.org>
 M:	Linus Walleij <linus.walleij@linaro.org>
 S:	Maintained
 L:	linux-gpio@vger.kernel.org
-T:	git git://git.secretlab.ca/git/linux-2.6.git
 F:	Documentation/gpio.txt
 F:	drivers/gpio/
 F:	include/linux/gpio*
@@ -4472,8 +4475,6 @@ F: drivers/irqchip/
 
 IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
 M:	Benjamin Herrenschmidt <benh@kernel.crashing.org>
-M:	Grant Likely <grant.likely@linaro.org>
-T:	git git://git.secretlab.ca/git/linux-2.6.git irqdomain/next
 S:	Maintained
 F:	Documentation/IRQ-domain.txt
 F:	include/linux/irqdomain.h
@@ -4990,7 +4991,7 @@ F: arch/powerpc/platforms/44x/
 
 LINUX FOR POWERPC EMBEDDED XILINX VIRTEX
 L:	linuxppc-dev@lists.ozlabs.org
-S:	Unmaintained
+S:	Orphan
 F:	arch/powerpc/*/*virtex*
 F:	arch/powerpc/*/*/*virtex*
 
@@ -5580,9 +5581,9 @@ S: Maintained
 F:	drivers/media/tuners/mxl5007t.*
 
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M:	Andrew Gallatin <gallatin@myri.com>
+M:	Hyong-Youb Kim <hykim@myri.com>
 L:	netdev@vger.kernel.org
-W:	http://www.myri.com/scs/download-Myri10GE.html
+W:	https://www.myricom.com/support/downloads/myri10ge.html
 S:	Supported
 F:	drivers/net/ethernet/myricom/myri10ge/
 
@@ -5883,10 +5884,10 @@ F: drivers/i2c/busses/i2c-omap.c
 F:	include/linux/i2c-omap.h
 
 OMAP DEVICE TREE SUPPORT
-M:	Benoît Cousson <b-cousson@ti.com>
+M:	Benoît Cousson <bcousson@baylibre.com>
 M:	Tony Lindgren <tony@atomide.com>
 L:	linux-omap@vger.kernel.org
-L:	devicetree-discuss@lists.ozlabs.org (moderated for non-subscribers)
+L:	devicetree@vger.kernel.org
 S:	Maintained
 F:	arch/arm/boot/dts/*omap*
 F:	arch/arm/boot/dts/*am3*
@@ -5963,14 +5964,14 @@ S: Maintained
 F:	drivers/char/hw_random/omap-rng.c
 
 OMAP HWMOD SUPPORT
-M:	Benoît Cousson <b-cousson@ti.com>
+M:	Benoît Cousson <bcousson@baylibre.com>
 M:	Paul Walmsley <paul@pwsan.com>
 L:	linux-omap@vger.kernel.org
 S:	Maintained
 F:	arch/arm/mach-omap2/omap_hwmod.*
 
 OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
-M:	Benoît Cousson <b-cousson@ti.com>
+M:	Benoît Cousson <bcousson@baylibre.com>
 L:	linux-omap@vger.kernel.org
 S:	Maintained
 F:	arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -6050,17 +6051,28 @@ F: drivers/i2c/busses/i2c-ocores.c
 OPEN FIRMWARE AND FLATTENED DEVICE TREE
 M:	Grant Likely <grant.likely@linaro.org>
 M:	Rob Herring <rob.herring@calxeda.com>
-L:	devicetree-discuss@lists.ozlabs.org (moderated for non-subscribers)
+L:	devicetree@vger.kernel.org
 W:	http://fdt.secretlab.ca
 T:	git git://git.secretlab.ca/git/linux-2.6.git
 S:	Maintained
-F:	Documentation/devicetree
-F:	drivers/of
+F:	drivers/of/
 F:	include/linux/of*.h
-F:	scripts/dtc
+F:	scripts/dtc/
 K:	of_get_property
 K:	of_match_table
 
+OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
+M:	Rob Herring <rob.herring@calxeda.com>
+M:	Pawel Moll <pawel.moll@arm.com>
+M:	Mark Rutland <mark.rutland@arm.com>
+M:	Stephen Warren <swarren@wwwdotorg.org>
+M:	Ian Campbell <ian.campbell@citrix.com>
+L:	devicetree@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/
+F:	arch/*/boot/dts/
+F:	include/dt-bindings/
+
 OPENRISC ARCHITECTURE
 M:	Jonas Bonn <jonas@southpole.se>
 W:	http://openrisc.net
@@ -6719,6 +6731,14 @@ T: git git://linuxtv.org/anttip/media_tree.git
 S:	Maintained
 F:	drivers/media/tuners/qt1010*
 
+QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
+M:	Kalle Valo <kvalo@qca.qualcomm.com>
+L:	ath10k@lists.infradead.org
+W:	http://wireless.kernel.org/en/users/Drivers/ath10k
+T:	git git://github.com/kvalo/ath.git
+S:	Supported
+F:	drivers/net/wireless/ath/ath10k/
+
 QUALCOMM HEXAGON ARCHITECTURE
 M:	Richard Kuo <rkuo@codeaurora.org>
 L:	linux-hexagon@vger.kernel.org
@@ -7346,7 +7366,6 @@ F: drivers/net/ethernet/sfc/
 
 SGI GRU DRIVER
 M:	Dimitri Sivanich <sivanich@sgi.com>
-M:	Robin Holt <holt@sgi.com>
 S:	Maintained
 F:	drivers/misc/sgi-gru/
 
@@ -7366,7 +7385,8 @@ S: Maintained for 2.6.
 F:	Documentation/sgi-visws.txt
 
 SGI XP/XPC/XPNET DRIVER
-M:	Robin Holt <holt@sgi.com>
+M:	Cliff Whickman <cpw@sgi.com>
+M:	Robin Holt <robinmholt@gmail.com>
 S:	Maintained
 F:	drivers/misc/sgi-xp/
 
@@ -7746,7 +7766,6 @@ F: drivers/clk/spear/
 
 SPI SUBSYSTEM
 M:	Mark Brown <broonie@kernel.org>
-M:	Grant Likely <grant.likely@linaro.org>
 L:	linux-spi@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git
 Q:	http://patchwork.kernel.org/project/spi-devel-general/list/
@@ -7812,7 +7831,7 @@ F: drivers/staging/asus_oled/
 
 STAGING - COMEDI
 M:	Ian Abbott <abbotti@mev.co.uk>
-M:	Mori Hess <fmhess@users.sourceforge.net>
+M:	H Hartley Sweeten <hsweeten@visionengravers.com>
 S:	Odd Fixes
 F:	drivers/staging/comedi/
 
@@ -8264,7 +8283,7 @@ S: Maintained
 F:	sound/soc/codecs/twl4030*
 
 TI WILINK WIRELESS DRIVERS
-M:	Luciano Coelho <coelho@ti.com>
+M:	Luciano Coelho <luca@coelho.fi>
 L:	linux-wireless@vger.kernel.org
 W:	http://wireless.kernel.org/en/users/Drivers/wl12xx
 W:	http://wireless.kernel.org/en/users/Drivers/wl1251
@@ -8650,6 +8669,11 @@ T: git git://git.alsa-project.org/alsa-kernel.git
 S:	Maintained
 F:	sound/usb/midi.*
 
+USB NETWORKING DRIVERS
+L:	linux-usb@vger.kernel.org
+S:	Odd Fixes
+F:	drivers/net/usb/
+
 USB OHCI DRIVER
 M:	Alan Stern <stern@rowland.harvard.edu>
 L:	linux-usb@vger.kernel.org
@@ -9288,7 +9312,7 @@ S: Maintained
 F:	drivers/net/ethernet/xilinx/xilinx_axienet*
 
 XILINX SYSTEMACE DRIVER
-S:	Unmaintained
+S:	Orphan
 F:	drivers/block/xsysace.c
 
 XILINX UARTLITE SERIAL DRIVER
diff --git a/Makefile b/Makefile
index a35f72a420c0..369882e4fc77 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 11 2PATCHLEVEL = 11
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc2 4EXTRAVERSION = -rc7
5NAME = Linux for Workgroups 5NAME = Linux for Workgroups
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 8d2ae24b9f4a..1feb169274fe 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -407,6 +407,12 @@ config CLONE_BACKWARDS2
 	help
 	  Architecture has the first two arguments of clone(2) swapped.
 
+config CLONE_BACKWARDS3
+	bool
+	help
+	  Architecture has tls passed as the 3rd argument of clone(2),
+	  not the 5th one.
+
 config ODD_RT_SIGACTION
 	bool
 	help
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 837a1f2d8b96..082d9b4b5472 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -15,6 +15,7 @@ config ALPHA
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
+	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_CMOS_UPDATE
 	select GENERIC_STRNCPY_FROM_USER
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index c2cbe4fc391c..78b03ef39f6f 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -186,17 +186,24 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
  */
 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
-	int c, old;
-	c = atomic_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c;
+	int c, new, old;
+	smp_mb();
+	__asm__ __volatile__(
+	"1:	ldl_l	%[old],%[mem]\n"
+	"	cmpeq	%[old],%[u],%[c]\n"
+	"	addl	%[old],%[a],%[new]\n"
+	"	bne	%[c],2f\n"
+	"	stl_c	%[new],%[mem]\n"
+	"	beq	%[new],3f\n"
+	"2:\n"
+	".subsection 2\n"
+	"3:	br	1b\n"
+	".previous"
+	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
+	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
+	: "memory");
+	smp_mb();
+	return old;
 }
 
 
@@ -207,21 +214,56 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
+ * Returns true iff @v was not @u.
  */
 static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-	long c, old;
-	c = atomic64_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic64_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c != (u);
+	long c, tmp;
+	smp_mb();
+	__asm__ __volatile__(
+	"1:	ldq_l	%[tmp],%[mem]\n"
+	"	cmpeq	%[tmp],%[u],%[c]\n"
+	"	addq	%[tmp],%[a],%[tmp]\n"
+	"	bne	%[c],2f\n"
+	"	stq_c	%[tmp],%[mem]\n"
+	"	beq	%[tmp],3f\n"
+	"2:\n"
+	".subsection 2\n"
+	"3:	br	1b\n"
+	".previous"
+	: [tmp] "=&r"(tmp), [c] "=&r"(c)
+	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
+	: "memory");
+	smp_mb();
+	return !c;
+}
+
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static inline long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long old, tmp;
+	smp_mb();
+	__asm__ __volatile__(
+	"1:	ldq_l	%[old],%[mem]\n"
+	"	subq	%[old],1,%[tmp]\n"
+	"	ble	%[old],2f\n"
+	"	stq_c	%[tmp],%[mem]\n"
+	"	beq	%[tmp],3f\n"
+	"2:\n"
+	".subsection 2\n"
+	"3:	br	1b\n"
+	".previous"
+	: [old] "=&r"(old), [tmp] "=&r"(tmp)
+	: [mem] "m"(*v)
+	: "memory");
+	smp_mb();
+	return old - 1;
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
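
For comparison with the ll/sc version added above, the same contract can be expressed as the cmpxchg retry loop this patch replaces for the 32-bit helpers; this is a sketch of the semantics, not the kernel's generic helper:

#include <linux/atomic.h>

/* Decrement *v only while it is positive; return old value minus 1
 * either way, exactly as atomic64_dec_if_positive() above promises. */
static inline long atomic64_dec_if_positive_generic(atomic64_t *v)
{
	long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c <= 0))
			break;				/* not positive: leave v alone */
		old = atomic64_cmpxchg(v, c, c - 1);
		if (likely(old == c))
			break;				/* decremented successfully */
		c = old;				/* raced: retry with fresh value */
	}
	return c - 1;
}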
diff --git a/arch/alpha/include/asm/param.h b/arch/alpha/include/asm/param.h
index bf46af51941b..a5b68b268bcf 100644
--- a/arch/alpha/include/asm/param.h
+++ b/arch/alpha/include/asm/param.h
@@ -3,7 +3,9 @@
3 3
4#include <uapi/asm/param.h> 4#include <uapi/asm/param.h>
5 5
6#define HZ CONFIG_HZ 6# undef HZ
7#define USER_HZ HZ 7# define HZ CONFIG_HZ
8# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */ 8# define USER_HZ 1024
9# define CLOCKS_PER_SEC USER_HZ /* frequency at which times() counts */
10
9#endif /* _ASM_ALPHA_PARAM_H */ 11#endif /* _ASM_ALPHA_PARAM_H */
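
Since USER_HZ is what userspace observes through times(), the portable pattern is to query the tick rate at runtime rather than hard-coding it; a small userspace sketch (on alpha, sysconf returns the 1024 pinned above):

#include <stdio.h>
#include <sys/times.h>
#include <unistd.h>

int main(void)
{
	struct tms t;
	long ticks_per_sec = sysconf(_SC_CLK_TCK);	/* USER_HZ */

	times(&t);
	printf("user cpu time: %.3f s\n",
	       (double)t.tms_utime / ticks_per_sec);
	return 0;
}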
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index 3bba21e41b81..37b570d01202 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -168,8 +168,4 @@ static inline void arch_write_unlock(arch_rwlock_t * lock)
 #define arch_read_lock_flags(lock, flags)   arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags)  arch_write_lock(lock)
 
-#define arch_spin_relax(lock)	cpu_relax()
-#define arch_read_relax(lock)	cpu_relax()
-#define arch_write_relax(lock)	cpu_relax()
-
 #endif /* _ALPHA_SPINLOCK_H */
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 43baee17acdf..f2c94402e2c8 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -3,8 +3,7 @@
 
 #include <uapi/asm/unistd.h>
 
-
-#define NR_SYSCALLS			506
+#define NR_SYSCALLS			508
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
diff --git a/arch/alpha/include/uapi/asm/param.h b/arch/alpha/include/uapi/asm/param.h
index 29daed819ebd..dbcd9834af6d 100644
--- a/arch/alpha/include/uapi/asm/param.h
+++ b/arch/alpha/include/uapi/asm/param.h
@@ -1,13 +1,7 @@
 #ifndef _UAPI_ASM_ALPHA_PARAM_H
 #define _UAPI_ASM_ALPHA_PARAM_H
 
-/* ??? Gross.  I don't want to parameterize this, and supposedly the
-   hardware ignores reprogramming.  We also need userland buy-in to the
-   change in HZ, since this is visible in the wait4 resources etc.  */
-
-#ifndef __KERNEL__
 #define HZ		1024
-#endif
 
 #define EXEC_PAGESIZE	8192
 
@@ -17,5 +11,4 @@
 
 #define MAXHOSTNAMELEN	64	/* max length of hostname */
 
-
 #endif /* _UAPI_ASM_ALPHA_PARAM_H */
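Since USER_HZ is what the kernel reports to userland, portable programs ask for it at runtime rather than hard-coding 1024. A short illustration (my own example, not from the patch) of scaling times() results:

#include <stdio.h>
#include <sys/times.h>
#include <unistd.h>

/* User code must divide times() values by sysconf(_SC_CLK_TCK),
 * i.e. USER_HZ (1024 on Alpha), never by the kernel's CONFIG_HZ. */
int main(void)
{
	long user_hz = sysconf(_SC_CLK_TCK);
	struct tms t;
	clock_t elapsed = times(&t);

	printf("USER_HZ = %ld\n", user_hz);
	printf("user cpu time: %.3f s\n", (double)t.tms_utime / user_hz);
	printf("elapsed ticks: %ld (%.3f s)\n",
	       (long)elapsed, (double)elapsed / user_hz);
	return 0;
}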
diff --git a/arch/alpha/include/uapi/asm/unistd.h b/arch/alpha/include/uapi/asm/unistd.h
index 801d28bcea51..53ae7bb1bfd1 100644
--- a/arch/alpha/include/uapi/asm/unistd.h
+++ b/arch/alpha/include/uapi/asm/unistd.h
@@ -467,5 +467,7 @@
 #define __NR_sendmmsg			503
 #define __NR_process_vm_readv		504
 #define __NR_process_vm_writev		505
+#define __NR_kcmp			506
+#define __NR_finit_module		507
 
 #endif /* _UAPI_ALPHA_UNISTD_H */
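Until the C library grows wrappers, freshly wired-up syscall numbers like these are reachable through syscall(2). A sketch, assuming headers that define SYS_finit_module; "hello.ko" is a hypothetical module file and loading needs CAP_SYS_MODULE:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int fd = open("hello.ko", O_RDONLY);	/* hypothetical module */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* finit_module(fd, param_values, flags) */
	if (syscall(SYS_finit_module, fd, "", 0) != 0)
		perror("finit_module");
	return 0;
}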
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index f62a994ef126..a969b95ee5ac 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -12,11 +12,32 @@
 
 	.text
 	.set noat
+	.cfi_sections	.debug_frame
 
 /* Stack offsets. */
 #define SP_OFF			184
 #define SWITCH_STACK_SIZE	320
 
+.macro	CFI_START_OSF_FRAME	func
+	.align	4
+	.globl	\func
+	.type	\func,@function
+\func:
+	.cfi_startproc simple
+	.cfi_return_column 64
+	.cfi_def_cfa	$sp, 48
+	.cfi_rel_offset	64, 8
+	.cfi_rel_offset	$gp, 16
+	.cfi_rel_offset	$16, 24
+	.cfi_rel_offset	$17, 32
+	.cfi_rel_offset	$18, 40
+.endm
+
+.macro	CFI_END_OSF_FRAME	func
+	.cfi_endproc
+	.size	\func, . - \func
+.endm
+
 /*
  * This defines the normal kernel pt-regs layout.
  *
@@ -27,100 +48,158 @@
  * the palcode-provided values are available to the signal handler.
  */
 
-#define SAVE_ALL			\
-	subq	$sp, SP_OFF, $sp;	\
-	stq	$0, 0($sp);		\
-	stq	$1, 8($sp);		\
-	stq	$2, 16($sp);		\
-	stq	$3, 24($sp);		\
-	stq	$4, 32($sp);		\
-	stq	$28, 144($sp);		\
-	lda	$2, alpha_mv;		\
-	stq	$5, 40($sp);		\
-	stq	$6, 48($sp);		\
-	stq	$7, 56($sp);		\
-	stq	$8, 64($sp);		\
-	stq	$19, 72($sp);		\
-	stq	$20, 80($sp);		\
-	stq	$21, 88($sp);		\
-	ldq	$2, HAE_CACHE($2);	\
-	stq	$22, 96($sp);		\
-	stq	$23, 104($sp);		\
-	stq	$24, 112($sp);		\
-	stq	$25, 120($sp);		\
-	stq	$26, 128($sp);		\
-	stq	$27, 136($sp);		\
-	stq	$2, 152($sp);		\
-	stq	$16, 160($sp);		\
-	stq	$17, 168($sp);		\
+.macro	SAVE_ALL
+	subq	$sp, SP_OFF, $sp
+	.cfi_adjust_cfa_offset	SP_OFF
+	stq	$0, 0($sp)
+	stq	$1, 8($sp)
+	stq	$2, 16($sp)
+	stq	$3, 24($sp)
+	stq	$4, 32($sp)
+	stq	$28, 144($sp)
+	.cfi_rel_offset	$0, 0
+	.cfi_rel_offset	$1, 8
+	.cfi_rel_offset	$2, 16
+	.cfi_rel_offset	$3, 24
+	.cfi_rel_offset	$4, 32
+	.cfi_rel_offset	$28, 144
+	lda	$2, alpha_mv
+	stq	$5, 40($sp)
+	stq	$6, 48($sp)
+	stq	$7, 56($sp)
+	stq	$8, 64($sp)
+	stq	$19, 72($sp)
+	stq	$20, 80($sp)
+	stq	$21, 88($sp)
+	ldq	$2, HAE_CACHE($2)
+	stq	$22, 96($sp)
+	stq	$23, 104($sp)
+	stq	$24, 112($sp)
+	stq	$25, 120($sp)
+	stq	$26, 128($sp)
+	stq	$27, 136($sp)
+	stq	$2, 152($sp)
+	stq	$16, 160($sp)
+	stq	$17, 168($sp)
 	stq	$18, 176($sp)
+	.cfi_rel_offset	$5, 40
+	.cfi_rel_offset	$6, 48
+	.cfi_rel_offset	$7, 56
+	.cfi_rel_offset	$8, 64
+	.cfi_rel_offset	$19, 72
+	.cfi_rel_offset	$20, 80
+	.cfi_rel_offset	$21, 88
+	.cfi_rel_offset	$22, 96
+	.cfi_rel_offset	$23, 104
+	.cfi_rel_offset	$24, 112
+	.cfi_rel_offset	$25, 120
+	.cfi_rel_offset	$26, 128
+	.cfi_rel_offset	$27, 136
+.endm
 
-#define RESTORE_ALL			\
-	lda	$19, alpha_mv;		\
-	ldq	$0, 0($sp);		\
-	ldq	$1, 8($sp);		\
-	ldq	$2, 16($sp);		\
-	ldq	$3, 24($sp);		\
-	ldq	$21, 152($sp);		\
-	ldq	$20, HAE_CACHE($19);	\
-	ldq	$4, 32($sp);		\
-	ldq	$5, 40($sp);		\
-	ldq	$6, 48($sp);		\
-	ldq	$7, 56($sp);		\
-	subq	$20, $21, $20;		\
-	ldq	$8, 64($sp);		\
-	beq	$20, 99f;		\
-	ldq	$20, HAE_REG($19);	\
-	stq	$21, HAE_CACHE($19);	\
-	stq	$21, 0($20);		\
-99:;					\
-	ldq	$19, 72($sp);		\
-	ldq	$20, 80($sp);		\
-	ldq	$21, 88($sp);		\
-	ldq	$22, 96($sp);		\
-	ldq	$23, 104($sp);		\
-	ldq	$24, 112($sp);		\
-	ldq	$25, 120($sp);		\
-	ldq	$26, 128($sp);		\
-	ldq	$27, 136($sp);		\
-	ldq	$28, 144($sp);		\
+.macro	RESTORE_ALL
+	lda	$19, alpha_mv
+	ldq	$0, 0($sp)
+	ldq	$1, 8($sp)
+	ldq	$2, 16($sp)
+	ldq	$3, 24($sp)
+	ldq	$21, 152($sp)
+	ldq	$20, HAE_CACHE($19)
+	ldq	$4, 32($sp)
+	ldq	$5, 40($sp)
+	ldq	$6, 48($sp)
+	ldq	$7, 56($sp)
+	subq	$20, $21, $20
+	ldq	$8, 64($sp)
+	beq	$20, 99f
+	ldq	$20, HAE_REG($19)
+	stq	$21, HAE_CACHE($19)
+	stq	$21, 0($20)
+99:	ldq	$19, 72($sp)
+	ldq	$20, 80($sp)
+	ldq	$21, 88($sp)
+	ldq	$22, 96($sp)
+	ldq	$23, 104($sp)
+	ldq	$24, 112($sp)
+	ldq	$25, 120($sp)
+	ldq	$26, 128($sp)
+	ldq	$27, 136($sp)
+	ldq	$28, 144($sp)
 	addq	$sp, SP_OFF, $sp
+	.cfi_restore	$0
+	.cfi_restore	$1
+	.cfi_restore	$2
+	.cfi_restore	$3
+	.cfi_restore	$4
+	.cfi_restore	$5
+	.cfi_restore	$6
+	.cfi_restore	$7
+	.cfi_restore	$8
+	.cfi_restore	$19
+	.cfi_restore	$20
+	.cfi_restore	$21
+	.cfi_restore	$22
+	.cfi_restore	$23
+	.cfi_restore	$24
+	.cfi_restore	$25
+	.cfi_restore	$26
+	.cfi_restore	$27
+	.cfi_restore	$28
+	.cfi_adjust_cfa_offset	-SP_OFF
+.endm
+
+.macro	DO_SWITCH_STACK
+	bsr	$1, do_switch_stack
+	.cfi_adjust_cfa_offset	SWITCH_STACK_SIZE
+	.cfi_rel_offset	$9, 0
+	.cfi_rel_offset	$10, 8
+	.cfi_rel_offset	$11, 16
+	.cfi_rel_offset	$12, 24
+	.cfi_rel_offset	$13, 32
+	.cfi_rel_offset	$14, 40
+	.cfi_rel_offset	$15, 48
+	/* We don't really care about the FP registers for debugging. */
+.endm
+
+.macro	UNDO_SWITCH_STACK
+	bsr	$1, undo_switch_stack
+	.cfi_restore	$9
+	.cfi_restore	$10
+	.cfi_restore	$11
+	.cfi_restore	$12
+	.cfi_restore	$13
+	.cfi_restore	$14
+	.cfi_restore	$15
+	.cfi_adjust_cfa_offset	-SWITCH_STACK_SIZE
+.endm
 
 /*
  * Non-syscall kernel entry points.
  */
 
-	.align 4
-	.globl entInt
-	.ent entInt
-entInt:
+CFI_START_OSF_FRAME entInt
 	SAVE_ALL
 	lda	$8, 0x3fff
 	lda	$26, ret_from_sys_call
 	bic	$sp, $8, $8
 	mov	$sp, $19
 	jsr	$31, do_entInt
-.end entInt
+CFI_END_OSF_FRAME entInt
 
-	.align 4
-	.globl entArith
-	.ent entArith
-entArith:
+CFI_START_OSF_FRAME entArith
 	SAVE_ALL
 	lda	$8, 0x3fff
 	lda	$26, ret_from_sys_call
 	bic	$sp, $8, $8
 	mov	$sp, $18
 	jsr	$31, do_entArith
-.end entArith
+CFI_END_OSF_FRAME entArith
 
-	.align 4
-	.globl entMM
-	.ent entMM
-entMM:
+CFI_START_OSF_FRAME entMM
 	SAVE_ALL
 /* save $9 - $15 so the inline exception code can manipulate them.  */
 	subq	$sp, 56, $sp
+	.cfi_adjust_cfa_offset	56
 	stq	$9, 0($sp)
 	stq	$10, 8($sp)
 	stq	$11, 16($sp)
@@ -128,6 +207,13 @@ entMM:
 	stq	$13, 32($sp)
 	stq	$14, 40($sp)
 	stq	$15, 48($sp)
+	.cfi_rel_offset	$9, 0
+	.cfi_rel_offset	$10, 8
+	.cfi_rel_offset	$11, 16
+	.cfi_rel_offset	$12, 24
+	.cfi_rel_offset	$13, 32
+	.cfi_rel_offset	$14, 40
+	.cfi_rel_offset	$15, 48
 	addq	$sp, 56, $19
 /* handle the fault */
 	lda	$8, 0x3fff
@@ -142,28 +228,33 @@ entMM:
 	ldq	$14, 40($sp)
 	ldq	$15, 48($sp)
 	addq	$sp, 56, $sp
+	.cfi_restore	$9
+	.cfi_restore	$10
+	.cfi_restore	$11
+	.cfi_restore	$12
+	.cfi_restore	$13
+	.cfi_restore	$14
+	.cfi_restore	$15
+	.cfi_adjust_cfa_offset	-56
 /* finish up the syscall as normal.  */
 	br	ret_from_sys_call
-.end entMM
+CFI_END_OSF_FRAME entMM
 
-	.align 4
-	.globl entIF
-	.ent entIF
-entIF:
+CFI_START_OSF_FRAME entIF
 	SAVE_ALL
 	lda	$8, 0x3fff
 	lda	$26, ret_from_sys_call
 	bic	$sp, $8, $8
 	mov	$sp, $17
 	jsr	$31, do_entIF
-.end entIF
+CFI_END_OSF_FRAME entIF
 
-	.align 4
-	.globl entUna
-	.ent entUna
-entUna:
+CFI_START_OSF_FRAME entUna
 	lda	$sp, -256($sp)
+	.cfi_adjust_cfa_offset	256
 	stq	$0, 0($sp)
+	.cfi_rel_offset	$0, 0
+	.cfi_remember_state
 	ldq	$0, 256($sp)	/* get PS */
 	stq	$1, 8($sp)
 	stq	$2, 16($sp)
@@ -195,6 +286,32 @@ entUna:
 	stq	$28, 224($sp)
 	mov	$sp, $19
 	stq	$gp, 232($sp)
+	.cfi_rel_offset	$1, 1*8
+	.cfi_rel_offset	$2, 2*8
+	.cfi_rel_offset	$3, 3*8
+	.cfi_rel_offset	$4, 4*8
+	.cfi_rel_offset	$5, 5*8
+	.cfi_rel_offset	$6, 6*8
+	.cfi_rel_offset	$7, 7*8
+	.cfi_rel_offset	$8, 8*8
+	.cfi_rel_offset	$9, 9*8
+	.cfi_rel_offset	$10, 10*8
+	.cfi_rel_offset	$11, 11*8
+	.cfi_rel_offset	$12, 12*8
+	.cfi_rel_offset	$13, 13*8
+	.cfi_rel_offset	$14, 14*8
+	.cfi_rel_offset	$15, 15*8
+	.cfi_rel_offset	$19, 19*8
+	.cfi_rel_offset	$20, 20*8
+	.cfi_rel_offset	$21, 21*8
+	.cfi_rel_offset	$22, 22*8
+	.cfi_rel_offset	$23, 23*8
+	.cfi_rel_offset	$24, 24*8
+	.cfi_rel_offset	$25, 25*8
+	.cfi_rel_offset	$26, 26*8
+	.cfi_rel_offset	$27, 27*8
+	.cfi_rel_offset	$28, 28*8
+	.cfi_rel_offset	$29, 29*8
 	lda	$8, 0x3fff
 	stq	$31, 248($sp)
 	bic	$sp, $8, $8
@@ -228,16 +345,45 @@ entUna:
 	ldq	$28, 224($sp)
 	ldq	$gp, 232($sp)
 	lda	$sp, 256($sp)
+	.cfi_restore	$1
+	.cfi_restore	$2
+	.cfi_restore	$3
+	.cfi_restore	$4
+	.cfi_restore	$5
+	.cfi_restore	$6
+	.cfi_restore	$7
+	.cfi_restore	$8
+	.cfi_restore	$9
+	.cfi_restore	$10
+	.cfi_restore	$11
+	.cfi_restore	$12
+	.cfi_restore	$13
+	.cfi_restore	$14
+	.cfi_restore	$15
+	.cfi_restore	$19
+	.cfi_restore	$20
+	.cfi_restore	$21
+	.cfi_restore	$22
+	.cfi_restore	$23
+	.cfi_restore	$24
+	.cfi_restore	$25
+	.cfi_restore	$26
+	.cfi_restore	$27
+	.cfi_restore	$28
+	.cfi_restore	$29
+	.cfi_adjust_cfa_offset	-256
 	call_pal PAL_rti
-.end entUna
 
 	.align 4
-	.ent entUnaUser
 entUnaUser:
+	.cfi_restore_state
 	ldq	$0, 0($sp)	/* restore original $0 */
 	lda	$sp, 256($sp)	/* pop entUna's stack frame */
+	.cfi_restore	$0
+	.cfi_adjust_cfa_offset	-256
 	SAVE_ALL /* setup normal kernel stack */
 	lda	$sp, -56($sp)
+	.cfi_adjust_cfa_offset	56
 	stq	$9, 0($sp)
 	stq	$10, 8($sp)
 	stq	$11, 16($sp)
@@ -245,6 +391,13 @@ entUnaUser:
 	stq	$13, 32($sp)
 	stq	$14, 40($sp)
 	stq	$15, 48($sp)
+	.cfi_rel_offset	$9, 0
+	.cfi_rel_offset	$10, 8
+	.cfi_rel_offset	$11, 16
+	.cfi_rel_offset	$12, 24
+	.cfi_rel_offset	$13, 32
+	.cfi_rel_offset	$14, 40
+	.cfi_rel_offset	$15, 48
 	lda	$8, 0x3fff
 	addq	$sp, 56, $19
 	bic	$sp, $8, $8
@@ -257,20 +410,25 @@ entUnaUser:
 	ldq	$14, 40($sp)
 	ldq	$15, 48($sp)
 	lda	$sp, 56($sp)
+	.cfi_restore	$9
+	.cfi_restore	$10
+	.cfi_restore	$11
+	.cfi_restore	$12
+	.cfi_restore	$13
+	.cfi_restore	$14
+	.cfi_restore	$15
+	.cfi_adjust_cfa_offset	-56
 	br	ret_from_sys_call
-.end entUnaUser
+CFI_END_OSF_FRAME entUna
 
-	.align 4
-	.globl entDbg
-	.ent entDbg
-entDbg:
+CFI_START_OSF_FRAME entDbg
 	SAVE_ALL
 	lda	$8, 0x3fff
 	lda	$26, ret_from_sys_call
 	bic	$sp, $8, $8
 	mov	$sp, $16
 	jsr	$31, do_entDbg
-.end entDbg
+CFI_END_OSF_FRAME entDbg
 
 /*
  * The system call entry point is special.  Most importantly, it looks
@@ -285,8 +443,12 @@ entDbg:
 
 	.align	4
 	.globl	entSys
-	.globl	ret_from_sys_call
-	.ent	entSys
+	.type	entSys, @function
+	.cfi_startproc simple
+	.cfi_return_column 64
+	.cfi_def_cfa	$sp, 48
+	.cfi_rel_offset	64, 8
+	.cfi_rel_offset	$gp, 16
 entSys:
 	SAVE_ALL
 	lda	$8, 0x3fff
@@ -300,6 +462,9 @@ entSys:
 	stq	$17, SP_OFF+32($sp)
 	s8addq	$0, $5, $5
 	stq	$18, SP_OFF+40($sp)
+	.cfi_rel_offset	$16, SP_OFF+24
+	.cfi_rel_offset	$17, SP_OFF+32
+	.cfi_rel_offset	$18, SP_OFF+40
 	blbs	$3, strace
 	beq	$4, 1f
 	ldq	$27, 0($5)
@@ -310,6 +475,7 @@ entSys:
 	stq	$31, 72($sp)		/* a3=0 => no error */
 
 	.align	4
+	.globl	ret_from_sys_call
 ret_from_sys_call:
 	cmovne	$26, 0, $18	/* $18 = 0 => non-restartable */
 	ldq	$0, SP_OFF($sp)
@@ -324,10 +490,12 @@ ret_to_user:
 	and	$17, _TIF_WORK_MASK, $2
 	bne	$2, work_pending
 restore_all:
+	.cfi_remember_state
 	RESTORE_ALL
 	call_pal PAL_rti
 
 ret_to_kernel:
+	.cfi_restore_state
 	lda	$16, 7
 	call_pal PAL_swpipl
 	br	restore_all
@@ -356,7 +524,6 @@ $ret_success:
 	stq	$0, 0($sp)
 	stq	$31, 72($sp)	/* a3=0 => no error */
 	br	ret_from_sys_call
-.end entSys
 
 /*
  * Do all cleanup when returning from all interrupts and system calls.
@@ -370,7 +537,7 @@ $ret_success:
  */
 
 	.align	4
-	.ent	work_pending
+	.type	work_pending, @function
 work_pending:
 	and	$17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING, $2
 	bne	$2, $work_notifysig
@@ -387,23 +554,22 @@ $work_resched:
 
 $work_notifysig:
 	mov	$sp, $16
-	bsr	$1, do_switch_stack
+	DO_SWITCH_STACK
 	jsr	$26, do_work_pending
-	bsr	$1, undo_switch_stack
+	UNDO_SWITCH_STACK
 	br	restore_all
-.end work_pending
 
 /*
  * PTRACE syscall handler
  */
 
 	.align	4
-	.ent	strace
+	.type	strace, @function
 strace:
 	/* set up signal stack, call syscall_trace */
-	bsr	$1, do_switch_stack
+	DO_SWITCH_STACK
 	jsr	$26, syscall_trace_enter /* returns the syscall number */
-	bsr	$1, undo_switch_stack
+	UNDO_SWITCH_STACK
 
 	/* get the arguments back.. */
 	ldq	$16, SP_OFF+24($sp)
@@ -431,9 +597,9 @@ ret_from_straced:
 $strace_success:
 	stq	$0, 0($sp)		/* save return value */
 
-	bsr	$1, do_switch_stack
+	DO_SWITCH_STACK
 	jsr	$26, syscall_trace_leave
-	bsr	$1, undo_switch_stack
+	UNDO_SWITCH_STACK
 	br	$31, ret_from_sys_call
 
 	.align	3
@@ -447,26 +613,31 @@ $strace_error:
 	stq	$0, 0($sp)
 	stq	$1, 72($sp)	/* a3 for return */
 
-	bsr	$1, do_switch_stack
+	DO_SWITCH_STACK
 	mov	$18, $9		/* save old syscall number */
 	mov	$19, $10	/* save old a3 */
 	jsr	$26, syscall_trace_leave
 	mov	$9, $18
 	mov	$10, $19
-	bsr	$1, undo_switch_stack
+	UNDO_SWITCH_STACK
 
 	mov	$31, $26	/* tell "ret_from_sys_call" we can restart */
 	br	ret_from_sys_call
-.end strace
+CFI_END_OSF_FRAME entSys
 
 /*
  * Save and restore the switch stack -- aka the balance of the user context.
  */
 
 	.align	4
-	.ent	do_switch_stack
+	.type	do_switch_stack, @function
+	.cfi_startproc simple
+	.cfi_return_column 64
+	.cfi_def_cfa	$sp, 0
+	.cfi_register	64, $1
 do_switch_stack:
 	lda	$sp, -SWITCH_STACK_SIZE($sp)
+	.cfi_adjust_cfa_offset	SWITCH_STACK_SIZE
 	stq	$9, 0($sp)
 	stq	$10, 8($sp)
 	stq	$11, 16($sp)
@@ -510,10 +681,14 @@ do_switch_stack:
 	stt	$f0, 312($sp)	# save fpcr in slot of $f31
 	ldt	$f0, 64($sp)	# dont let "do_switch_stack" change fp state.
 	ret	$31, ($1), 1
-.end do_switch_stack
+	.cfi_endproc
+	.size	do_switch_stack, .-do_switch_stack
 
 	.align	4
-	.ent	undo_switch_stack
+	.type	undo_switch_stack, @function
+	.cfi_startproc simple
+	.cfi_def_cfa	$sp, 0
+	.cfi_register	64, $1
 undo_switch_stack:
 	ldq	$9, 0($sp)
 	ldq	$10, 8($sp)
@@ -558,7 +733,8 @@ undo_switch_stack:
 	ldt	$f30, 304($sp)
 	lda	$sp, SWITCH_STACK_SIZE($sp)
 	ret	$31, ($1), 1
-.end undo_switch_stack
+	.cfi_endproc
+	.size	undo_switch_stack, .-undo_switch_stack
 
 /*
  * The meat of the context switch code.
@@ -566,17 +742,18 @@ undo_switch_stack:
 
 	.align	4
 	.globl	alpha_switch_to
-	.ent	alpha_switch_to
+	.type	alpha_switch_to, @function
+	.cfi_startproc
 alpha_switch_to:
-	.prologue 0
-	bsr	$1, do_switch_stack
+	DO_SWITCH_STACK
 	call_pal PAL_swpctx
 	lda	$8, 0x3fff
-	bsr	$1, undo_switch_stack
+	UNDO_SWITCH_STACK
 	bic	$sp, $8, $8
 	mov	$17, $0
 	ret
-.end alpha_switch_to
+	.cfi_endproc
+	.size	alpha_switch_to, .-alpha_switch_to
 
 /*
  * New processes begin life here.
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index f433fc11877a..28e4429596f3 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -236,7 +236,7 @@ void __init
 init_rtc_irq(void)
 {
 	irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip,
-				      handle_simple_irq, "RTC");
+				      handle_percpu_irq, "RTC");
 	setup_irq(RTC_IRQ, &timer_irqaction);
 }
 
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 53b18a620e1c..9dbbcb3b9146 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -264,9 +264,10 @@ recv_secondary_console_msg(void)
 		if (cnt <= 0 || cnt >= 80)
 			strcpy(buf, "<<< BOGUS MSG >>>");
 		else {
-			cp1 = (char *) &cpu->ipc_buffer[11];
+			cp1 = (char *) &cpu->ipc_buffer[1];
 			cp2 = buf;
-			strcpy(cp2, cp1);
+			memcpy(cp2, cp1, cnt);
+			cp2[cnt] = '\0';
 
 			while ((cp2 = strchr(cp2, '\r')) != 0) {
 				*cp2 = ' ';
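The fix above replaces an unbounded strcpy() of a buffer that is only length-counted, never guaranteed NUL-terminated. A minimal userspace illustration of the safe pattern (my example, not kernel code):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char src[8] = { 'h', 'e', 'l', 'l', 'o', '!', '!', '!' };	/* no NUL */
	size_t cnt = sizeof(src);
	char dst[16];

	/* strcpy(dst, src) would read past src; bounded copy does not */
	memcpy(dst, src, cnt);
	dst[cnt] = '\0';
	printf("%s\n", dst);
	return 0;
}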
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 5bf401f7ea97..6c35159bc00e 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -190,9 +190,6 @@ static struct irq_chip clipper_irq_type = {
 static void
 dp264_device_interrupt(unsigned long vector)
 {
-#if 1
-	printk("dp264_device_interrupt: NOT IMPLEMENTED YET!!\n");
-#else
 	unsigned long pld;
 	unsigned int i;
 
@@ -210,12 +207,7 @@ dp264_device_interrupt(unsigned long vector)
 			isa_device_interrupt(vector);
 		else
 			handle_irq(16 + i);
-#if 0
-		TSUNAMI_cchip->dir0.csr = 1UL << i; mb();
-		tmp = TSUNAMI_cchip->dir0.csr;
-#endif
 	}
-#endif
 }
 
 static void
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 407accc80877..c92e389ff219 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -317,8 +317,9 @@ marvel_init_irq(void)
 }
 
 static int
-marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+marvel_map_irq(const struct pci_dev *cdev, u8 slot, u8 pin)
 {
+	struct pci_dev *dev = (struct pci_dev *)cdev;
 	struct pci_controller *hose = dev->sysdata;
 	struct io7_port *io7_port = hose->sysdata;
 	struct io7 *io7 = io7_port->io7;
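The signature change is the usual const-correct-callback pattern: the caller's table now demands a const pointer while the legacy helpers underneath still take a mutable one, so the adapter drops const explicitly at the boundary. A hypothetical C sketch (all names invented):

struct dev { int irq; };

static int legacy_lookup(struct dev *d, int slot)
{
	return d->irq + slot;
}

/* callback table requires: int (*map_irq)(const struct dev *, int) */
static int map_irq(const struct dev *cdev, int slot)
{
	struct dev *d = (struct dev *)cdev;	/* const dropped deliberately */
	return legacy_lookup(d, slot);
}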
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 4284ec798ec9..dca9b3fb0071 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -524,6 +524,8 @@ sys_call_table:
 	.quad sys_sendmmsg
 	.quad sys_process_vm_readv
 	.quad sys_process_vm_writev		/* 505 */
+	.quad sys_kcmp
+	.quad sys_finit_module
 
 	.size sys_call_table, . - sys_call_table
 	.type sys_call_table, @object
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index e336694ca042..ea3395036556 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -105,9 +105,7 @@ void arch_irq_work_raise(void)
 
 static inline __u32 rpcc(void)
 {
-	__u32 result;
-	asm volatile ("rpcc %0" : "=r"(result));
-	return result;
+	return __builtin_alpha_rpcc();
 }
 
 int update_persistent_clock(struct timespec now)
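The rpcc change swaps hand-written inline asm for a compiler builtin, which the compiler can schedule and optimize. Purely as an illustration of the same pattern on x86-64 (assumes GCC/Clang; __rdtsc comes from x86intrin.h):

#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>

static inline uint64_t cycles_asm(void)
{
	uint32_t lo, hi;
	__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
	return ((uint64_t)hi << 32) | lo;
}

static inline uint64_t cycles_builtin(void)
{
	return __rdtsc();	/* compiler intrinsic, same instruction */
}

int main(void)
{
	uint64_t a = cycles_builtin();
	uint64_t b = cycles_asm();
	printf("delta: %llu cycles\n", (unsigned long long)(b - a));
	return 0;
}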
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index be1fba334bd0..bd0665cdc840 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -66,8 +66,8 @@ dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
 {
 	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx    %s\n",
 	       regs->pc, regs->r26, regs->ps, print_tainted());
-	print_symbol("pc is at %s\n", regs->pc);
-	print_symbol("ra is at %s\n", regs->r26 );
+	printk("pc is at %pSR\n", (void *)regs->pc);
+	printk("ra is at %pSR\n", (void *)regs->r26);
 	printk("v0 = %016lx  t0 = %016lx  t1 = %016lx\n",
 	       regs->r0, regs->r1, regs->r2);
 	printk("t2 = %016lx  t3 = %016lx  t4 = %016lx\n",
@@ -132,9 +132,7 @@ dik_show_trace(unsigned long *sp)
 			continue;
 		if (tmp >= (unsigned long) &_etext)
 			continue;
-		printk("[<%lx>]", tmp);
-		print_symbol(" %s", tmp);
-		printk("\n");
+		printk("[<%lx>] %pSR\n", tmp, (void *)tmp);
 		if (i > 40) {
 			printk(" ...");
 			break;
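%pSR is printk's symbol-resolving format (print the address as symbol+offset). A rough userspace analogue, for illustration only, is dladdr(3); build with -rdynamic (and -ldl on older glibc) or the lookup may come back empty:

#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>

int main(void)
{
	Dl_info info;
	void *addr = (void *)&main;

	if (dladdr(addr, &info) && info.dli_sname)
		printf("pc %p is at %s (%s)\n",
		       addr, info.dli_sname, info.dli_fname);
	else
		printf("pc %p: no symbol found\n", addr);
	return 0;
}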
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index 8943c028d4bb..df57611652e5 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -38,6 +38,7 @@
 #include <asm/ptrace.h>
 #include <asm/processor.h>	/* For VMALLOC_START */
 #include <asm/thread_info.h>	/* For THREAD_SIZE */
+#include <asm/mmu.h>
 
 /* Note on the LD/ST addr modes with addr reg wback
  *
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
index 99c10475d477..9c548c7cf001 100644
--- a/arch/arc/lib/strchr-700.S
+++ b/arch/arc/lib/strchr-700.S
@@ -39,9 +39,18 @@ ARC_ENTRY strchr
 	ld.a	r2,[r0,4]
 	sub	r12,r6,r7
 	bic	r12,r12,r6
+#ifdef __LITTLE_ENDIAN__
 	and	r7,r12,r4
 	breq	r7,0,.Loop ; For speed, we want this branch to be unaligned.
 	b	.Lfound_char ; Likewise this one.
+#else
+	and	r12,r12,r4
+	breq	r12,0,.Loop ; For speed, we want this branch to be unaligned.
+	lsr_s	r12,r12,7
+	bic	r2,r7,r6
+	b.d	.Lfound_char_b
+	and_s	r2,r2,r12
+#endif
 ; /* We require this code address to be unaligned for speed...  */
 .Laligned:
 	ld_s	r2,[r0]
@@ -95,6 +104,7 @@ ARC_ENTRY strchr
 	lsr	r7,r7,7
 
 	bic	r2,r7,r6
+.Lfound_char_b:
 	norm	r2,r2
 	sub_s	r0,r0,4
 	asr_s	r2,r2,3
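The big-endian split matters because strchr loops like this rely on the word-at-a-time zero-byte trick, and which byte the surviving bit identifies depends on byte order. A C sketch of the trick itself (illustration only, not the ARC code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* (x - 0x01..01) & ~x & 0x80..80 is nonzero iff some byte of x is 0.
 * XORing with a repeated needle byte first turns "find this byte"
 * into "find a zero byte". */
static int has_zero_byte(uint32_t x)
{
	return ((x - 0x01010101u) & ~x & 0x80808080u) != 0;
}

int main(void)
{
	const char s[4] = { 'h', 'e', 'l', 'p' };
	char needle = 'l';
	uint32_t word, mask;

	memcpy(&word, s, 4);
	mask = 0x01010101u * (uint8_t)needle;
	printf("word contains '%c': %d\n", needle, has_zero_byte(word ^ mask));
	return 0;
}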
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ba412e02ec0c..43594d5116ef 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -20,7 +20,6 @@ config ARM
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select HARDIRQS_SW_RESEND
-	select HAVE_AOUT
 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_SECCOMP_FILTER
@@ -218,7 +217,8 @@ config VECTORS_BASE
 	default DRAM_BASE if REMAP_VECTORS_TO_RAM
 	default 0x00000000
 	help
-	  The base address of exception vectors.
+	  The base address of exception vectors.  This must be two pages
+	  in size.
 
 config ARM_PATCH_PHYS_VIRT
 	bool "Patch physical to virtual translations at runtime" if EMBEDDED
@@ -1600,8 +1600,7 @@ config LOCAL_TIMERS
 config ARCH_NR_GPIO
 	int
 	default 1024 if ARCH_SHMOBILE || ARCH_TEGRA
-	default 512 if SOC_OMAP5
-	default 512 if ARCH_KEYSTONE
+	default 512 if ARCH_EXYNOS || ARCH_KEYSTONE || SOC_OMAP5
 	default 392 if ARCH_U8500
 	default 352 if ARCH_VT8500
 	default 288 if ARCH_SUNXI
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index e401a766c0bd..583f4a00ec32 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -804,9 +804,19 @@ config DEBUG_LL_INCLUDE
 
 config DEBUG_UNCOMPRESS
 	bool
-	default y if ARCH_MULTIPLATFORM && DEBUG_LL && \
-		     !DEBUG_OMAP2PLUS_UART && \
+	depends on ARCH_MULTIPLATFORM
+	default y if DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
 		     !DEBUG_TEGRA_UART
+	help
+	  This option influences the normal decompressor output for
+	  multiplatform kernels.  Normally, multiplatform kernels disable
+	  decompressor output because it is not possible to know where to
+	  send the decompressor output.
+
+	  When this option is set, the selected DEBUG_LL output method
+	  will be re-used for normal decompressor output on multiplatform
+	  kernels.
+
 
 config UNCOMPRESS_INCLUDE
 	string
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c0ac0f5e5e5c..6fd2ceae305a 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -153,6 +153,7 @@ machine-$(CONFIG_ARCH_DAVINCI) += davinci
 machine-$(CONFIG_ARCH_DOVE)		+= dove
 machine-$(CONFIG_ARCH_EBSA110)		+= ebsa110
 machine-$(CONFIG_ARCH_EP93XX)		+= ep93xx
+machine-$(CONFIG_ARCH_EXYNOS)		+= exynos
 machine-$(CONFIG_ARCH_GEMINI)		+= gemini
 machine-$(CONFIG_ARCH_HIGHBANK)		+= highbank
 machine-$(CONFIG_ARCH_INTEGRATOR)	+= integrator
@@ -160,15 +161,16 @@ machine-$(CONFIG_ARCH_IOP13XX) += iop13xx
 machine-$(CONFIG_ARCH_IOP32X)		+= iop32x
 machine-$(CONFIG_ARCH_IOP33X)		+= iop33x
 machine-$(CONFIG_ARCH_IXP4XX)		+= ixp4xx
+machine-$(CONFIG_ARCH_KEYSTONE)		+= keystone
 machine-$(CONFIG_ARCH_KIRKWOOD)		+= kirkwood
 machine-$(CONFIG_ARCH_KS8695)		+= ks8695
 machine-$(CONFIG_ARCH_LPC32XX)		+= lpc32xx
 machine-$(CONFIG_ARCH_MMP)		+= mmp
 machine-$(CONFIG_ARCH_MSM)		+= msm
 machine-$(CONFIG_ARCH_MV78XX0)		+= mv78xx0
+machine-$(CONFIG_ARCH_MVEBU)		+= mvebu
 machine-$(CONFIG_ARCH_MXC)		+= imx
 machine-$(CONFIG_ARCH_MXS)		+= mxs
-machine-$(CONFIG_ARCH_MVEBU)		+= mvebu
 machine-$(CONFIG_ARCH_NETX)		+= netx
 machine-$(CONFIG_ARCH_NOMADIK)		+= nomadik
 machine-$(CONFIG_ARCH_NSPIRE)		+= nspire
@@ -176,7 +178,6 @@ machine-$(CONFIG_ARCH_OMAP1) += omap1
 machine-$(CONFIG_ARCH_OMAP2PLUS)	+= omap2
 machine-$(CONFIG_ARCH_ORION5X)		+= orion5x
 machine-$(CONFIG_ARCH_PICOXCELL)	+= picoxcell
-machine-$(CONFIG_ARCH_SIRF)		+= prima2
 machine-$(CONFIG_ARCH_PXA)		+= pxa
 machine-$(CONFIG_ARCH_REALVIEW)		+= realview
 machine-$(CONFIG_ARCH_ROCKCHIP)		+= rockchip
@@ -186,25 +187,24 @@ machine-$(CONFIG_ARCH_S3C64XX) += s3c64xx
 machine-$(CONFIG_ARCH_S5P64X0)		+= s5p64x0
 machine-$(CONFIG_ARCH_S5PC100)		+= s5pc100
 machine-$(CONFIG_ARCH_S5PV210)		+= s5pv210
-machine-$(CONFIG_ARCH_EXYNOS)		+= exynos
 machine-$(CONFIG_ARCH_SA1100)		+= sa1100
 machine-$(CONFIG_ARCH_SHARK)		+= shark
 machine-$(CONFIG_ARCH_SHMOBILE)		+= shmobile
+machine-$(CONFIG_ARCH_SIRF)		+= prima2
+machine-$(CONFIG_ARCH_SOCFPGA)		+= socfpga
+machine-$(CONFIG_ARCH_STI)		+= sti
+machine-$(CONFIG_ARCH_SUNXI)		+= sunxi
 machine-$(CONFIG_ARCH_TEGRA)		+= tegra
 machine-$(CONFIG_ARCH_U300)		+= u300
 machine-$(CONFIG_ARCH_U8500)		+= ux500
 machine-$(CONFIG_ARCH_VERSATILE)	+= versatile
 machine-$(CONFIG_ARCH_VEXPRESS)		+= vexpress
+machine-$(CONFIG_ARCH_VIRT)		+= virt
 machine-$(CONFIG_ARCH_VT8500)		+= vt8500
 machine-$(CONFIG_ARCH_W90X900)		+= w90x900
+machine-$(CONFIG_ARCH_ZYNQ)		+= zynq
 machine-$(CONFIG_FOOTBRIDGE)		+= footbridge
-machine-$(CONFIG_ARCH_SOCFPGA)		+= socfpga
 machine-$(CONFIG_PLAT_SPEAR)		+= spear
-machine-$(CONFIG_ARCH_STI)		+= sti
-machine-$(CONFIG_ARCH_VIRT)		+= virt
-machine-$(CONFIG_ARCH_ZYNQ)		+= zynq
-machine-$(CONFIG_ARCH_SUNXI)		+= sunxi
-machine-$(CONFIG_ARCH_KEYSTONE)		+= keystone
 
 # Platform directory name.  This list is sorted alphanumerically
 # by CONFIG_* macro name.
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index d59b70c6a6a0..3d77dbe406f4 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -14,11 +14,11 @@
 	compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9";
 
 	chosen {
-		bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
+		bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
 	};
 
 	memory {
-		reg = <0x20000000 0x10000000>;
+		reg = <0x20000000 0x8000000>;
 	};
 
 	clocks {
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi
index b753855b2058..49e3c45818c2 100644
--- a/arch/arm/boot/dts/at91sam9x5ek.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi
@@ -94,8 +94,9 @@
 
 			usb0: ohci@00600000 {
 				status = "okay";
-				num-ports = <2>;
-				atmel,vbus-gpio = <&pioD 19 GPIO_ACTIVE_LOW
+				num-ports = <3>;
+				atmel,vbus-gpio = <0 /* &pioD 18 GPIO_ACTIVE_LOW *//* Activate to have access to port A */
+						   &pioD 19 GPIO_ACTIVE_LOW
 						   &pioD 20 GPIO_ACTIVE_LOW
 						  >;
 			};
diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi
index 9866cd736dee..a0f2721ea583 100644
--- a/arch/arm/boot/dts/atlas6.dtsi
+++ b/arch/arm/boot/dts/atlas6.dtsi
@@ -485,6 +485,12 @@
 				sirf,function = "usp0";
 			};
 		};
+		usp0_uart_nostreamctrl_pins_a: usp0@1 {
+			usp0 {
+				sirf,pins = "usp0_uart_nostreamctrl_grp";
+				sirf,function = "usp0_uart_nostreamctrl";
+			};
+		};
 		usp1_pins_a: usp1@0 {
 			usp1 {
 				sirf,pins = "usp1grp";
@@ -515,16 +521,16 @@
 				sirf,function = "pulse_count";
 			};
 		};
-		cko0_rst_pins_a: cko0_rst@0 {
-			cko0_rst {
-				sirf,pins = "cko0_rstgrp";
-				sirf,function = "cko0_rst";
+		cko0_pins_a: cko0@0 {
+			cko0 {
+				sirf,pins = "cko0grp";
+				sirf,function = "cko0";
 			};
 		};
-		cko1_rst_pins_a: cko1_rst@0 {
-			cko1_rst {
-				sirf,pins = "cko1_rstgrp";
-				sirf,function = "cko1_rst";
+		cko1_pins_a: cko1@0 {
+			cko1 {
+				sirf,pins = "cko1grp";
+				sirf,function = "cko1";
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/imx28-apx4devkit.dts b/arch/arm/boot/dts/imx28-apx4devkit.dts
index 43bf3c796cba..0e7fed47bd8d 100644
--- a/arch/arm/boot/dts/imx28-apx4devkit.dts
+++ b/arch/arm/boot/dts/imx28-apx4devkit.dts
@@ -147,7 +147,7 @@
 			reg = <0x0a>;
 			VDDA-supply = <&reg_3p3v>;
 			VDDIO-supply = <&reg_3p3v>;
-
+			clocks = <&saif0>;
 		};
 
 		pcf8563: rtc@51 {
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
index 1f0d38d7b16f..e035f4664b97 100644
--- a/arch/arm/boot/dts/imx28-evk.dts
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -195,7 +195,7 @@
 			reg = <0x0a>;
 			VDDA-supply = <&reg_3p3v>;
 			VDDIO-supply = <&reg_3p3v>;
-
+			clocks = <&saif0>;
 		};
 
 		at24@51 {
diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts
index 880df2f13be8..44d9da57736e 100644
--- a/arch/arm/boot/dts/imx28-m28evk.dts
+++ b/arch/arm/boot/dts/imx28-m28evk.dts
@@ -184,7 +184,7 @@
 			reg = <0x0a>;
 			VDDA-supply = <&reg_3p3v>;
 			VDDIO-supply = <&reg_3p3v>;
-
+			clocks = <&saif0>;
 		};
 
 		eeprom: eeprom@51 {
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 6a8acb01b1d3..9524a0571281 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -837,6 +837,7 @@
 			compatible = "fsl,imx28-saif";
 			reg = <0x80042000 0x2000>;
 			interrupts = <59 80>;
+			#clock-cells = <0>;
 			clocks = <&clks 53>;
 			dmas = <&dma_apbx 4>;
 			dma-names = "rx-tx";
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 6dd9486c755b..ad3471ca17c7 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -61,6 +61,16 @@
 		mux-int-port = <2>;
 		mux-ext-port = <3>;
 	};
+
+	clocks {
+		clk_26M: codec_clock {
+			compatible = "fixed-clock";
+			reg=<0>;
+			#clock-cells = <0>;
+			clock-frequency = <26000000>;
+			gpios = <&gpio4 26 1>;
+		};
+	};
 };
 
 &esdhc1 {
@@ -229,6 +239,7 @@
 				MX51_PAD_EIM_A27__GPIO2_21 0x5
 				MX51_PAD_CSPI1_SS0__GPIO4_24 0x85
 				MX51_PAD_CSPI1_SS1__GPIO4_25 0x85
+				MX51_PAD_CSPI1_RDY__GPIO4_26 0x80000000
 			>;
 		};
 	};
@@ -255,7 +266,7 @@
 	sgtl5000: codec@0a {
 		compatible = "fsl,sgtl5000";
 		reg = <0x0a>;
-		clock-frequency = <26000000>;
+		clocks = <&clk_26M>;
 		VDDA-supply = <&vdig_reg>;
 		VDDIO-supply = <&vvideo_reg>;
 	};
diff --git a/arch/arm/boot/dts/imx53-mba53.dts b/arch/arm/boot/dts/imx53-mba53.dts
index aaa33bc99f78..a63090267941 100644
--- a/arch/arm/boot/dts/imx53-mba53.dts
+++ b/arch/arm/boot/dts/imx53-mba53.dts
@@ -27,7 +27,7 @@
 
 	backlight {
 		compatible = "pwm-backlight";
-		pwms = <&pwm2 0 50000 0 0>;
+		pwms = <&pwm2 0 50000>;
 		brightness-levels = <0 24 28 32 36 40 44 48 52 56 60 64 68 72 76 80 84 88 92 96 100>;
 		default-brightness-level = <10>;
 		enable-gpios = <&gpio7 7 0>;
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 3895fbba8fce..569aa9f2c4ed 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -725,15 +725,15 @@
 		uart1 {
 			pinctrl_uart1_1: uart1grp-1 {
 				fsl,pins = <
-					MX53_PAD_CSI0_DAT10__UART1_TXD_MUX 0x1c5
-					MX53_PAD_CSI0_DAT11__UART1_RXD_MUX 0x1c5
+					MX53_PAD_CSI0_DAT10__UART1_TXD_MUX 0x1e4
+					MX53_PAD_CSI0_DAT11__UART1_RXD_MUX 0x1e4
 				>;
 			};
 
 			pinctrl_uart1_2: uart1grp-2 {
 				fsl,pins = <
-					MX53_PAD_PATA_DIOW__UART1_TXD_MUX 0x1c5
-					MX53_PAD_PATA_DMACK__UART1_RXD_MUX 0x1c5
+					MX53_PAD_PATA_DIOW__UART1_TXD_MUX 0x1e4
+					MX53_PAD_PATA_DMACK__UART1_RXD_MUX 0x1e4
 				>;
 			};
 
@@ -748,8 +748,8 @@
 		uart2 {
 			pinctrl_uart2_1: uart2grp-1 {
 				fsl,pins = <
-					MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX 0x1c5
-					MX53_PAD_PATA_DMARQ__UART2_TXD_MUX 0x1c5
+					MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX 0x1e4
+					MX53_PAD_PATA_DMARQ__UART2_TXD_MUX 0x1e4
 				>;
 			};
 
@@ -766,17 +766,17 @@
 		uart3 {
 			pinctrl_uart3_1: uart3grp-1 {
 				fsl,pins = <
-					MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1c5
-					MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1c5
-					MX53_PAD_PATA_DA_1__UART3_CTS 0x1c5
-					MX53_PAD_PATA_DA_2__UART3_RTS 0x1c5
+					MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1e4
+					MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1e4
+					MX53_PAD_PATA_DA_1__UART3_CTS 0x1e4
+					MX53_PAD_PATA_DA_2__UART3_RTS 0x1e4
 				>;
 			};
 
 			pinctrl_uart3_2: uart3grp-2 {
 				fsl,pins = <
-					MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1c5
-					MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1c5
+					MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1e4
+					MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1e4
 				>;
 			};
 
@@ -785,8 +785,8 @@
 		uart4 {
 			pinctrl_uart4_1: uart4grp-1 {
 				fsl,pins = <
-					MX53_PAD_KEY_COL0__UART4_TXD_MUX 0x1c5
-					MX53_PAD_KEY_ROW0__UART4_RXD_MUX 0x1c5
+					MX53_PAD_KEY_COL0__UART4_TXD_MUX 0x1e4
+					MX53_PAD_KEY_ROW0__UART4_RXD_MUX 0x1e4
 				>;
 			};
 		};
@@ -794,8 +794,8 @@
 		uart5 {
 			pinctrl_uart5_1: uart5grp-1 {
 				fsl,pins = <
-					MX53_PAD_KEY_COL1__UART5_TXD_MUX 0x1c5
-					MX53_PAD_KEY_ROW1__UART5_RXD_MUX 0x1c5
+					MX53_PAD_KEY_COL1__UART5_TXD_MUX 0x1e4
+					MX53_PAD_KEY_ROW1__UART5_RXD_MUX 0x1e4
 				>;
 			};
 		};
diff --git a/arch/arm/boot/dts/msm8960-cdp.dts b/arch/arm/boot/dts/msm8960-cdp.dts
index db2060c46540..9c1167b0459b 100644
--- a/arch/arm/boot/dts/msm8960-cdp.dts
+++ b/arch/arm/boot/dts/msm8960-cdp.dts
@@ -26,7 +26,7 @@
 		cpu-offset = <0x80000>;
 	};
 
-	msmgpio: gpio@fd510000 {
+	msmgpio: gpio@800000 {
 		compatible = "qcom,msm-gpio";
 		gpio-controller;
 		#gpio-cells = <2>;
@@ -34,7 +34,7 @@
 		interrupts = <0 32 0x4>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
-		reg = <0xfd510000 0x4000>;
+		reg = <0x800000 0x4000>;
 	};
 
 	serial@16440000 {
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 08b72678abff..65d7b601651c 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -235,7 +235,7 @@
 };
 
 &mmc1 {
-	vmmc-supply = <&vmmcsd_fixed>;
+	vmmc-supply = <&ldo9_reg>;
 	bus-width = <4>;
 };
 
@@ -282,6 +282,7 @@
 
 		regulators {
 			smps123_reg: smps123 {
+				/* VDD_OPP_MPU */
 				regulator-name = "smps123";
 				regulator-min-microvolt = < 600000>;
 				regulator-max-microvolt = <1500000>;
@@ -290,6 +291,7 @@
 			};
 
 			smps45_reg: smps45 {
+				/* VDD_OPP_MM */
 				regulator-name = "smps45";
 				regulator-min-microvolt = < 600000>;
 				regulator-max-microvolt = <1310000>;
@@ -298,6 +300,7 @@
 			};
 
 			smps6_reg: smps6 {
+				/* VDD_DDR3 - over VDD_SMPS6 */
 				regulator-name = "smps6";
 				regulator-min-microvolt = <1200000>;
 				regulator-max-microvolt = <1200000>;
@@ -306,6 +309,7 @@
 			};
 
 			smps7_reg: smps7 {
+				/* VDDS_1v8_OMAP over VDDS_1v8_MAIN */
 				regulator-name = "smps7";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
@@ -314,6 +318,7 @@
 			};
 
 			smps8_reg: smps8 {
+				/* VDD_OPP_CORE */
 				regulator-name = "smps8";
 				regulator-min-microvolt = < 600000>;
 				regulator-max-microvolt = <1310000>;
@@ -322,15 +327,15 @@
 			};
 
 			smps9_reg: smps9 {
+				/* VDDA_2v1_AUD over VDD_2v1 */
 				regulator-name = "smps9";
 				regulator-min-microvolt = <2100000>;
 				regulator-max-microvolt = <2100000>;
-				regulator-always-on;
-				regulator-boot-on;
 				ti,smps-range = <0x80>;
 			};
 
 			smps10_reg: smps10 {
+				/* VBUS_5V_OTG */
 				regulator-name = "smps10";
 				regulator-min-microvolt = <5000000>;
 				regulator-max-microvolt = <5000000>;
@@ -339,38 +344,40 @@
 			};
 
 			ldo1_reg: ldo1 {
+				/* VDDAPHY_CAM: vdda_csiport */
 				regulator-name = "ldo1";
-				regulator-min-microvolt = <2800000>;
-				regulator-max-microvolt = <2800000>;
-				regulator-always-on;
-				regulator-boot-on;
+				regulator-min-microvolt = <1500000>;
+				regulator-max-microvolt = <1800000>;
 			};
 
 			ldo2_reg: ldo2 {
+				/* VCC_2V8_DISP: Does not go anywhere */
 				regulator-name = "ldo2";
-				regulator-min-microvolt = <2900000>;
-				regulator-max-microvolt = <2900000>;
-				regulator-always-on;
-				regulator-boot-on;
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				/* Unused */
+				status = "disabled";
 			};
 
 			ldo3_reg: ldo3 {
+				/* VDDAPHY_MDM: vdda_lli */
 				regulator-name = "ldo3";
-				regulator-min-microvolt = <3000000>;
-				regulator-max-microvolt = <3000000>;
-				regulator-always-on;
+				regulator-min-microvolt = <1500000>;
+				regulator-max-microvolt = <1500000>;
 				regulator-boot-on;
+				/* Only if Modem is used */
+				status = "disabled";
 			};
 
 			ldo4_reg: ldo4 {
+				/* VDDAPHY_DISP: vdda_dsiport/hdmi */
 				regulator-name = "ldo4";
-				regulator-min-microvolt = <2200000>;
-				regulator-max-microvolt = <2200000>;
-				regulator-always-on;
-				regulator-boot-on;
+				regulator-min-microvolt = <1500000>;
+				regulator-max-microvolt = <1800000>;
 			};
 
 			ldo5_reg: ldo5 {
+				/* VDDA_1V8_PHY: usb/sata/hdmi.. */
 				regulator-name = "ldo5";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
@@ -379,38 +386,43 @@
 			};
 
 			ldo6_reg: ldo6 {
+				/* VDDS_1V2_WKUP: hsic/ldo_emu_wkup */
 				regulator-name = "ldo6";
-				regulator-min-microvolt = <1500000>;
-				regulator-max-microvolt = <1500000>;
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
 				regulator-always-on;
 				regulator-boot-on;
 			};
 
 			ldo7_reg: ldo7 {
+				/* VDD_VPP: vpp1 */
 				regulator-name = "ldo7";
-				regulator-min-microvolt = <1500000>;
-				regulator-max-microvolt = <1500000>;
-				regulator-always-on;
-				regulator-boot-on;
+				regulator-min-microvolt = <2000000>;
+				regulator-max-microvolt = <2000000>;
+				/* Only for efuse reprograming! */
+				status = "disabled";
 			};
 
 			ldo8_reg: ldo8 {
+				/* VDD_3v0: Does not go anywhere */
 				regulator-name = "ldo8";
-				regulator-min-microvolt = <1500000>;
-				regulator-max-microvolt = <1500000>;
-				regulator-always-on;
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
 				regulator-boot-on;
+				/* Unused */
+				status = "disabled";
 			};
 
 			ldo9_reg: ldo9 {
+				/* VCC_DV_SDIO: vdds_sdcard */
 				regulator-name = "ldo9";
 				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <3300000>;
-				regulator-always-on;
+				regulator-max-microvolt = <3000000>;
 				regulator-boot-on;
 			};
 
 			ldoln_reg: ldoln {
+				/* VDDA_1v8_REF: vdds_osc/mm_l4per.. */
 				regulator-name = "ldoln";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
@@ -419,12 +431,20 @@
 			};
 
 			ldousb_reg: ldousb {
+				/* VDDA_3V_USB: VDDA_USBHS33 */
 				regulator-name = "ldousb";
 				regulator-min-microvolt = <3250000>;
 				regulator-max-microvolt = <3250000>;
 				regulator-always-on;
 				regulator-boot-on;
 			};
+
+			regen3_reg: regen3 {
+				/* REGEN3 controls LDO9 supply to card */
+				regulator-name = "regen3";
+				regulator-always-on;
+				regulator-boot-on;
+			};
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index 05e9489cf95c..bbeb623fc2c6 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -515,16 +515,16 @@
 				sirf,function = "pulse_count";
 			};
 		};
-		cko0_rst_pins_a: cko0_rst@0 {
-			cko0_rst {
-				sirf,pins = "cko0_rstgrp";
-				sirf,function = "cko0_rst";
+		cko0_pins_a: cko0@0 {
+			cko0 {
+				sirf,pins = "cko0grp";
+				sirf,function = "cko0";
 			};
 		};
-		cko1_rst_pins_a: cko1_rst@0 {
-			cko1_rst {
-				sirf,pins = "cko1_rstgrp";
-				sirf,function = "cko1_rst";
+		cko1_pins_a: cko1@0 {
+			cko1 {
+				sirf,pins = "cko1grp";
+				sirf,function = "cko1";
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/stih416-pinctrl.dtsi b/arch/arm/boot/dts/stih416-pinctrl.dtsi
index 957b21a71b4b..0f246c979262 100644
--- a/arch/arm/boot/dts/stih416-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stih416-pinctrl.dtsi
@@ -166,6 +166,15 @@
 				reg = <0x9000 0x100>;
 				st,bank-name = "PIO31";
 			};
+
+			serial2-oe {
+				pinctrl_serial2_oe: serial2-1 {
+					st,pins {
+						output-enable = <&PIO11 3 ALT2 OUT>;
+					};
+				};
+			};
+
 		};
 
 		pin-controller-rear {
@@ -218,7 +227,6 @@
 					st,pins {
 						tx = <&PIO17 4 ALT2 OUT>;
 						rx = <&PIO17 5 ALT2 IN>;
-						output-enable = <&PIO11 3 ALT2 OUT>;
 					};
 				};
 			};
diff --git a/arch/arm/boot/dts/stih416.dtsi b/arch/arm/boot/dts/stih416.dtsi
index 3cecd9689a49..1a0326ea7d07 100644
--- a/arch/arm/boot/dts/stih416.dtsi
+++ b/arch/arm/boot/dts/stih416.dtsi
@@ -79,7 +79,7 @@
 			interrupts = <0 197 0>;
 			clocks = <&CLK_S_ICN_REG_0>;
 			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_serial2>;
+			pinctrl-0 = <&pinctrl_serial2 &pinctrl_serial2_oe>;
 		};
 
 		/* SBC_UART1 */
diff --git a/arch/arm/boot/dts/stih41x.dtsi b/arch/arm/boot/dts/stih41x.dtsi
index 7321403cab8a..f5b9898d9c6e 100644
--- a/arch/arm/boot/dts/stih41x.dtsi
+++ b/arch/arm/boot/dts/stih41x.dtsi
@@ -6,10 +6,12 @@
 	#address-cells = <1>;
 	#size-cells = <0>;
 	cpu@0 {
+		device_type = "cpu";
 		compatible = "arm,cortex-a9";
 		reg = <0>;
 	};
 	cpu@1 {
+		device_type = "cpu";
 		compatible = "arm,cortex-a9";
 		reg = <1>;
 	};
diff --git a/arch/arm/boot/dts/tegra20-colibri-512.dtsi b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
index 2fcb3f2ca160..5592be6f2f7a 100644
--- a/arch/arm/boot/dts/tegra20-colibri-512.dtsi
+++ b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
@@ -457,6 +457,7 @@
 	};
 
 	usb-phy@c5004000 {
+		status = "okay";
 		nvidia,phy-reset-gpio = <&gpio TEGRA_GPIO(V, 1)
 			GPIO_ACTIVE_LOW>;
 	};
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index 365760b33a26..40e6fb280333 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -830,6 +830,8 @@
 			regulator-max-microvolt = <5000000>;
 			enable-active-high;
 			gpio = <&gpio 24 0>; /* PD0 */
+			regulator-always-on;
+			regulator-boot-on;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index ed4b901b0227..37c93d3c4812 100644
--- a/arch/arm/boot/dts/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
@@ -412,6 +412,8 @@
412 regulator-max-microvolt = <5000000>; 412 regulator-max-microvolt = <5000000>;
413 enable-active-high; 413 enable-active-high;
414 gpio = <&gpio 170 0>; /* PV2 */ 414 gpio = <&gpio 170 0>; /* PV2 */
415 regulator-always-on;
416 regulator-boot-on;
415 }; 417 };
416 }; 418 };
417 419
diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts
index ab67c94db280..a3d0ebad78a1 100644
--- a/arch/arm/boot/dts/tegra20-whistler.dts
+++ b/arch/arm/boot/dts/tegra20-whistler.dts
@@ -588,6 +588,8 @@
588 regulator-max-microvolt = <5000000>; 588 regulator-max-microvolt = <5000000>;
589 enable-active-high; 589 enable-active-high;
590 gpio = <&tca6416 0 0>; /* GPIO_PMU0 */ 590 gpio = <&tca6416 0 0>; /* GPIO_PMU0 */
591 regulator-always-on;
592 regulator-boot-on;
591 }; 593 };
592 594
593 vbus3_reg: regulator@3 { 595 vbus3_reg: regulator@3 {
@@ -598,6 +600,8 @@
598 regulator-max-microvolt = <5000000>; 600 regulator-max-microvolt = <5000000>;
599 enable-active-high; 601 enable-active-high;
600 gpio = <&tca6416 1 0>; /* GPIO_PMU1 */ 602 gpio = <&tca6416 1 0>; /* GPIO_PMU1 */
603 regulator-always-on;
604 regulator-boot-on;
601 }; 605 };
602 }; 606 };
603 607
diff --git a/arch/arm/boot/dts/twl4030.dtsi b/arch/arm/boot/dts/twl4030.dtsi
index b3034da00a37..ae6a17aed9ee 100644
--- a/arch/arm/boot/dts/twl4030.dtsi
+++ b/arch/arm/boot/dts/twl4030.dtsi
@@ -47,6 +47,12 @@
47 regulator-max-microvolt = <3150000>; 47 regulator-max-microvolt = <3150000>;
48 }; 48 };
49 49
50 vmmc2: regulator-vmmc2 {
51 compatible = "ti,twl4030-vmmc2";
52 regulator-min-microvolt = <1850000>;
53 regulator-max-microvolt = <3150000>;
54 };
55
50 vusb1v5: regulator-vusb1v5 { 56 vusb1v5: regulator-vusb1v5 {
51 compatible = "ti,twl4030-vusb1v5"; 57 compatible = "ti,twl4030-vusb1v5";
52 }; 58 };
diff --git a/arch/arm/boot/dts/vf610.dtsi b/arch/arm/boot/dts/vf610.dtsi
index e1eb7dadda80..67d929cf9804 100644
--- a/arch/arm/boot/dts/vf610.dtsi
+++ b/arch/arm/boot/dts/vf610.dtsi
@@ -442,8 +442,8 @@
442 compatible = "fsl,mvf600-fec"; 442 compatible = "fsl,mvf600-fec";
443 reg = <0x400d0000 0x1000>; 443 reg = <0x400d0000 0x1000>;
444 interrupts = <0 78 0x04>; 444 interrupts = <0 78 0x04>;
445 clocks = <&clks VF610_CLK_ENET>, 445 clocks = <&clks VF610_CLK_ENET0>,
446 <&clks VF610_CLK_ENET>, 446 <&clks VF610_CLK_ENET0>,
447 <&clks VF610_CLK_ENET>; 447 <&clks VF610_CLK_ENET>;
448 clock-names = "ipg", "ahb", "ptp"; 448 clock-names = "ipg", "ahb", "ptp";
449 status = "disabled"; 449 status = "disabled";
@@ -453,8 +453,8 @@
453 compatible = "fsl,mvf600-fec"; 453 compatible = "fsl,mvf600-fec";
454 reg = <0x400d1000 0x1000>; 454 reg = <0x400d1000 0x1000>;
455 interrupts = <0 79 0x04>; 455 interrupts = <0 79 0x04>;
456 clocks = <&clks VF610_CLK_ENET>, 456 clocks = <&clks VF610_CLK_ENET1>,
457 <&clks VF610_CLK_ENET>, 457 <&clks VF610_CLK_ENET1>,
458 <&clks VF610_CLK_ENET>; 458 <&clks VF610_CLK_ENET>;
459 clock-names = "ipg", "ahb", "ptp"; 459 clock-names = "ipg", "ahb", "ptp";
460 status = "disabled"; 460 status = "disabled";
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index a432e6c1dac1..39ad030ac0c7 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -26,7 +26,6 @@
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/edma.h> 28#include <linux/edma.h>
29#include <linux/err.h>
30#include <linux/of_address.h> 29#include <linux/of_address.h>
31#include <linux/of_device.h> 30#include <linux/of_device.h>
32#include <linux/of_dma.h> 31#include <linux/of_dma.h>
diff --git a/arch/arm/configs/da8xx_omapl_defconfig b/arch/arm/configs/da8xx_omapl_defconfig
index 7c868139bdb0..1571bea48bed 100644
--- a/arch/arm/configs/da8xx_omapl_defconfig
+++ b/arch/arm/configs/da8xx_omapl_defconfig
@@ -102,6 +102,8 @@ CONFIG_SND_SOC=m
102CONFIG_SND_DAVINCI_SOC=m 102CONFIG_SND_DAVINCI_SOC=m
103# CONFIG_HID_SUPPORT is not set 103# CONFIG_HID_SUPPORT is not set
104# CONFIG_USB_SUPPORT is not set 104# CONFIG_USB_SUPPORT is not set
105CONFIG_DMADEVICES=y
106CONFIG_TI_EDMA=y
105CONFIG_EXT2_FS=y 107CONFIG_EXT2_FS=y
106CONFIG_EXT3_FS=y 108CONFIG_EXT3_FS=y
107CONFIG_XFS_FS=m 109CONFIG_XFS_FS=m
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index c86fd75e181a..ab2f7378352c 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -162,6 +162,8 @@ CONFIG_LEDS_TRIGGERS=y
162CONFIG_LEDS_TRIGGER_TIMER=m 162CONFIG_LEDS_TRIGGER_TIMER=m
163CONFIG_LEDS_TRIGGER_HEARTBEAT=m 163CONFIG_LEDS_TRIGGER_HEARTBEAT=m
164CONFIG_RTC_CLASS=y 164CONFIG_RTC_CLASS=y
165CONFIG_DMADEVICES=y
166CONFIG_TI_EDMA=y
165CONFIG_EXT2_FS=y 167CONFIG_EXT2_FS=y
166CONFIG_EXT3_FS=y 168CONFIG_EXT3_FS=y
167CONFIG_XFS_FS=m 169CONFIG_XFS_FS=m
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index fe0bdc361d2c..6e572c64cf5a 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -53,6 +53,7 @@ CONFIG_IP_PNP=y
53CONFIG_IP_PNP_DHCP=y 53CONFIG_IP_PNP_DHCP=y
54CONFIG_DEVTMPFS=y 54CONFIG_DEVTMPFS=y
55CONFIG_DEVTMPFS_MOUNT=y 55CONFIG_DEVTMPFS_MOUNT=y
56CONFIG_OMAP_OCP2SCP=y
56CONFIG_BLK_DEV_SD=y 57CONFIG_BLK_DEV_SD=y
57CONFIG_ATA=y 58CONFIG_ATA=y
58CONFIG_SATA_AHCI_PLATFORM=y 59CONFIG_SATA_AHCI_PLATFORM=y
@@ -61,6 +62,7 @@ CONFIG_SATA_MV=y
61CONFIG_NETDEVICES=y 62CONFIG_NETDEVICES=y
62CONFIG_SUN4I_EMAC=y 63CONFIG_SUN4I_EMAC=y
63CONFIG_NET_CALXEDA_XGMAC=y 64CONFIG_NET_CALXEDA_XGMAC=y
65CONFIG_KS8851=y
64CONFIG_SMSC911X=y 66CONFIG_SMSC911X=y
65CONFIG_STMMAC_ETH=y 67CONFIG_STMMAC_ETH=y
66CONFIG_MDIO_SUN4I=y 68CONFIG_MDIO_SUN4I=y
@@ -89,6 +91,7 @@ CONFIG_I2C_DESIGNWARE_PLATFORM=y
89CONFIG_I2C_SIRF=y 91CONFIG_I2C_SIRF=y
90CONFIG_I2C_TEGRA=y 92CONFIG_I2C_TEGRA=y
91CONFIG_SPI=y 93CONFIG_SPI=y
94CONFIG_SPI_OMAP24XX=y
92CONFIG_SPI_PL022=y 95CONFIG_SPI_PL022=y
93CONFIG_SPI_SIRF=y 96CONFIG_SPI_SIRF=y
94CONFIG_SPI_TEGRA114=y 97CONFIG_SPI_TEGRA114=y
@@ -111,11 +114,12 @@ CONFIG_FB_SIMPLE=y
111CONFIG_USB=y 114CONFIG_USB=y
112CONFIG_USB_XHCI_HCD=y 115CONFIG_USB_XHCI_HCD=y
113CONFIG_USB_EHCI_HCD=y 116CONFIG_USB_EHCI_HCD=y
114CONFIG_USB_EHCI_MXC=y
115CONFIG_USB_EHCI_TEGRA=y 117CONFIG_USB_EHCI_TEGRA=y
116CONFIG_USB_EHCI_HCD_PLATFORM=y 118CONFIG_USB_EHCI_HCD_PLATFORM=y
117CONFIG_USB_ISP1760_HCD=y 119CONFIG_USB_ISP1760_HCD=y
118CONFIG_USB_STORAGE=y 120CONFIG_USB_STORAGE=y
121CONFIG_USB_CHIPIDEA=y
122CONFIG_USB_CHIPIDEA_HOST=y
119CONFIG_AB8500_USB=y 123CONFIG_AB8500_USB=y
120CONFIG_NOP_USB_XCEIV=y 124CONFIG_NOP_USB_XCEIV=y
121CONFIG_OMAP_USB2=y 125CONFIG_OMAP_USB2=y
diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig
index 35f8cf299fa2..263ae3869e32 100644
--- a/arch/arm/configs/nhk8815_defconfig
+++ b/arch/arm/configs/nhk8815_defconfig
@@ -1,6 +1,8 @@
1# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
2# CONFIG_SWAP is not set 2# CONFIG_SWAP is not set
3CONFIG_SYSVIPC=y 3CONFIG_SYSVIPC=y
4CONFIG_NO_HZ_IDLE=y
5CONFIG_HIGH_RES_TIMERS=y
4CONFIG_IKCONFIG=y 6CONFIG_IKCONFIG=y
5CONFIG_IKCONFIG_PROC=y 7CONFIG_IKCONFIG_PROC=y
6CONFIG_LOG_BUF_SHIFT=14 8CONFIG_LOG_BUF_SHIFT=14
@@ -48,7 +50,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
48CONFIG_MTD=y 50CONFIG_MTD=y
49CONFIG_MTD_TESTS=m 51CONFIG_MTD_TESTS=m
50CONFIG_MTD_CMDLINE_PARTS=y 52CONFIG_MTD_CMDLINE_PARTS=y
51CONFIG_MTD_CHAR=y
52CONFIG_MTD_BLOCK=y 53CONFIG_MTD_BLOCK=y
53CONFIG_MTD_NAND_ECC_SMC=y 54CONFIG_MTD_NAND_ECC_SMC=y
54CONFIG_MTD_NAND=y 55CONFIG_MTD_NAND=y
@@ -94,8 +95,10 @@ CONFIG_I2C_GPIO=y
94CONFIG_I2C_NOMADIK=y 95CONFIG_I2C_NOMADIK=y
95CONFIG_DEBUG_GPIO=y 96CONFIG_DEBUG_GPIO=y
96# CONFIG_HWMON is not set 97# CONFIG_HWMON is not set
98CONFIG_REGULATOR=y
97CONFIG_MMC=y 99CONFIG_MMC=y
98CONFIG_MMC_CLKGATE=y 100CONFIG_MMC_UNSAFE_RESUME=y
101# CONFIG_MMC_BLOCK_BOUNCE is not set
99CONFIG_MMC_ARMMMCI=y 102CONFIG_MMC_ARMMMCI=y
100CONFIG_NEW_LEDS=y 103CONFIG_NEW_LEDS=y
101CONFIG_LEDS_CLASS=y 104CONFIG_LEDS_CLASS=y
diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
deleted file mode 100644
index 92f10cb5c70c..000000000000
--- a/arch/arm/include/asm/a.out-core.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/* a.out coredump register dumper
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#ifndef _ASM_A_OUT_CORE_H
13#define _ASM_A_OUT_CORE_H
14
15#ifdef __KERNEL__
16
17#include <linux/user.h>
18#include <linux/elfcore.h>
19
20/*
21 * fill in the user structure for an a.out core dump
22 */
23static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
24{
25 struct task_struct *tsk = current;
26
27 dump->magic = CMAGIC;
28 dump->start_code = tsk->mm->start_code;
29 dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
30
31 dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
32 dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
33 dump->u_ssize = 0;
34
35 memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
36
37 if (dump->start_stack < 0x04000000)
38 dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
39
40 dump->regs = *regs;
41 dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
42}
43
44#endif /* __KERNEL__ */
45#endif /* _ASM_A_OUT_CORE_H */
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 8c25dc4e9851..9672e978d50d 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -89,13 +89,18 @@ extern unsigned int processor_id;
89 __val; \ 89 __val; \
90 }) 90 })
91 91
92/*
93 * The memory clobber prevents gcc 4.5 from reordering the mrc before
94 * any is_smp() tests, which can cause undefined instruction aborts on
95 * ARM1136 r0 due to the missing extended CP15 registers.
96 */
92#define read_cpuid_ext(ext_reg) \ 97#define read_cpuid_ext(ext_reg) \
93 ({ \ 98 ({ \
94 unsigned int __val; \ 99 unsigned int __val; \
95 asm("mrc p15, 0, %0, c0, " ext_reg \ 100 asm("mrc p15, 0, %0, c0, " ext_reg \
96 : "=r" (__val) \ 101 : "=r" (__val) \
97 : \ 102 : \
98 : "cc"); \ 103 : "memory"); \
99 __val; \ 104 __val; \
100 }) 105 })
101 106
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 38050b1c4800..56211f2084ef 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -130,4 +130,10 @@ struct mm_struct;
130extern unsigned long arch_randomize_brk(struct mm_struct *mm); 130extern unsigned long arch_randomize_brk(struct mm_struct *mm);
131#define arch_randomize_brk arch_randomize_brk 131#define arch_randomize_brk arch_randomize_brk
132 132
133#ifdef CONFIG_MMU
134#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
135struct linux_binprm;
136int arch_setup_additional_pages(struct linux_binprm *, int);
137#endif
138
133#endif 139#endif
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index e3d55547e755..6f18da09668b 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,8 +6,11 @@
6typedef struct { 6typedef struct {
7#ifdef CONFIG_CPU_HAS_ASID 7#ifdef CONFIG_CPU_HAS_ASID
8 atomic64_t id; 8 atomic64_t id;
9#else
10 int switch_pending;
9#endif 11#endif
10 unsigned int vmalloc_seq; 12 unsigned int vmalloc_seq;
13 unsigned long sigpage;
11} mm_context_t; 14} mm_context_t;
12 15
13#ifdef CONFIG_CPU_HAS_ASID 16#ifdef CONFIG_CPU_HAS_ASID
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index b5792b7fd8d3..9b32f76bb0dd 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -56,7 +56,7 @@ static inline void check_and_switch_context(struct mm_struct *mm,
56 * on non-ASID CPUs, the old mm will remain valid until the 56 * on non-ASID CPUs, the old mm will remain valid until the
57 * finish_arch_post_lock_switch() call. 57 * finish_arch_post_lock_switch() call.
58 */ 58 */
59 set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM); 59 mm->context.switch_pending = 1;
60 else 60 else
61 cpu_switch_mm(mm->pgd, mm); 61 cpu_switch_mm(mm->pgd, mm);
62} 62}
@@ -65,9 +65,21 @@ static inline void check_and_switch_context(struct mm_struct *mm,
65 finish_arch_post_lock_switch 65 finish_arch_post_lock_switch
66static inline void finish_arch_post_lock_switch(void) 66static inline void finish_arch_post_lock_switch(void)
67{ 67{
68 if (test_and_clear_thread_flag(TIF_SWITCH_MM)) { 68 struct mm_struct *mm = current->mm;
69 struct mm_struct *mm = current->mm; 69
70 cpu_switch_mm(mm->pgd, mm); 70 if (mm && mm->context.switch_pending) {
71 /*
72 * Preemption must be disabled during cpu_switch_mm() as we
73 * have some stateful cache flush implementations. Check
74 * switch_pending again in case we were preempted and the
75 * switch to this mm was already done.
76 */
77 preempt_disable();
78 if (mm->context.switch_pending) {
79 mm->context.switch_pending = 0;
80 cpu_switch_mm(mm->pgd, mm);
81 }
82 preempt_enable_no_resched();
71 } 83 }
72} 84}
73 85
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 6363f3d1d505..4355f0ec44d6 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -142,7 +142,9 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
142#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) 142#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
143extern void copy_page(void *to, const void *from); 143extern void copy_page(void *to, const void *from);
144 144
145#ifdef CONFIG_KUSER_HELPERS
145#define __HAVE_ARCH_GATE_AREA 1 146#define __HAVE_ARCH_GATE_AREA 1
147#endif
146 148
147#ifdef CONFIG_ARM_LPAE 149#ifdef CONFIG_ARM_LPAE
148#include <asm/pgtable-3level-types.h> 150#include <asm/pgtable-3level-types.h>
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 06e7d509eaac..413f3876341c 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -54,7 +54,6 @@ struct thread_struct {
54 54
55#define start_thread(regs,pc,sp) \ 55#define start_thread(regs,pc,sp) \
56({ \ 56({ \
57 unsigned long *stack = (unsigned long *)sp; \
58 memset(regs->uregs, 0, sizeof(regs->uregs)); \ 57 memset(regs->uregs, 0, sizeof(regs->uregs)); \
59 if (current->personality & ADDR_LIMIT_32BIT) \ 58 if (current->personality & ADDR_LIMIT_32BIT) \
60 regs->ARM_cpsr = USR_MODE; \ 59 regs->ARM_cpsr = USR_MODE; \
@@ -65,9 +64,6 @@ struct thread_struct {
65 regs->ARM_cpsr |= PSR_ENDSTATE; \ 64 regs->ARM_cpsr |= PSR_ENDSTATE; \
66 regs->ARM_pc = pc & ~1; /* pc */ \ 65 regs->ARM_pc = pc & ~1; /* pc */ \
67 regs->ARM_sp = sp; /* sp */ \ 66 regs->ARM_sp = sp; /* sp */ \
68 regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
69 regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
70 regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
71 nommu_start_thread(regs); \ 67 nommu_start_thread(regs); \
72}) 68})
73 69
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 6462a721ebd4..a252c0bfacf5 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -88,4 +88,7 @@ static inline u32 mpidr_hash_size(void)
88{ 88{
89 return 1 << mpidr_hash.bits; 89 return 1 << mpidr_hash.bits;
90} 90}
91
92extern int platform_can_cpu_hotplug(void);
93
91#endif 94#endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index f8b8965666e9..b07c09e5a0ac 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
107 " subs %1, %0, %0, ror #16\n" 107 " subs %1, %0, %0, ror #16\n"
108 " addeq %0, %0, %4\n" 108 " addeq %0, %0, %4\n"
109 " strexeq %2, %0, [%3]" 109 " strexeq %2, %0, [%3]"
110 : "=&r" (slock), "=&r" (contended), "=r" (res) 110 : "=&r" (slock), "=&r" (contended), "=&r" (res)
111 : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) 111 : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
112 : "cc"); 112 : "cc");
113 } while (res); 113 } while (res);
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
168 168
169static inline int arch_write_trylock(arch_rwlock_t *rw) 169static inline int arch_write_trylock(arch_rwlock_t *rw)
170{ 170{
171 unsigned long tmp; 171 unsigned long contended, res;
172 172
173 __asm__ __volatile__( 173 do {
174" ldrex %0, [%1]\n" 174 __asm__ __volatile__(
175" teq %0, #0\n" 175 " ldrex %0, [%2]\n"
176" strexeq %0, %2, [%1]" 176 " mov %1, #0\n"
177 : "=&r" (tmp) 177 " teq %0, #0\n"
178 : "r" (&rw->lock), "r" (0x80000000) 178 " strexeq %1, %3, [%2]"
179 : "cc"); 179 : "=&r" (contended), "=&r" (res)
180 : "r" (&rw->lock), "r" (0x80000000)
181 : "cc");
182 } while (res);
180 183
181 if (tmp == 0) { 184 if (!contended) {
182 smp_mb(); 185 smp_mb();
183 return 1; 186 return 1;
184 } else { 187 } else {
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
254 257
255static inline int arch_read_trylock(arch_rwlock_t *rw) 258static inline int arch_read_trylock(arch_rwlock_t *rw)
256{ 259{
257 unsigned long tmp, tmp2 = 1; 260 unsigned long contended, res;
258 261
259 __asm__ __volatile__( 262 do {
260" ldrex %0, [%2]\n" 263 __asm__ __volatile__(
261" adds %0, %0, #1\n" 264 " ldrex %0, [%2]\n"
262" strexpl %1, %0, [%2]\n" 265 " mov %1, #0\n"
263 : "=&r" (tmp), "+r" (tmp2) 266 " adds %0, %0, #1\n"
264 : "r" (&rw->lock) 267 " strexpl %1, %0, [%2]"
265 : "cc"); 268 : "=&r" (contended), "=&r" (res)
269 : "r" (&rw->lock)
270 : "cc");
271 } while (res);
266 272
267 smp_mb(); 273 /* If the lock is negative, then it is already held for write. */
268 return tmp2 == 0; 274 if (contended < 0x80000000) {
275 smp_mb();
276 return 1;
277 } else {
278 return 0;
279 }
269} 280}
270 281
271/* read_can_lock - would read_trylock() succeed? */ 282/* read_can_lock - would read_trylock() succeed? */
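
Both trylock rewrites above share one shape: retry while the store-exclusive fails spuriously (res != 0), and report contention only from the loaded value. A rough C11 model, with ldrex/strex approximated by a weak compare-and-swap (an analogy, not the kernel implementation):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* 0 == unlocked; 0x80000000 == write-held, as in the asm above. */
    static bool write_trylock_sketch(_Atomic unsigned long *lock)
    {
        unsigned long expected;

        do {
            expected = 0;
            /* a weak CAS may fail spuriously, like strex */
            if (atomic_compare_exchange_weak_explicit(lock, &expected,
                    0x80000000UL, memory_order_acquire,
                    memory_order_relaxed))
                return true;            /* acquired */
        } while (expected == 0);        /* spurious failure: retry */

        return false;                   /* genuinely contended */
    }
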
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 214d4158089a..2b8114fcba09 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -156,7 +156,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
156#define TIF_USING_IWMMXT 17 156#define TIF_USING_IWMMXT 17
157#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ 157#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
158#define TIF_RESTORE_SIGMASK 20 158#define TIF_RESTORE_SIGMASK 20
159#define TIF_SWITCH_MM 22 /* deferred switch_mm */
160 159
161#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 160#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
162#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 161#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 46e7cfb3e721..0baf7f0d9394 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -43,6 +43,7 @@ struct mmu_gather {
43 struct mm_struct *mm; 43 struct mm_struct *mm;
44 unsigned int fullmm; 44 unsigned int fullmm;
45 struct vm_area_struct *vma; 45 struct vm_area_struct *vma;
46 unsigned long start, end;
46 unsigned long range_start; 47 unsigned long range_start;
47 unsigned long range_end; 48 unsigned long range_end;
48 unsigned int nr; 49 unsigned int nr;
@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
107} 108}
108 109
109static inline void 110static inline void
110tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm) 111tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
111{ 112{
112 tlb->mm = mm; 113 tlb->mm = mm;
113 tlb->fullmm = fullmm; 114 tlb->fullmm = !(start | (end+1));
115 tlb->start = start;
116 tlb->end = end;
114 tlb->vma = NULL; 117 tlb->vma = NULL;
115 tlb->max = ARRAY_SIZE(tlb->local); 118 tlb->max = ARRAY_SIZE(tlb->local);
116 tlb->pages = tlb->local; 119 tlb->pages = tlb->local;
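
The new fullmm expression above, !(start | (end + 1)), is true exactly when start == 0 and end == ~0UL (so end + 1 wraps to zero), i.e. when the gather covers the whole address space. Worked out:

    #include <assert.h>

    static void fullmm_examples(void)
    {
        unsigned long start = 0, end = ~0UL;

        assert(!(start | (end + 1)) == 1);  /* whole space: full-mm flush */

        start = 0x8000;
        end = 0xffff;
        assert(!(start | (end + 1)) == 0);  /* partial range: ranged flush */
    }
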
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index fdbb9e369745..f467e9b3f8d5 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -443,7 +443,18 @@ static inline void local_flush_bp_all(void)
443 isb(); 443 isb();
444} 444}
445 445
446#include <asm/cputype.h>
446#ifdef CONFIG_ARM_ERRATA_798181 447#ifdef CONFIG_ARM_ERRATA_798181
448static inline int erratum_a15_798181(void)
449{
450 unsigned int midr = read_cpuid_id();
451
452 /* Cortex-A15 r0p0..r3p2 affected */
453 if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
454 return 0;
455 return 1;
456}
457
447static inline void dummy_flush_tlb_a15_erratum(void) 458static inline void dummy_flush_tlb_a15_erratum(void)
448{ 459{
449 /* 460 /*
@@ -453,6 +464,11 @@ static inline void dummy_flush_tlb_a15_erratum(void)
453 dsb(); 464 dsb();
454} 465}
455#else 466#else
467static inline int erratum_a15_798181(void)
468{
469 return 0;
470}
471
456static inline void dummy_flush_tlb_a15_erratum(void) 472static inline void dummy_flush_tlb_a15_erratum(void)
457{ 473{
458} 474}
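
The erratum_a15_798181() check above reads the Main ID Register: masking with 0xff0ffff0 drops the variant (bits 23:20) and revision (bits 3:0) fields, so the first comparison matches implementer 0x41 (ARM) with part number 0xC0F (Cortex-A15), and the second excludes anything newer than r3p2. Decoded for an example value:

    static int midr_example(void)
    {
        unsigned int midr = 0x412fc0f1;                 /* Cortex-A15 r2p1 */
        unsigned int implementer = (midr >> 24) & 0xff; /* 0x41 ('A') */
        unsigned int variant = (midr >> 20) & 0xf;      /* r2 */
        unsigned int part = (midr >> 4) & 0xfff;        /* 0xc0f */
        unsigned int revision = midr & 0xf;             /* p1 */

        /* r2p1 lies within r0p0..r3p2, so the erratum applies: */
        return (midr & 0xff0ffff0) == 0x410fc0f0 && midr <= 0x413fc0f2;
    }
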
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 50af92bac737..4371f45c5784 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -29,6 +29,7 @@
29#define BOOT_CPU_MODE_MISMATCH PSR_N_BIT 29#define BOOT_CPU_MODE_MISMATCH PSR_N_BIT
30 30
31#ifndef __ASSEMBLY__ 31#ifndef __ASSEMBLY__
32#include <asm/cacheflush.h>
32 33
33#ifdef CONFIG_ARM_VIRT_EXT 34#ifdef CONFIG_ARM_VIRT_EXT
34/* 35/*
@@ -41,10 +42,21 @@
41 */ 42 */
42extern int __boot_cpu_mode; 43extern int __boot_cpu_mode;
43 44
45static inline void sync_boot_mode(void)
46{
47 /*
48 * As secondaries write to __boot_cpu_mode with caches disabled, we
49 * must flush the corresponding cache entries to ensure the visibility
50 * of their writes.
51 */
52 sync_cache_r(&__boot_cpu_mode);
53}
54
44void __hyp_set_vectors(unsigned long phys_vector_base); 55void __hyp_set_vectors(unsigned long phys_vector_base);
45unsigned long __hyp_get_vectors(void); 56unsigned long __hyp_get_vectors(void);
46#else 57#else
47#define __boot_cpu_mode (SVC_MODE) 58#define __boot_cpu_mode (SVC_MODE)
59#define sync_boot_mode()
48#endif 60#endif
49 61
50#ifndef ZIMAGE 62#ifndef ZIMAGE
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 47bcb2d254af..18d76fd5a2af 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -1,7 +1,6 @@
1# UAPI Header export list 1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4header-y += a.out.h
5header-y += byteorder.h 4header-y += byteorder.h
6header-y += fcntl.h 5header-y += fcntl.h
7header-y += hwcap.h 6header-y += hwcap.h
diff --git a/arch/arm/include/uapi/asm/a.out.h b/arch/arm/include/uapi/asm/a.out.h
deleted file mode 100644
index 083894b2e3bc..000000000000
--- a/arch/arm/include/uapi/asm/a.out.h
+++ /dev/null
@@ -1,34 +0,0 @@
1#ifndef __ARM_A_OUT_H__
2#define __ARM_A_OUT_H__
3
4#include <linux/personality.h>
5#include <linux/types.h>
6
7struct exec
8{
9 __u32 a_info; /* Use macros N_MAGIC, etc for access */
10 __u32 a_text; /* length of text, in bytes */
11 __u32 a_data; /* length of data, in bytes */
12 __u32 a_bss; /* length of uninitialized data area for file, in bytes */
13 __u32 a_syms; /* length of symbol table data in file, in bytes */
14 __u32 a_entry; /* start address */
15 __u32 a_trsize; /* length of relocation info for text, in bytes */
16 __u32 a_drsize; /* length of relocation info for data, in bytes */
17};
18
19/*
20 * This is always the same
21 */
22#define N_TXTADDR(a) (0x00008000)
23
24#define N_TRSIZE(a) ((a).a_trsize)
25#define N_DRSIZE(a) ((a).a_drsize)
26#define N_SYMSIZE(a) ((a).a_syms)
27
28#define M_ARM 103
29
30#ifndef LIBRARY_START_TEXT
31#define LIBRARY_START_TEXT (0x00c00000)
32#endif
33
34#endif /* __A_OUT_GNU_H__ */
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index a39cfc2a1f90..9cbe70c8b0ef 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -357,7 +357,8 @@ ENDPROC(__pabt_svc)
357 .endm 357 .endm
358 358
359 .macro kuser_cmpxchg_check 359 .macro kuser_cmpxchg_check
360#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) 360#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
361 !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
361#ifndef CONFIG_MMU 362#ifndef CONFIG_MMU
362#warning "NPTL on non MMU needs fixing" 363#warning "NPTL on non MMU needs fixing"
363#else 364#else
@@ -742,6 +743,18 @@ ENDPROC(__switch_to)
742#endif 743#endif
743 .endm 744 .endm
744 745
746 .macro kuser_pad, sym, size
747 .if (. - \sym) & 3
748 .rept 4 - (. - \sym) & 3
749 .byte 0
750 .endr
751 .endif
752 .rept (\size - (. - \sym)) / 4
753 .word 0xe7fddef1
754 .endr
755 .endm
756
757#ifdef CONFIG_KUSER_HELPERS
745 .align 5 758 .align 5
746 .globl __kuser_helper_start 759 .globl __kuser_helper_start
747__kuser_helper_start: 760__kuser_helper_start:
@@ -832,18 +845,13 @@ kuser_cmpxchg64_fixup:
832#error "incoherent kernel configuration" 845#error "incoherent kernel configuration"
833#endif 846#endif
834 847
835 /* pad to next slot */ 848 kuser_pad __kuser_cmpxchg64, 64
836 .rept (16 - (. - __kuser_cmpxchg64)/4)
837 .word 0
838 .endr
839
840 .align 5
841 849
842__kuser_memory_barrier: @ 0xffff0fa0 850__kuser_memory_barrier: @ 0xffff0fa0
843 smp_dmb arm 851 smp_dmb arm
844 usr_ret lr 852 usr_ret lr
845 853
846 .align 5 854 kuser_pad __kuser_memory_barrier, 32
847 855
848__kuser_cmpxchg: @ 0xffff0fc0 856__kuser_cmpxchg: @ 0xffff0fc0
849 857
@@ -916,13 +924,14 @@ kuser_cmpxchg32_fixup:
916 924
917#endif 925#endif
918 926
919 .align 5 927 kuser_pad __kuser_cmpxchg, 32
920 928
921__kuser_get_tls: @ 0xffff0fe0 929__kuser_get_tls: @ 0xffff0fe0
922 ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init 930 ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
923 usr_ret lr 931 usr_ret lr
924 mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code 932 mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
925 .rep 4 933 kuser_pad __kuser_get_tls, 16
934 .rep 3
926 .word 0 @ 0xffff0ff0 software TLS value, then 935 .word 0 @ 0xffff0ff0 software TLS value, then
927 .endr @ pad up to __kuser_helper_version 936 .endr @ pad up to __kuser_helper_version
928 937
@@ -932,14 +941,16 @@ __kuser_helper_version: @ 0xffff0ffc
932 .globl __kuser_helper_end 941 .globl __kuser_helper_end
933__kuser_helper_end: 942__kuser_helper_end:
934 943
944#endif
945
935 THUMB( .thumb ) 946 THUMB( .thumb )
936 947
937/* 948/*
938 * Vector stubs. 949 * Vector stubs.
939 * 950 *
940 * This code is copied to 0xffff0200 so we can use branches in the 951 * This code is copied to 0xffff1000 so we can use branches in the
941 * vectors, rather than ldr's. Note that this code must not 952 * vectors, rather than ldr's. Note that this code must not exceed
942 * exceed 0x300 bytes. 953 * a page size.
943 * 954 *
944 * Common stub entry macro: 955 * Common stub entry macro:
945 * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC 956 * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -986,8 +997,17 @@ ENDPROC(vector_\name)
9861: 9971:
987 .endm 998 .endm
988 999
989 .globl __stubs_start 1000 .section .stubs, "ax", %progbits
990__stubs_start: 1001__stubs_start:
1002 @ This must be the first word
1003 .word vector_swi
1004
1005vector_rst:
1006 ARM( swi SYS_ERROR0 )
1007 THUMB( svc #0 )
1008 THUMB( nop )
1009 b vector_und
1010
991/* 1011/*
992 * Interrupt dispatcher 1012 * Interrupt dispatcher
993 */ 1013 */
@@ -1082,6 +1102,16 @@ __stubs_start:
1082 .align 5 1102 .align 5
1083 1103
1084/*============================================================================= 1104/*=============================================================================
1105 * Address exception handler
1106 *-----------------------------------------------------------------------------
1107 * These aren't too critical.
1108 * (they're not supposed to happen, and won't happen in 32-bit data mode).
1109 */
1110
1111vector_addrexcptn:
1112 b vector_addrexcptn
1113
1114/*=============================================================================
1085 * Undefined FIQs 1115 * Undefined FIQs
1086 *----------------------------------------------------------------------------- 1116 *-----------------------------------------------------------------------------
1087 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC 1117 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
@@ -1094,45 +1124,19 @@ __stubs_start:
1094vector_fiq: 1124vector_fiq:
1095 subs pc, lr, #4 1125 subs pc, lr, #4
1096 1126
1097/*============================================================================= 1127 .globl vector_fiq_offset
1098 * Address exception handler 1128 .equ vector_fiq_offset, vector_fiq
1099 *-----------------------------------------------------------------------------
1100 * These aren't too critical.
1101 * (they're not supposed to happen, and won't happen in 32-bit data mode).
1102 */
1103
1104vector_addrexcptn:
1105 b vector_addrexcptn
1106
1107/*
1108 * We group all the following data together to optimise
1109 * for CPUs with separate I & D caches.
1110 */
1111 .align 5
1112
1113.LCvswi:
1114 .word vector_swi
1115
1116 .globl __stubs_end
1117__stubs_end:
1118
1119 .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
1120 1129
1121 .globl __vectors_start 1130 .section .vectors, "ax", %progbits
1122__vectors_start: 1131__vectors_start:
1123 ARM( swi SYS_ERROR0 ) 1132 W(b) vector_rst
1124 THUMB( svc #0 ) 1133 W(b) vector_und
1125 THUMB( nop ) 1134 W(ldr) pc, __vectors_start + 0x1000
1126 W(b) vector_und + stubs_offset 1135 W(b) vector_pabt
1127 W(ldr) pc, .LCvswi + stubs_offset 1136 W(b) vector_dabt
1128 W(b) vector_pabt + stubs_offset 1137 W(b) vector_addrexcptn
1129 W(b) vector_dabt + stubs_offset 1138 W(b) vector_irq
1130 W(b) vector_addrexcptn + stubs_offset 1139 W(b) vector_fiq
1131 W(b) vector_irq + stubs_offset
1132 W(b) vector_fiq + stubs_offset
1133
1134 .globl __vectors_end
1135__vectors_end:
1136 1140
1137 .data 1141 .data
1138 1142
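
The entry-armv.S changes above replace the old stubs_offset link-time arithmetic with two freestanding sections: .vectors is copied to the vectors page and .stubs to the page above it, while the kuser helpers (when configured) are padded to their fixed slots with 0xe7fddef1 poison words by kuser_pad. The resulting layout, sketched as constants (illustrative; high-vectors configuration assumed):

    /* Illustrative address map after the change: */
    static const unsigned long vectors_copy = 0xffff0000;          /* .vectors */
    static const unsigned long stubs_copy = 0xffff0000 + 0x1000;   /* .stubs */
    /* The SWI vector, "W(ldr) pc, __vectors_start + 0x1000", loads the
     * first word of the stubs page, which ".word vector_swi" at
     * __stubs_start deliberately provides. */
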
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index e00621f1403f..52b26432c9a9 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -49,7 +49,7 @@ __irq_entry:
49 mov r1, sp 49 mov r1, sp
50 stmdb sp!, {lr} 50 stmdb sp!, {lr}
51 @ routine called with r0 = irq number, r1 = struct pt_regs * 51 @ routine called with r0 = irq number, r1 = struct pt_regs *
52 bl nvic_do_IRQ 52 bl nvic_handle_irq
53 53
54 pop {lr} 54 pop {lr}
55 @ 55 @
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 2adda11f712f..918875d96d5d 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -47,6 +47,11 @@
47#include <asm/irq.h> 47#include <asm/irq.h>
48#include <asm/traps.h> 48#include <asm/traps.h>
49 49
50#define FIQ_OFFSET ({ \
51 extern void *vector_fiq_offset; \
52 (unsigned)&vector_fiq_offset; \
53 })
54
50static unsigned long no_fiq_insn; 55static unsigned long no_fiq_insn;
51 56
52/* Default reacquire function 57/* Default reacquire function
@@ -79,14 +84,14 @@ int show_fiq_list(struct seq_file *p, int prec)
79 84
80void set_fiq_handler(void *start, unsigned int length) 85void set_fiq_handler(void *start, unsigned int length)
81{ 86{
82#if defined(CONFIG_CPU_USE_DOMAINS) 87 void *base = vectors_page;
83 memcpy((void *)0xffff001c, start, length); 88 unsigned offset = FIQ_OFFSET;
84#else 89
85 memcpy(vectors_page + 0x1c, start, length); 90 memcpy(base + offset, start, length);
86#endif 91 if (!cache_is_vipt_nonaliasing())
87 flush_icache_range(0xffff001c, 0xffff001c + length); 92 flush_icache_range((unsigned long)base + offset, offset +
88 if (!vectors_high()) 93 length);
89 flush_icache_range(0x1c, 0x1c + length); 94 flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
90} 95}
91 96
92int claim_fiq(struct fiq_handler *f) 97int claim_fiq(struct fiq_handler *f)
@@ -144,6 +149,7 @@ EXPORT_SYMBOL(disable_fiq);
144 149
145void __init init_FIQ(int start) 150void __init init_FIQ(int start)
146{ 151{
147 no_fiq_insn = *(unsigned long *)0xffff001c; 152 unsigned offset = FIQ_OFFSET;
153 no_fiq_insn = *(unsigned long *)(0xffff0000 + offset);
148 fiq_start = start; 154 fiq_start = start;
149} 155}
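
FIQ_OFFSET above turns an assembler constant into a C value: entry-armv.S defines vector_fiq_offset with .equ as the offset of vector_fiq within the stubs, and the C side declares it as an object and takes its address, which *is* the offset (the symbol is never dereferenced). Usage then pairs the writable and executable aliases of the page, as in the new set_fiq_handler() (a sketch in kernel context):

    static void fiq_aliases_sketch(void)
    {
        unsigned offset = FIQ_OFFSET;
        void *writable = vectors_page + offset;         /* copy target */
        unsigned long executable = 0xffff0000 + offset; /* where it runs */
    }
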
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index b361de143756..14235ba64a90 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -87,6 +87,7 @@ ENTRY(stext)
87ENDPROC(stext) 87ENDPROC(stext)
88 88
89#ifdef CONFIG_SMP 89#ifdef CONFIG_SMP
90 .text
90ENTRY(secondary_startup) 91ENTRY(secondary_startup)
91 /* 92 /*
92 * Common entry point for secondary CPUs. 93 * Common entry point for secondary CPUs.
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 9cf6063020ae..2c7cc1e03473 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -343,6 +343,7 @@ __turn_mmu_on_loc:
343 .long __turn_mmu_on_end 343 .long __turn_mmu_on_end
344 344
345#if defined(CONFIG_SMP) 345#if defined(CONFIG_SMP)
346 .text
346ENTRY(secondary_startup) 347ENTRY(secondary_startup)
347 /* 348 /*
348 * Common entry point for secondary CPUs. 349 * Common entry point for secondary CPUs.
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 4910232c4833..797b1a6a4906 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -56,8 +56,8 @@ ENTRY(__boot_cpu_mode)
56 ldr \reg3, [\reg2] 56 ldr \reg3, [\reg2]
57 ldr \reg1, [\reg2, \reg3] 57 ldr \reg1, [\reg2, \reg3]
58 cmp \mode, \reg1 @ matches primary CPU boot mode? 58 cmp \mode, \reg1 @ matches primary CPU boot mode?
59 orrne r7, r7, #BOOT_CPU_MODE_MISMATCH 59 orrne \reg1, \reg1, #BOOT_CPU_MODE_MISMATCH
60 strne r7, [r5, r6] @ record what happened and give up 60 strne \reg1, [\reg2, \reg3] @ record what happened and give up
61 .endm 61 .endm
62 62
63#else /* ZIMAGE */ 63#else /* ZIMAGE */
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 4fb074c446bf..57221e349a7c 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -15,6 +15,7 @@
15#include <asm/mmu_context.h> 15#include <asm/mmu_context.h>
16#include <asm/cacheflush.h> 16#include <asm/cacheflush.h>
17#include <asm/mach-types.h> 17#include <asm/mach-types.h>
18#include <asm/smp_plat.h>
18#include <asm/system_misc.h> 19#include <asm/system_misc.h>
19 20
20extern const unsigned char relocate_new_kernel[]; 21extern const unsigned char relocate_new_kernel[];
@@ -39,6 +40,14 @@ int machine_kexec_prepare(struct kimage *image)
39 int i, err; 40 int i, err;
40 41
41 /* 42 /*
43 * Validate that if the current HW supports SMP, then the SW supports
44 * and implements CPU hotplug for the current HW. If not, we won't be
45 * able to kexec reliably, so fail the prepare operation.
46 */
47 if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
48 return -EINVAL;
49
50 /*
42 * No segment at default ATAGs address. try to locate 51 * No segment at default ATAGs address. try to locate
43 * a dtb using magic. 52 * a dtb using magic.
44 */ 53 */
@@ -73,6 +82,7 @@ void machine_crash_nonpanic_core(void *unused)
73 crash_save_cpu(&regs, smp_processor_id()); 82 crash_save_cpu(&regs, smp_processor_id());
74 flush_cache_all(); 83 flush_cache_all();
75 84
85 set_cpu_online(smp_processor_id(), false);
76 atomic_dec(&waiting_for_crash_ipi); 86 atomic_dec(&waiting_for_crash_ipi);
77 while (1) 87 while (1)
78 cpu_relax(); 88 cpu_relax();
@@ -134,10 +144,13 @@ void machine_kexec(struct kimage *image)
134 unsigned long reboot_code_buffer_phys; 144 unsigned long reboot_code_buffer_phys;
135 void *reboot_code_buffer; 145 void *reboot_code_buffer;
136 146
137 if (num_online_cpus() > 1) { 147 /*
138 pr_err("kexec: error: multiple CPUs still online\n"); 148 * This can only happen if machine_shutdown() failed to disable some
139 return; 149 * CPU, and that can only happen if the checks in
140 } 150 * machine_kexec_prepare() were not correct. If this fails, we can't
151 * reliably kexec anyway, so BUG_ON is appropriate.
152 */
153 BUG_ON(num_online_cpus() > 1);
141 154
142 page_list = image->head & PAGE_MASK; 155 page_list = image->head & PAGE_MASK;
143 156
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d9f5cd4e533f..e186ee1e63f6 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
53static int 53static int
54armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) 54armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
55{ 55{
56 int mapping = (*event_map)[config]; 56 int mapping;
57
58 if (config >= PERF_COUNT_HW_MAX)
59 return -EINVAL;
60
61 mapping = (*event_map)[config];
57 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; 62 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
58} 63}
59 64
@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
253 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); 258 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
254 struct pmu *leader_pmu = event->group_leader->pmu; 259 struct pmu *leader_pmu = event->group_leader->pmu;
255 260
261 if (is_software_event(event))
262 return 1;
263
256 if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) 264 if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
257 return 1; 265 return 1;
258 266
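
The armpmu_map_hw_event() change above is the classic validate-then-index fix: config comes from user space, so it must be range-checked before it is used as an array subscript. In outline (a sketch, not the kernel function):

    #include <errno.h>
    #include <stddef.h>

    static int map_event_sketch(const unsigned int map[], size_t nr,
                                unsigned long long config)
    {
        if (config >= nr)
            return -EINVAL;     /* the previously missing check */
        return map[config];
    }
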
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index d3ca4f6915af..94f6b05f9e24 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -197,6 +197,7 @@ void machine_shutdown(void)
197 */ 197 */
198void machine_halt(void) 198void machine_halt(void)
199{ 199{
200 local_irq_disable();
200 smp_send_stop(); 201 smp_send_stop();
201 202
202 local_irq_disable(); 203 local_irq_disable();
@@ -211,6 +212,7 @@ void machine_halt(void)
211 */ 212 */
212void machine_power_off(void) 213void machine_power_off(void)
213{ 214{
215 local_irq_disable();
214 smp_send_stop(); 216 smp_send_stop();
215 217
216 if (pm_power_off) 218 if (pm_power_off)
@@ -230,6 +232,7 @@ void machine_power_off(void)
230 */ 232 */
231void machine_restart(char *cmd) 233void machine_restart(char *cmd)
232{ 234{
235 local_irq_disable();
233 smp_send_stop(); 236 smp_send_stop();
234 237
235 arm_pm_restart(reboot_mode, cmd); 238 arm_pm_restart(reboot_mode, cmd);
@@ -426,10 +429,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
426} 429}
427 430
428#ifdef CONFIG_MMU 431#ifdef CONFIG_MMU
432#ifdef CONFIG_KUSER_HELPERS
429/* 433/*
430 * The vectors page is always readable from user space for the 434 * The vectors page is always readable from user space for the
431 * atomic helpers and the signal restart code. Insert it into the 435 * atomic helpers. Insert it into the gate_vma so that it is visible
432 * gate_vma so that it is visible through ptrace and /proc/<pid>/mem. 436 * through ptrace and /proc/<pid>/mem.
433 */ 437 */
434static struct vm_area_struct gate_vma = { 438static struct vm_area_struct gate_vma = {
435 .vm_start = 0xffff0000, 439 .vm_start = 0xffff0000,
@@ -458,9 +462,48 @@ int in_gate_area_no_mm(unsigned long addr)
458{ 462{
459 return in_gate_area(NULL, addr); 463 return in_gate_area(NULL, addr);
460} 464}
465#define is_gate_vma(vma) ((vma) == &gate_vma)
466#else
467#define is_gate_vma(vma) 0
468#endif
461 469
462const char *arch_vma_name(struct vm_area_struct *vma) 470const char *arch_vma_name(struct vm_area_struct *vma)
463{ 471{
464 return (vma == &gate_vma) ? "[vectors]" : NULL; 472 return is_gate_vma(vma) ? "[vectors]" :
473 (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
474 "[sigpage]" : NULL;
475}
476
477static struct page *signal_page;
478extern struct page *get_signal_page(void);
479
480int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
481{
482 struct mm_struct *mm = current->mm;
483 unsigned long addr;
484 int ret;
485
486 if (!signal_page)
487 signal_page = get_signal_page();
488 if (!signal_page)
489 return -ENOMEM;
490
491 down_write(&mm->mmap_sem);
492 addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
493 if (IS_ERR_VALUE(addr)) {
494 ret = addr;
495 goto up_fail;
496 }
497
498 ret = install_special_mapping(mm, addr, PAGE_SIZE,
499 VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
500 &signal_page);
501
502 if (ret == 0)
503 mm->context.sigpage = addr;
504
505 up_fail:
506 up_write(&mm->mmap_sem);
507 return ret;
465} 508}
466#endif 509#endif
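
With the hunk above, every MMU process gets a per-mm "[sigpage]" mapping installed at exec time, separate from the fixed "[vectors]" gate VMA that now exists only with CONFIG_KUSER_HELPERS. A hypothetical userspace spot-check (not from the patch) that the new mapping appears:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/proc/self/maps", "r");

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            if (strstr(line, "[sigpage]"))
                fputs(line, stdout);    /* one page-sized entry */
        fclose(f);
        return 0;
    }
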
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 63af9a7ae512..afc2489ee13b 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -836,6 +836,8 @@ static int __init meminfo_cmp(const void *_a, const void *_b)
836void __init hyp_mode_check(void) 836void __init hyp_mode_check(void)
837{ 837{
838#ifdef CONFIG_ARM_VIRT_EXT 838#ifdef CONFIG_ARM_VIRT_EXT
839 sync_boot_mode();
840
839 if (is_hyp_mode_available()) { 841 if (is_hyp_mode_available()) {
840 pr_info("CPU: All CPU(s) started in HYP mode.\n"); 842 pr_info("CPU: All CPU(s) started in HYP mode.\n");
841 pr_info("CPU: Virtualization extensions available.\n"); 843 pr_info("CPU: Virtualization extensions available.\n");
@@ -971,6 +973,7 @@ static const char *hwcap_str[] = {
971 "vfpv4", 973 "vfpv4",
972 "idiva", 974 "idiva",
973 "idivt", 975 "idivt",
976 "vfpd32",
974 "lpae", 977 "lpae",
975 NULL 978 NULL
976}; 979};
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 1c16c35c271a..ab3304225272 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -8,6 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#include <linux/errno.h> 10#include <linux/errno.h>
11#include <linux/random.h>
11#include <linux/signal.h> 12#include <linux/signal.h>
12#include <linux/personality.h> 13#include <linux/personality.h>
13#include <linux/uaccess.h> 14#include <linux/uaccess.h>
@@ -15,12 +16,11 @@
15 16
16#include <asm/elf.h> 17#include <asm/elf.h>
17#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
19#include <asm/traps.h>
18#include <asm/ucontext.h> 20#include <asm/ucontext.h>
19#include <asm/unistd.h> 21#include <asm/unistd.h>
20#include <asm/vfp.h> 22#include <asm/vfp.h>
21 23
22#include "signal.h"
23
24/* 24/*
25 * For ARM syscalls, we encode the syscall number into the instruction. 25 * For ARM syscalls, we encode the syscall number into the instruction.
26 */ 26 */
@@ -40,11 +40,13 @@
40#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE)) 40#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
41#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) 41#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
42 42
43const unsigned long sigreturn_codes[7] = { 43static const unsigned long sigreturn_codes[7] = {
44 MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, 44 MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
45 MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN, 45 MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
46}; 46};
47 47
48static unsigned long signal_return_offset;
49
48#ifdef CONFIG_CRUNCH 50#ifdef CONFIG_CRUNCH
49static int preserve_crunch_context(struct crunch_sigframe __user *frame) 51static int preserve_crunch_context(struct crunch_sigframe __user *frame)
50{ 52{
@@ -400,14 +402,20 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
400 __put_user(sigreturn_codes[idx+1], rc+1)) 402 __put_user(sigreturn_codes[idx+1], rc+1))
401 return 1; 403 return 1;
402 404
403 if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) { 405#ifdef CONFIG_MMU
406 if (cpsr & MODE32_BIT) {
407 struct mm_struct *mm = current->mm;
408
404 /* 409 /*
405 * 32-bit code can use the new high-page 410 * 32-bit code can use the signal return page
406 * signal return code support except when the MPU has 411 * except when the MPU has protected the vectors
407 * protected the vectors page from PL0 412 * page from PL0
408 */ 413 */
409 retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb; 414 retcode = mm->context.sigpage + signal_return_offset +
410 } else { 415 (idx << 2) + thumb;
416 } else
417#endif
418 {
411 /* 419 /*
412 * Ensure that the instruction cache sees 420 * Ensure that the instruction cache sees
413 * the return code written onto the stack. 421 * the return code written onto the stack.
@@ -608,3 +616,33 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
608 } while (thread_flags & _TIF_WORK_MASK); 616 } while (thread_flags & _TIF_WORK_MASK);
609 return 0; 617 return 0;
610} 618}
619
620struct page *get_signal_page(void)
621{
622 unsigned long ptr;
623 unsigned offset;
624 struct page *page;
625 void *addr;
626
627 page = alloc_pages(GFP_KERNEL, 0);
628
629 if (!page)
630 return NULL;
631
632 addr = page_address(page);
633
634 /* Give the signal return code some randomness */
635 offset = 0x200 + (get_random_int() & 0x7fc);
636 signal_return_offset = offset;
637
638 /*
639 * Copy signal return handlers into the vector page, and
640 * set sigreturn to be a pointer to these.
641 */
642 memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
643
644 ptr = (unsigned long)addr + offset;
645 flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
646
647 return page;
648}
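
The randomisation in get_signal_page() above stays word-aligned and in-page by construction: the mask 0x7fc keeps bits 2..10, so the offset ranges over 0x200..0x9fc in 4-byte steps, and even the largest offset plus the 28-byte sigreturn_codes array ends at 0xa18, inside one page. Checked numerically:

    #include <assert.h>

    static void sigpage_offset_bounds(void)
    {
        unsigned min = 0x200 + (0x00000000 & 0x7fc);    /* 0x200 */
        unsigned max = 0x200 + (0xffffffff & 0x7fc);    /* 0x9fc */

        assert(min % 4 == 0 && max % 4 == 0);
        assert(max + 7 * 4 <= 0x1000);  /* 7-word sigreturn_codes fits */
    }
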
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
deleted file mode 100644
index 5ff067b7c752..000000000000
--- a/arch/arm/kernel/signal.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/*
2 * linux/arch/arm/kernel/signal.h
3 *
4 * Copyright (C) 2005-2009 Russell King.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
11
12extern const unsigned long sigreturn_codes[7];
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c2b4f8f0be9a..2dc19349eb19 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -145,6 +145,16 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle)
145 return -ENOSYS; 145 return -ENOSYS;
146} 146}
147 147
148int platform_can_cpu_hotplug(void)
149{
150#ifdef CONFIG_HOTPLUG_CPU
151 if (smp_ops.cpu_kill)
152 return 1;
153#endif
154
155 return 0;
156}
157
148#ifdef CONFIG_HOTPLUG_CPU 158#ifdef CONFIG_HOTPLUG_CPU
149static void percpu_timer_stop(void); 159static void percpu_timer_stop(void);
150 160
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index a98b62dca2fa..c2edfff573c2 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -70,23 +70,6 @@ static inline void ipi_flush_bp_all(void *ignored)
70 local_flush_bp_all(); 70 local_flush_bp_all();
71} 71}
72 72
73#ifdef CONFIG_ARM_ERRATA_798181
74static int erratum_a15_798181(void)
75{
76 unsigned int midr = read_cpuid_id();
77
78 /* Cortex-A15 r0p0..r3p2 affected */
79 if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
80 return 0;
81 return 1;
82}
83#else
84static int erratum_a15_798181(void)
85{
86 return 0;
87}
88#endif
89
90static void ipi_flush_tlb_a15_erratum(void *arg) 73static void ipi_flush_tlb_a15_erratum(void *arg)
91{ 74{
92 dmb(); 75 dmb();
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cab094c234ee..ab517fcce21b 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -35,8 +35,6 @@
35#include <asm/tls.h> 35#include <asm/tls.h>
36#include <asm/system_misc.h> 36#include <asm/system_misc.h>
37 37
38#include "signal.h"
39
40static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; 38static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
41 39
42void *vectors_page; 40void *vectors_page;
@@ -800,15 +798,26 @@ void __init trap_init(void)
800 return; 798 return;
801} 799}
802 800
803static void __init kuser_get_tls_init(unsigned long vectors) 801#ifdef CONFIG_KUSER_HELPERS
802static void __init kuser_init(void *vectors)
804{ 803{
804 extern char __kuser_helper_start[], __kuser_helper_end[];
805 int kuser_sz = __kuser_helper_end - __kuser_helper_start;
806
807 memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
808
805 /* 809 /*
806 * vectors + 0xfe0 = __kuser_get_tls 810 * vectors + 0xfe0 = __kuser_get_tls
807 * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8 811 * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
808 */ 812 */
809 if (tls_emu || has_tls_reg) 813 if (tls_emu || has_tls_reg)
810 memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4); 814 memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
811} 815}
816#else
817static void __init kuser_init(void *vectors)
818{
819}
820#endif
812 821
813void __init early_trap_init(void *vectors_base) 822void __init early_trap_init(void *vectors_base)
814{ 823{
@@ -816,33 +825,30 @@ void __init early_trap_init(void *vectors_base)
816 unsigned long vectors = (unsigned long)vectors_base; 825 unsigned long vectors = (unsigned long)vectors_base;
817 extern char __stubs_start[], __stubs_end[]; 826 extern char __stubs_start[], __stubs_end[];
818 extern char __vectors_start[], __vectors_end[]; 827 extern char __vectors_start[], __vectors_end[];
819 extern char __kuser_helper_start[], __kuser_helper_end[]; 828 unsigned i;
820 int kuser_sz = __kuser_helper_end - __kuser_helper_start;
821 829
822 vectors_page = vectors_base; 830 vectors_page = vectors_base;
823 831
824 /* 832 /*
833 * Poison the vectors page with an undefined instruction. This
834 * instruction is chosen to be undefined for both ARM and Thumb
835 * ISAs. The Thumb version is an undefined instruction with a
836 * branch back to the undefined instruction.
837 */
838 for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
839 ((u32 *)vectors_base)[i] = 0xe7fddef1;
840
841 /*
825 * Copy the vectors, stubs and kuser helpers (in entry-armv.S) 842 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
826 * into the vector page, mapped at 0xffff0000, and ensure these 843 * into the vector page, mapped at 0xffff0000, and ensure these
827 * are visible to the instruction stream. 844 * are visible to the instruction stream.
828 */ 845 */
829 memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); 846 memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
830 memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start); 847 memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
831 memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
832 848
833 /* 849 kuser_init(vectors_base);
834 * Do processor specific fixups for the kuser helpers
835 */
836 kuser_get_tls_init(vectors);
837
838 /*
839 * Copy signal return handlers into the vector page, and
840 * set sigreturn to be a pointer to these.
841 */
842 memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
843 sigreturn_codes, sizeof(sigreturn_codes));
844 850
845 flush_icache_range(vectors, vectors + PAGE_SIZE); 851 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
846 modify_domain(DOMAIN_USER, DOMAIN_CLIENT); 852 modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
847#else /* ifndef CONFIG_CPU_V7M */ 853#else /* ifndef CONFIG_CPU_V7M */
848 /* 854 /*
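
The poison value 0xe7fddef1 used above traps in both instruction sets: as an ARM word it lies in the architecturally undefined space, and read as little-endian Thumb halfwords it is 0xdef1 (a permanently undefined encoding) followed by 0xe7fd (a branch back onto it), matching the comment in the hunk. Split out:

    static void poison_decode(void)
    {
        unsigned int poison = 0xe7fddef1;
        unsigned short thumb_first = poison & 0xffff;   /* 0xdef1: UDF */
        unsigned short thumb_second = poison >> 16;     /* 0xe7fd: B .-2 */

        (void)thumb_first;
        (void)thumb_second;
    }
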
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index fa25e4e425f6..7bcee5c9b604 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -148,6 +148,23 @@ SECTIONS
148 . = ALIGN(PAGE_SIZE); 148 . = ALIGN(PAGE_SIZE);
149 __init_begin = .; 149 __init_begin = .;
150#endif 150#endif
151 /*
152 * The vectors and stubs are relocatable code, and the
153 * only thing that matters is their relative offsets
154 */
155 __vectors_start = .;
156 .vectors 0 : AT(__vectors_start) {
157 *(.vectors)
158 }
159 . = __vectors_start + SIZEOF(.vectors);
160 __vectors_end = .;
161
162 __stubs_start = .;
163 .stubs 0x1000 : AT(__stubs_start) {
164 *(.stubs)
165 }
166 . = __stubs_start + SIZEOF(.stubs);
167 __stubs_end = .;
151 168
152 INIT_TEXT_SECTION(8) 169 INIT_TEXT_SECTION(8)
153 .exit.text : { 170 .exit.text : {
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 4a5199070430..db9cf692d4dd 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -146,7 +146,11 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
146#define access_pmintenclr pm_fake 146#define access_pmintenclr pm_fake
147 147
148/* Architected CP15 registers. 148/* Architected CP15 registers.
149 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 149 * CRn denotes the primary register number, but is copied to the CRm in the
150 * user space API for 64-bit register access in line with the terminology used
151 * in the ARM ARM.
152 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
153 * registers preceding 32-bit ones.
150 */ 154 */
151static const struct coproc_reg cp15_regs[] = { 155static const struct coproc_reg cp15_regs[] = {
152 /* CSSELR: swapped by interrupt.S. */ 156 /* CSSELR: swapped by interrupt.S. */
@@ -154,8 +158,8 @@ static const struct coproc_reg cp15_regs[] = {
154 NULL, reset_unknown, c0_CSSELR }, 158 NULL, reset_unknown, c0_CSSELR },
155 159
156 /* TTBR0/TTBR1: swapped by interrupt.S. */ 160 /* TTBR0/TTBR1: swapped by interrupt.S. */
157 { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, 161 { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
158 { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, 162 { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
159 163
160 /* TTBCR: swapped by interrupt.S. */ 164 /* TTBCR: swapped by interrupt.S. */
161 { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, 165 { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
@@ -182,7 +186,7 @@ static const struct coproc_reg cp15_regs[] = {
182 NULL, reset_unknown, c6_IFAR }, 186 NULL, reset_unknown, c6_IFAR },
183 187
184 /* PAR swapped by interrupt.S */ 188 /* PAR swapped by interrupt.S */
185 { CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, 189 { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
186 190
187 /* 191 /*
188 * DC{C,I,CI}SW operations: 192 * DC{C,I,CI}SW operations:
@@ -399,12 +403,13 @@ static bool index_to_params(u64 id, struct coproc_params *params)
399 | KVM_REG_ARM_OPC1_MASK)) 403 | KVM_REG_ARM_OPC1_MASK))
400 return false; 404 return false;
401 params->is_64bit = true; 405 params->is_64bit = true;
402 params->CRm = ((id & KVM_REG_ARM_CRM_MASK) 406 /* CRm to CRn: see cp15_to_index for details */
407 params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
403 >> KVM_REG_ARM_CRM_SHIFT); 408 >> KVM_REG_ARM_CRM_SHIFT);
404 params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) 409 params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
405 >> KVM_REG_ARM_OPC1_SHIFT); 410 >> KVM_REG_ARM_OPC1_SHIFT);
406 params->Op2 = 0; 411 params->Op2 = 0;
407 params->CRn = 0; 412 params->CRm = 0;
408 return true; 413 return true;
409 default: 414 default:
410 return false; 415 return false;
@@ -898,7 +903,14 @@ static u64 cp15_to_index(const struct coproc_reg *reg)
898 if (reg->is_64) { 903 if (reg->is_64) {
899 val |= KVM_REG_SIZE_U64; 904 val |= KVM_REG_SIZE_U64;
900 val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); 905 val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
901 val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); 906 /*
907 * CRn always denotes the primary coproc. reg. nr. for the
908 * in-kernel representation, but the user space API uses the
909 * CRm for the encoding, because it is modelled after the
910 * MRRC/MCRR instructions: see the ARM ARM rev. c page
911 * B3-1445
912 */
913 val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
902 } else { 914 } else {
903 val |= KVM_REG_SIZE_U32; 915 val |= KVM_REG_SIZE_U32;
904 val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); 916 val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
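
cp15_to_index() above now routes the in-kernel CRn of a 64-bit register into the CRm field of the user-space index, mirroring the MRRC/MCRR encoding. Composing the id for TTBR0 (CRm64(2), Op1(0)) would look roughly like this; macro names are from the kvm uapi headers, and the exact masks should be treated as assumptions:

    u64 ttbr0_id = KVM_REG_ARM | KVM_REG_SIZE_U64
                 | (15ULL << KVM_REG_ARM_COPROC_SHIFT)  /* cp15 */
                 | (0ULL << KVM_REG_ARM_OPC1_SHIFT)     /* Op1 = 0 */
                 | (2ULL << KVM_REG_ARM_CRM_SHIFT);     /* CRn 2 -> user CRm */
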
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index b7301d3e4799..0461d5c8d3de 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -135,6 +135,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 		return -1;
 	if (i1->CRn != i2->CRn)
 		return i1->CRn - i2->CRn;
+	if (i1->is_64 != i2->is_64)
+		return i2->is_64 - i1->is_64;
 	if (i1->CRm != i2->CRm)
 		return i1->CRm - i2->CRm;
 	if (i1->Op1 != i2->Op1)
@@ -145,6 +147,7 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 
 #define CRn(_x)		.CRn = _x
 #define CRm(_x)		.CRm = _x
+#define CRm64(_x)	.CRn = _x, .CRm = 0
 #define Op1(_x)		.Op1 = _x
 #define Op2(_x)		.Op2 = _x
 #define is64		.is_64 = true
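The two hunks above change how 64-bit cp15 registers are keyed: the primary register number lives in CRn in the kernel's table (via the new CRm64() initialiser), while the user space index carries it in the CRm field, and the comparator now sorts 64-bit entries ahead of 32-bit ones sharing a CRn. A minimal user-space sketch of that ordering (hypothetical struct and values, not the kernel's struct coproc_reg):

#include <stdio.h>
#include <stdbool.h>

struct reg { int CRn, CRm, Op1, Op2; bool is_64; };

static int cmp_reg(const struct reg *i1, const struct reg *i2)
{
	if (i1->CRn != i2->CRn)
		return i1->CRn - i2->CRn;
	if (i1->is_64 != i2->is_64)
		return i2->is_64 - i1->is_64;	/* 64-bit entries first */
	if (i1->CRm != i2->CRm)
		return i1->CRm - i2->CRm;
	if (i1->Op1 != i2->Op1)
		return i1->Op1 - i2->Op1;
	return i1->Op2 - i2->Op2;
}

int main(void)
{
	struct reg ttbr0 = { .CRn = 2, .is_64 = true };		/* like CRm64( 2) */
	struct reg ttbcr = { .CRn = 2, .CRm = 0, .Op2 = 2 };	/* 32-bit entry */
	printf("%d\n", cmp_reg(&ttbr0, &ttbcr));	/* negative: TTBR0 sorts first */
	return 0;
}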
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index 685063a6d0cf..cf93472b9dd6 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -114,7 +114,11 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
 
 /*
  * A15-specific CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
  */
 static const struct coproc_reg a15_regs[] = {
 	/* MPIDR: we use VMPIDR for guest access. */
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index b8e06b7a2833..0c25d9487d53 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -63,7 +63,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		      struct kvm_exit_mmio *mmio)
 {
-	unsigned long rt, len;
+	unsigned long rt;
+	int len;
 	bool is_write, sign_extend;
 
 	if (kvm_vcpu_dabt_isextabt(vcpu)) {
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index ca6bea4859b4..0988d9e04dd4 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -85,6 +85,12 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 	return p;
 }
 
+static bool page_empty(void *ptr)
+{
+	struct page *ptr_page = virt_to_page(ptr);
+	return page_count(ptr_page) == 1;
+}
+
 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
 	pmd_t *pmd_table = pmd_offset(pud, 0);
@@ -103,12 +109,6 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 	put_page(virt_to_page(pmd));
 }
 
-static bool pmd_empty(pmd_t *pmd)
-{
-	struct page *pmd_page = virt_to_page(pmd);
-	return page_count(pmd_page) == 1;
-}
-
 static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
 {
 	if (pte_present(*pte)) {
@@ -118,12 +118,6 @@ static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
 	}
 }
 
-static bool pte_empty(pte_t *pte)
-{
-	struct page *pte_page = virt_to_page(pte);
-	return page_count(pte_page) == 1;
-}
-
 static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 			unsigned long long start, u64 size)
 {
@@ -132,37 +126,37 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 	pmd_t *pmd;
 	pte_t *pte;
 	unsigned long long addr = start, end = start + size;
-	u64 range;
+	u64 next;
 
 	while (addr < end) {
 		pgd = pgdp + pgd_index(addr);
 		pud = pud_offset(pgd, addr);
 		if (pud_none(*pud)) {
-			addr += PUD_SIZE;
+			addr = pud_addr_end(addr, end);
 			continue;
 		}
 
 		pmd = pmd_offset(pud, addr);
 		if (pmd_none(*pmd)) {
-			addr += PMD_SIZE;
+			addr = pmd_addr_end(addr, end);
 			continue;
 		}
 
 		pte = pte_offset_kernel(pmd, addr);
 		clear_pte_entry(kvm, pte, addr);
-		range = PAGE_SIZE;
+		next = addr + PAGE_SIZE;
 
 		/* If we emptied the pte, walk back up the ladder */
-		if (pte_empty(pte)) {
+		if (page_empty(pte)) {
 			clear_pmd_entry(kvm, pmd, addr);
-			range = PMD_SIZE;
-			if (pmd_empty(pmd)) {
+			next = pmd_addr_end(addr, end);
+			if (page_empty(pmd) && !page_empty(pud)) {
 				clear_pud_entry(kvm, pud, addr);
-				range = PUD_SIZE;
+				next = pud_addr_end(addr, end);
 			}
 		}
 
-		addr += range;
+		addr = next;
 	}
 }
 
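The unmap_range() rework above replaces fixed PUD_SIZE/PMD_SIZE strides with pud_addr_end()/pmd_addr_end(), which step to the next table boundary clamped to the end of the range. A standalone sketch of that helper under assumed 2 MiB sections (the kernel's version is a macro in include/asm-generic/pgtable.h; this is a simplified user-space rendition):

#include <stdio.h>
#include <stdint.h>

#define PMD_SIZE	(1ULL << 21)		/* assume 2 MiB per pmd entry */
#define PMD_MASK	(~(PMD_SIZE - 1))

static uint64_t pmd_addr_end(uint64_t addr, uint64_t end)
{
	uint64_t boundary = (addr + PMD_SIZE) & PMD_MASK;
	return boundary < end ? boundary : end;	/* never overshoot the range */
}

int main(void)
{
	/* unaligned start: the next step lands on the boundary, not start + 2 MiB */
	printf("%#llx\n", (unsigned long long)pmd_addr_end(0x201000, 0x1000000));
	return 0;
}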
diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c
index 2abee6626aac..916e5a142917 100644
--- a/arch/arm/mach-at91/at91sam9x5.c
+++ b/arch/arm/mach-at91/at91sam9x5.c
@@ -227,6 +227,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
 	CLKDEV_CON_DEV_ID("usart", "f8020000.serial", &usart1_clk),
 	CLKDEV_CON_DEV_ID("usart", "f8024000.serial", &usart2_clk),
 	CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk),
+	CLKDEV_CON_DEV_ID("usart", "f8040000.serial", &uart0_clk),
+	CLKDEV_CON_DEV_ID("usart", "f8044000.serial", &uart1_clk),
 	CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk),
 	CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk),
 	CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc0_clk),
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index dff4ddc5ef81..139e42da25f0 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -75,6 +75,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
 	.parts			= davinci_nand_partitions,
 	.nr_parts		= ARRAY_SIZE(davinci_nand_partitions),
 	.ecc_mode		= NAND_ECC_HW_SYNDROME,
+	.ecc_bits		= 4,
 	.bbt_options		= NAND_BBT_USE_FLASH,
 };
 
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index afbc439f11d4..4cdb61c54459 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -505,7 +505,7 @@ static struct vpbe_output dm365evm_vpbe_outputs[] = {
 /*
  * Amplifiers on the board
  */
-struct ths7303_platform_data ths7303_pdata = {
+static struct ths7303_platform_data ths7303_pdata = {
 	.ch_1 = 3,
 	.ch_2 = 3,
 	.ch_3 = 3,
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index a33686a6fbb2..fa4bfaf952d8 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -153,6 +153,7 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = {
 	.parts		= davinci_evm_nandflash_partition,
 	.nr_parts	= ARRAY_SIZE(davinci_evm_nandflash_partition),
 	.ecc_mode	= NAND_ECC_HW,
+	.ecc_bits	= 1,
 	.bbt_options	= NAND_BBT_USE_FLASH,
 	.timing		= &davinci_evm_nandflash_timing,
 };
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index fbb8e5ab1dc1..0c005e876cac 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -90,6 +90,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
 	.parts			= davinci_nand_partitions,
 	.nr_parts		= ARRAY_SIZE(davinci_nand_partitions),
 	.ecc_mode		= NAND_ECC_HW,
+	.ecc_bits		= 1,
 	.options		= 0,
 };
 
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 2bc112adf565..808233b60e3d 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -88,6 +88,7 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = {
 	.parts		= davinci_ntosd2_nandflash_partition,
 	.nr_parts	= ARRAY_SIZE(davinci_ntosd2_nandflash_partition),
 	.ecc_mode	= NAND_ECC_HW,
+	.ecc_bits	= 1,
 	.bbt_options	= NAND_BBT_USE_FLASH,
 };
 
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 42ef53f62c6c..86100d179694 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -860,7 +860,7 @@ static struct platform_device dm355_vpbe_display = {
 	},
 };
 
-struct venc_platform_data dm355_venc_pdata = {
+static struct venc_platform_data dm355_venc_pdata = {
 	.setup_pinmux	= dm355_vpbe_setup_pinmux,
 	.setup_clock	= dm355_venc_setup_clock,
 };
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index fa7af5eda52d..dad28029ba9b 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -1349,7 +1349,7 @@ static struct platform_device dm365_vpbe_display = {
 	},
 };
 
-struct venc_platform_data dm365_venc_pdata = {
+static struct venc_platform_data dm365_venc_pdata = {
 	.setup_pinmux	= dm365_vpbe_setup_pinmux,
 	.setup_clock	= dm365_venc_setup_clock,
 };
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 855d4a7b462d..5952e68c76c4 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -92,6 +92,7 @@ config SOC_EXYNOS5440
 	bool "SAMSUNG EXYNOS5440"
 	default y
 	depends on ARCH_EXYNOS5
+	select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
 	select ARCH_HAS_OPP
 	select HAVE_ARM_ARCH_TIMER
 	select AUTO_ZRELADDR
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index e970a7a4e278..53696154aead 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -14,7 +14,7 @@ obj- :=
 
 obj-$(CONFIG_ARCH_EXYNOS)	+= common.o
 
-obj-$(CONFIG_PM)		+= pm.o
+obj-$(CONFIG_S5P_PM)		+= pm.o
 obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o
 obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
 
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index 164685bd25c8..ba95e5db2501 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -58,7 +58,6 @@ static const char name_exynos5440[] = "EXYNOS5440";
 
 static void exynos4_map_io(void);
 static void exynos5_map_io(void);
-static void exynos5440_map_io(void);
 static int exynos_init(void);
 
 static struct cpu_table cpu_ids[] __initdata = {
@@ -95,7 +94,6 @@ static struct cpu_table cpu_ids[] __initdata = {
 	}, {
 		.idcode		= EXYNOS5440_SOC_ID,
 		.idmask		= EXYNOS5_SOC_MASK,
-		.map_io		= exynos5440_map_io,
 		.init		= exynos_init,
 		.name		= name_exynos5440,
 	},
@@ -150,11 +148,6 @@ static struct map_desc exynos4_iodesc[] __initdata = {
 		.length		= SZ_64K,
 		.type		= MT_DEVICE,
 	}, {
-		.virtual	= (unsigned long)S3C_VA_UART,
-		.pfn		= __phys_to_pfn(EXYNOS4_PA_UART),
-		.length		= SZ_512K,
-		.type		= MT_DEVICE,
-	}, {
 		.virtual	= (unsigned long)S5P_VA_CMU,
 		.pfn		= __phys_to_pfn(EXYNOS4_PA_CMU),
 		.length		= SZ_128K,
@@ -268,20 +261,6 @@ static struct map_desc exynos5_iodesc[] __initdata = {
 		.pfn		= __phys_to_pfn(EXYNOS5_PA_PMU),
 		.length		= SZ_64K,
 		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S3C_VA_UART,
-		.pfn		= __phys_to_pfn(EXYNOS5_PA_UART),
-		.length		= SZ_512K,
-		.type		= MT_DEVICE,
-	},
-};
-
-static struct map_desc exynos5440_iodesc0[] __initdata = {
-	{
-		.virtual	= (unsigned long)S3C_VA_UART,
-		.pfn		= __phys_to_pfn(EXYNOS5440_PA_UART0),
-		.length		= SZ_512K,
-		.type		= MT_DEVICE,
 	},
 };
 
@@ -388,11 +367,6 @@ static void __init exynos5_map_io(void)
 	iotable_init(exynos5250_iodesc, ARRAY_SIZE(exynos5250_iodesc));
 }
 
-static void __init exynos5440_map_io(void)
-{
-	iotable_init(exynos5440_iodesc0, ARRAY_SIZE(exynos5440_iodesc0));
-}
-
 void __init exynos_init_time(void)
 {
 	of_clk_init(NULL);
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index 3e156bcddcb4..972490fc09d6 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -97,6 +97,5 @@ struct exynos_pmu_conf {
 };
 
 extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
-extern void s3c_cpu_resume(void);
 
 #endif /* __ARCH_ARM_MACH_EXYNOS_COMMON_H */
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index 17a18ff3d71e..225ee8431c72 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -25,6 +25,7 @@
 #include <mach/regs-pmu.h>
 
 #include <plat/cpu.h>
+#include <plat/pm.h>
 
 #include "common.h"
 
diff --git a/arch/arm/mach-exynos/include/mach/memory.h b/arch/arm/mach-exynos/include/mach/memory.h
index 374ef2cf7152..2a4cdb7cb326 100644
--- a/arch/arm/mach-exynos/include/mach/memory.h
+++ b/arch/arm/mach-exynos/include/mach/memory.h
@@ -15,8 +15,13 @@
 
 #define PLAT_PHYS_OFFSET		UL(0x40000000)
 
+#ifndef CONFIG_ARM_LPAE
 /* Maximum of 256MiB in one bank */
 #define MAX_PHYSMEM_BITS	32
 #define SECTION_SIZE_BITS	28
+#else
+#define MAX_PHYSMEM_BITS	36
+#define SECTION_SIZE_BITS	31
+#endif
 
 #endif /* __ASM_ARCH_MEMORY_H */
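For reference, the LPAE branch added above widens the addressable physical space from 2^32 to 2^36 bytes (64 GiB) while growing sparsemem sections from 256 MiB to 2 GiB, so the section count stays small either way; a quick arithmetic check (plain user-space C, values copied from the header):

#include <stdio.h>

int main(void)
{
	printf("%d\n", 1 << (32 - 28));	/* non-LPAE: 16 sections of 256 MiB */
	printf("%d\n", 1 << (36 - 31));	/* LPAE: 32 sections of 2 GiB */
	return 0;
}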
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 41c20692a13f..c679db577269 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -217,6 +217,9 @@ static __init int exynos_pm_drvinit(void)
 	struct clk *pll_base;
 	unsigned int tmp;
 
+	if (soc_is_exynos5440())
+		return 0;
+
 	s3c_pm_init();
 
 	/* All wakeup disable */
@@ -340,6 +343,9 @@ static struct syscore_ops exynos_pm_syscore_ops = {
 
 static __init int exynos_pm_syscore_init(void)
 {
+	if (soc_is_exynos5440())
+		return 0;
+
 	register_syscore_ops(&exynos_pm_syscore_ops);
 	return 0;
 }
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index a7cd2cf5e08d..3490a24f969e 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -276,8 +276,6 @@ int __init dc21285_setup(int nr, struct pci_sys_data *sys)
 
 	sys->mem_offset  = DC21285_PCI_MEM;
 
-	pci_ioremap_io(0, DC21285_PCI_IO);
-
 	pci_add_resource_offset(&sys->resources, &res[0], sys->mem_offset);
 	pci_add_resource_offset(&sys->resources, &res[1], sys->mem_offset);
 
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index dc5d6becd8c7..88815795fe26 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -115,6 +115,7 @@ static int highbank_platform_notifier(struct notifier_block *nb,
 {
 	struct resource *res;
 	int reg = -1;
+	u32 val;
 	struct device *dev = __dev;
 
 	if (event != BUS_NOTIFY_ADD_DEVICE)
@@ -141,10 +142,10 @@ static int highbank_platform_notifier(struct notifier_block *nb,
 		return NOTIFY_DONE;
 
 	if (of_property_read_bool(dev->of_node, "dma-coherent")) {
-		writel(0xff31, sregs_base + reg);
+		val = readl(sregs_base + reg);
+		writel(val | 0xff01, sregs_base + reg);
 		set_dma_ops(dev, &arm_coherent_dma_ops);
-	} else
-		writel(0, sregs_base + reg);
+	}
 
 	return NOTIFY_OK;
 }
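The highbank change above converts a blind register write into a read-modify-write so bits already set in the coherency control word survive, and drops the write-zero path for non-coherent devices. A user-space sketch of the idiom with mocked accessors (the real code uses readl()/writel() on an ioremap'd region):

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_reg = 0x00ce0000;		/* unrelated bits to preserve */

static uint32_t mock_readl(volatile uint32_t *addr)            { return *addr; }
static void mock_writel(uint32_t val, volatile uint32_t *addr) { *addr = val; }

int main(void)
{
	uint32_t val = mock_readl(&fake_reg);
	mock_writel(val | 0xff01, &fake_reg);	/* OR in the coherency bits only */
	printf("%#x\n", fake_reg);		/* the 0xce0000 bits stay intact */
	return 0;
}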
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 4282e99f5ca1..86567d980b07 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -199,7 +199,8 @@ static const char *pcie_axi_sels[] = { "axi", "ahb", };
 static const char *ssi_sels[]		= { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_post_div", };
 static const char *usdhc_sels[]	= { "pll2_pfd2_396m", "pll2_pfd0_352m", };
 static const char *enfc_sels[]	= { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", };
-static const char *emi_sels[]		= { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *emi_sels[]		= { "pll2_pfd2_396m", "pll3_usb_otg", "axi", "pll2_pfd0_352m", };
+static const char *emi_slow_sels[]	= { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", };
 static const char *vdo_axi_sels[]	= { "axi", "ahb", };
 static const char *vpu_axi_sels[]	= { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", };
 static const char *cko1_sels[]	= { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div",
@@ -392,7 +393,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
 	clk[usdhc4_sel]       = imx_clk_mux("usdhc4_sel",       base + 0x1c, 19, 1, usdhc_sels,        ARRAY_SIZE(usdhc_sels));
 	clk[enfc_sel]         = imx_clk_mux("enfc_sel",         base + 0x2c, 16, 2, enfc_sels,         ARRAY_SIZE(enfc_sels));
 	clk[emi_sel]          = imx_clk_mux("emi_sel",          base + 0x1c, 27, 2, emi_sels,          ARRAY_SIZE(emi_sels));
-	clk[emi_slow_sel]     = imx_clk_mux("emi_slow_sel",     base + 0x1c, 29, 2, emi_sels,          ARRAY_SIZE(emi_sels));
+	clk[emi_slow_sel]     = imx_clk_mux("emi_slow_sel",     base + 0x1c, 29, 2, emi_slow_sels,     ARRAY_SIZE(emi_slow_sels));
 	clk[vdo_axi_sel]      = imx_clk_mux("vdo_axi_sel",      base + 0x18, 11, 1, vdo_axi_sels,      ARRAY_SIZE(vdo_axi_sels));
 	clk[vpu_axi_sel]      = imx_clk_mux("vpu_axi_sel",      base + 0x18, 14, 2, vpu_axi_sels,      ARRAY_SIZE(vpu_axi_sels));
 	clk[cko1_sel]         = imx_clk_mux("cko1_sel",         base + 0x60, 0,  4, cko1_sels,         ARRAY_SIZE(cko1_sels));
diff --git a/arch/arm/mach-imx/clk-vf610.c b/arch/arm/mach-imx/clk-vf610.c
index d617c0b7c809..b169a396d93b 100644
--- a/arch/arm/mach-imx/clk-vf610.c
+++ b/arch/arm/mach-imx/clk-vf610.c
@@ -183,6 +183,8 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
 	clk[VF610_CLK_ENET_TS_SEL] = imx_clk_mux("enet_ts_sel", CCM_CSCMR2, 0, 3, enet_ts_sels, 7);
 	clk[VF610_CLK_ENET] = imx_clk_gate("enet", "enet_sel", CCM_CSCDR1, 24);
 	clk[VF610_CLK_ENET_TS] = imx_clk_gate("enet_ts", "enet_ts_sel", CCM_CSCDR1, 23);
+	clk[VF610_CLK_ENET0] = imx_clk_gate2("enet0", "ipg_bus", CCM_CCGR9, CCM_CCGRx_CGn(0));
+	clk[VF610_CLK_ENET1] = imx_clk_gate2("enet1", "ipg_bus", CCM_CCGR9, CCM_CCGRx_CGn(1));
 
 	clk[VF610_CLK_PIT] = imx_clk_gate2("pit", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(7));
 
diff --git a/arch/arm/mach-imx/mx27.h b/arch/arm/mach-imx/mx27.h
index e074616d54ca..8a65f192e7f3 100644
--- a/arch/arm/mach-imx/mx27.h
+++ b/arch/arm/mach-imx/mx27.h
@@ -135,7 +135,7 @@
 #define MX27_INT_GPT4		(NR_IRQS_LEGACY + 4)
 #define MX27_INT_RTIC		(NR_IRQS_LEGACY + 5)
 #define MX27_INT_CSPI3		(NR_IRQS_LEGACY + 6)
-#define MX27_INT_SDHC		(NR_IRQS_LEGACY + 7)
+#define MX27_INT_MSHC		(NR_IRQS_LEGACY + 7)
 #define MX27_INT_GPIO		(NR_IRQS_LEGACY + 8)
 #define MX27_INT_SDHC3		(NR_IRQS_LEGACY + 9)
 #define MX27_INT_SDHC2		(NR_IRQS_LEGACY + 10)
diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
index fe4d9ff93a7e..b661c5c2870a 100644
--- a/arch/arm/mach-keystone/keystone.c
+++ b/arch/arm/mach-keystone/keystone.c
@@ -49,7 +49,7 @@ static const char *keystone_match[] __initconst = {
 	NULL,
 };
 
-void keystone_restart(char mode, const char *cmd)
+void keystone_restart(enum reboot_mode mode, const char *cmd)
 {
 	u32 val;
 
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 614e41e7881b..905efc8cac79 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -121,8 +121,7 @@ config MSM_SMD
 	bool
 
 config MSM_GPIOMUX
-	depends on !(ARCH_MSM8X60 || ARCH_MSM8960)
-	bool "MSM V1 TLMM GPIOMUX architecture"
+	bool
 	help
 	  Support for MSM V1 TLMM GPIOMUX architecture.
 
diff --git a/arch/arm/mach-msm/gpiomux-v1.c b/arch/arm/mach-msm/gpiomux-v1.c
deleted file mode 100644
index 27de2abd7144..000000000000
--- a/arch/arm/mach-msm/gpiomux-v1.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#include <linux/kernel.h>
-#include "gpiomux.h"
-#include "proc_comm.h"
-
-void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val)
-{
-	unsigned tlmm_config  = (val & ~GPIOMUX_CTL_MASK) |
-				((gpio & 0x3ff) << 4);
-	unsigned tlmm_disable = 0;
-	int rc;
-
-	rc = msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
-			   &tlmm_config, &tlmm_disable);
-	if (rc)
-		pr_err("%s: unexpected proc_comm failure %d: %08x %08x\n",
-		       __func__, rc, tlmm_config, tlmm_disable);
-}
diff --git a/arch/arm/mach-msm/gpiomux.h b/arch/arm/mach-msm/gpiomux.h
index 8e82f41a8923..4410d7766f93 100644
--- a/arch/arm/mach-msm/gpiomux.h
+++ b/arch/arm/mach-msm/gpiomux.h
@@ -73,16 +73,6 @@ extern struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS];
 int msm_gpiomux_write(unsigned gpio,
 		      gpiomux_config_t active,
 		      gpiomux_config_t suspended);
-
-/* Architecture-internal function for use by the framework only.
- * This function can assume the following:
- * - the gpio value has passed a bounds-check
- * - the gpiomux spinlock has been obtained
- *
- * This function is not for public consumption. External users
- * should use msm_gpiomux_write.
- */
-void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val);
 #else
 static inline int msm_gpiomux_write(unsigned gpio,
 				    gpiomux_config_t active,
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 627fa7e41fba..3eed0006d189 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -62,7 +62,7 @@ config SOC_OMAP5
 	select HAVE_SMP
 	select COMMON_CLK
 	select HAVE_ARM_ARCH_TIMER
-	select ARM_ERRATA_798181
+	select ARM_ERRATA_798181 if SMP
 
 config SOC_AM33XX
 	bool "AM33XX support"
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index e5fbfed69aa2..be5d005ebad2 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -15,6 +15,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/irqdomain.h>
+#include <linux/clk.h>
 
 #include <asm/mach/arch.h>
 
@@ -35,6 +36,21 @@ static struct of_device_id omap_dt_match_table[] __initdata = {
 	{ }
 };
 
+/*
+ * Create alias for USB host PHY clock.
+ * Remove this when clock phandle can be provided via DT
+ */
+static void __init legacy_init_ehci_clk(char *clkname)
+{
+	int ret;
+
+	ret = clk_add_alias("main_clk", NULL, clkname, NULL);
+	if (ret) {
+		pr_err("%s:Failed to add main_clk alias to %s :%d\n",
+		       __func__, clkname, ret);
+	}
+}
+
 static void __init omap_generic_init(void)
 {
 	omap_sdrc_init(NULL, NULL);
@@ -45,10 +61,15 @@ static void __init omap_generic_init(void)
 	 * HACK: call display setup code for selected boards to enable omapdss.
 	 * This will be removed when omapdss supports DT.
 	 */
-	if (of_machine_is_compatible("ti,omap4-panda"))
+	if (of_machine_is_compatible("ti,omap4-panda")) {
 		omap4_panda_display_init_of();
+		legacy_init_ehci_clk("auxclk3_ck");
+
+	}
 	else if (of_machine_is_compatible("ti,omap4-sdp"))
 		omap_4430sdp_display_init_of();
+	else if (of_machine_is_compatible("ti,omap5-uevm"))
+		legacy_init_ehci_clk("auxclk1_ck");
 }
 
 #ifdef CONFIG_SOC_OMAP2420
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index f6eeb87e4e95..827d15009a86 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -122,11 +122,7 @@ static struct musb_hdrc_config musb_config = {
 };
 
 static struct musb_hdrc_platform_data tusb_data = {
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
 	.mode		= MUSB_OTG,
-#else
-	.mode		= MUSB_HOST,
-#endif
 	.set_power	= tusb_set_power,
 	.min_power	= 25,	/* x2 = 50 mA drawn from VBUS as peripheral */
 	.power		= 100,	/* Max 100 mA VBUS for host mode */
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index d2ea68ea678a..7735105561d8 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -85,7 +85,7 @@ static struct omap_board_mux board_mux[] __initdata = {
 
 static struct omap_musb_board_data musb_board_data = {
 	.interface_type		= MUSB_INTERFACE_ULPI,
-	.mode			= MUSB_PERIPHERAL,
+	.mode			= MUSB_OTG,
 	.power			= 0,
 };
 
diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c
index 393aeefaebb0..043e5705f2a6 100644
--- a/arch/arm/mach-omap2/dss-common.c
+++ b/arch/arm/mach-omap2/dss-common.c
@@ -42,7 +42,7 @@
 
 /* Using generic display panel */
 static struct tfp410_platform_data omap4_dvi_panel = {
-	.i2c_bus_num		= 3,
+	.i2c_bus_num		= 2,
 	.power_down_gpio	= PANDA_DVI_TFP410_POWER_DOWN_GPIO,
 };
 
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 5cc92874be7e..f99f68e1e85b 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -129,6 +129,7 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
 	struct device_node *node = pdev->dev.of_node;
 	const char *oh_name;
 	int oh_cnt, i, ret = 0;
+	bool device_active = false;
 
 	oh_cnt = of_property_count_strings(node, "ti,hwmods");
 	if (oh_cnt <= 0) {
@@ -152,6 +153,8 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
 			goto odbfd_exit1;
 		}
 		hwmods[i] = oh;
+		if (oh->flags & HWMOD_INIT_NO_IDLE)
+			device_active = true;
 	}
 
 	od = omap_device_alloc(pdev, hwmods, oh_cnt);
@@ -172,6 +175,11 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
 
 	pdev->dev.pm_domain = &omap_device_pm_domain;
 
+	if (device_active) {
+		omap_device_enable(pdev);
+		pm_runtime_set_active(&pdev->dev);
+	}
+
 odbfd_exit1:
 	kfree(hwmods);
 odbfd_exit:
@@ -842,6 +850,7 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct omap_device *od = to_omap_device(pdev);
+	int i;
 
 	if (!od)
 		return 0;
@@ -850,6 +859,15 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
 	 * If omap_device state is enabled, but has no driver bound,
 	 * idle it.
 	 */
+
+	/*
+	 * Some devices (like memory controllers) are always kept
+	 * enabled, and should not be idled even with no drivers.
+	 */
+	for (i = 0; i < od->hwmods_cnt; i++)
+		if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE)
+			return 0;
+
 	if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) {
 		if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
 			dev_warn(dev, "%s: enabled but no driver. Idling\n",
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 7341eff63f56..7f4db12b1459 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2386,7 +2386,7 @@ static void __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data)
 
 		np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh);
 		if (np)
-			va_start = of_iomap(np, 0);
+			va_start = of_iomap(np, oh->mpu_rt_idx);
 	} else {
 		va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start);
 	}
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index aab33fd814c0..e1482a9b3bc2 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -95,6 +95,54 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3;
 #define MODULEMODE_HWCTRL	1
 #define MODULEMODE_SWCTRL	2
 
+#define DEBUG_OMAP2UART1_FLAGS	0
+#define DEBUG_OMAP2UART2_FLAGS	0
+#define DEBUG_OMAP2UART3_FLAGS	0
+#define DEBUG_OMAP3UART3_FLAGS	0
+#define DEBUG_OMAP3UART4_FLAGS	0
+#define DEBUG_OMAP4UART3_FLAGS	0
+#define DEBUG_OMAP4UART4_FLAGS	0
+#define DEBUG_TI81XXUART1_FLAGS	0
+#define DEBUG_TI81XXUART2_FLAGS	0
+#define DEBUG_TI81XXUART3_FLAGS	0
+#define DEBUG_AM33XXUART1_FLAGS	0
+
+#define DEBUG_OMAPUART_FLAGS	(HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET)
+
+#if defined(CONFIG_DEBUG_OMAP2UART1)
+#undef DEBUG_OMAP2UART1_FLAGS
+#define DEBUG_OMAP2UART1_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP2UART2)
+#undef DEBUG_OMAP2UART2_FLAGS
+#define DEBUG_OMAP2UART2_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP2UART3)
+#undef DEBUG_OMAP2UART3_FLAGS
+#define DEBUG_OMAP2UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP3UART3)
+#undef DEBUG_OMAP3UART3_FLAGS
+#define DEBUG_OMAP3UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP3UART4)
+#undef DEBUG_OMAP3UART4_FLAGS
+#define DEBUG_OMAP3UART4_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP4UART3)
+#undef DEBUG_OMAP4UART3_FLAGS
+#define DEBUG_OMAP4UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP4UART4)
+#undef DEBUG_OMAP4UART4_FLAGS
+#define DEBUG_OMAP4UART4_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART1)
+#undef DEBUG_TI81XXUART1_FLAGS
+#define DEBUG_TI81XXUART1_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART2)
+#undef DEBUG_TI81XXUART2_FLAGS
+#define DEBUG_TI81XXUART2_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART3)
+#undef DEBUG_TI81XXUART3_FLAGS
+#define DEBUG_TI81XXUART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_AM33XXUART1)
+#undef DEBUG_AM33XXUART1_FLAGS
+#define DEBUG_AM33XXUART1_FLAGS DEBUG_OMAPUART_FLAGS
+#endif
 
 /**
  * struct omap_hwmod_mux_info - hwmod specific mux configuration
@@ -568,6 +616,7 @@ struct omap_hwmod_link {
  * @voltdm: pointer to voltage domain (filled in at runtime)
  * @dev_attr: arbitrary device attributes that can be passed to the driver
  * @_sysc_cache: internal-use hwmod flags
+ * @mpu_rt_idx: index of device address space for register target (for DT boot)
  * @_mpu_rt_va: cached register target start address (internal use)
  * @_mpu_port: cached MPU register target slave (internal use)
  * @opt_clks_cnt: number of @opt_clks
@@ -617,6 +666,7 @@ struct omap_hwmod {
 	struct list_head		node;
 	struct omap_hwmod_ocp_if	*_mpu_port;
 	u16				flags;
+	u8				mpu_rt_idx;
 	u8				response_lat;
 	u8				rst_lines_cnt;
 	u8				opt_clks_cnt;
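The block added to omap_hwmod.h is a compile-time selection pattern: every per-UART flag macro defaults to 0, and a single #elif chain re-defines just the macro matching the chosen DEBUG_* console option to "never idle, never reset", so exactly one hwmod inherits the extra flags. A condensed sketch with a hypothetical CONFIG_DEBUG_FOOUART1 option (user-space, printable):

#include <stdio.h>

#define HWMOD_INIT_NO_IDLE	(1 << 0)
#define HWMOD_INIT_NO_RESET	(1 << 1)

#define DEBUG_FOOUART1_FLAGS	0
#define DEBUG_UART_FLAGS	(HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET)

#define CONFIG_DEBUG_FOOUART1	1	/* pretend Kconfig selected this UART */

#if defined(CONFIG_DEBUG_FOOUART1)
#undef DEBUG_FOOUART1_FLAGS
#define DEBUG_FOOUART1_FLAGS	DEBUG_UART_FLAGS
#endif

int main(void)
{
	printf("%#x\n", DEBUG_FOOUART1_FLAGS);	/* 0x3: no-idle | no-reset */
	return 0;
}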
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index d05fc7b54567..56cebb05509e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -512,7 +512,7 @@ struct omap_hwmod omap2xxx_uart1_hwmod = {
 	.mpu_irqs	= omap2_uart1_mpu_irqs,
 	.sdma_reqs	= omap2_uart1_sdma_reqs,
 	.main_clk	= "uart1_fck",
-	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.flags		= DEBUG_OMAP2UART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
 	.prcm		= {
 		.omap2 = {
 			.module_offs = CORE_MOD,
@@ -532,7 +532,7 @@ struct omap_hwmod omap2xxx_uart2_hwmod = {
 	.mpu_irqs	= omap2_uart2_mpu_irqs,
 	.sdma_reqs	= omap2_uart2_sdma_reqs,
 	.main_clk	= "uart2_fck",
-	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.flags		= DEBUG_OMAP2UART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
 	.prcm		= {
 		.omap2 = {
 			.module_offs = CORE_MOD,
@@ -552,7 +552,7 @@ struct omap_hwmod omap2xxx_uart3_hwmod = {
 	.mpu_irqs	= omap2_uart3_mpu_irqs,
 	.sdma_reqs	= omap2_uart3_sdma_reqs,
 	.main_clk	= "uart3_fck",
-	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.flags		= DEBUG_OMAP2UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
 	.prcm		= {
 		.omap2 = {
 			.module_offs = CORE_MOD,
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 28bbd56346a9..eb2f3b93b51c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -562,6 +562,7 @@ static struct omap_hwmod am33xx_cpgmac0_hwmod = {
 	.clkdm_name	= "cpsw_125mhz_clkdm",
 	.flags		= (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
 	.main_clk	= "cpsw_125mhz_gclk",
+	.mpu_rt_idx	= 1,
 	.prcm		= {
 		.omap4	= {
 			.clkctrl_offs	= AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET,
@@ -1512,7 +1513,7 @@ static struct omap_hwmod am33xx_uart1_hwmod = {
 	.name		= "uart1",
 	.class		= &uart_class,
 	.clkdm_name	= "l4_wkup_clkdm",
-	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.flags		= DEBUG_AM33XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
 	.main_clk	= "dpll_per_m2_div4_wkupdm_ck",
 	.prcm		= {
 		.omap4	= {
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index f7a3df2fb579..0c3a427da544 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -490,7 +490,7 @@ static struct omap_hwmod omap3xxx_uart1_hwmod = {
 	.mpu_irqs	= omap2_uart1_mpu_irqs,
 	.sdma_reqs	= omap2_uart1_sdma_reqs,
 	.main_clk	= "uart1_fck",
-	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.flags		= DEBUG_TI81XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
 	.prcm		= {
 		.omap2 = {
 			.module_offs = CORE_MOD,
@@ -509,7 +509,7 @@ static struct omap_hwmod omap3xxx_uart2_hwmod = {
 	.mpu_irqs	= omap2_uart2_mpu_irqs,
 	.sdma_reqs	= omap2_uart2_sdma_reqs,
 	.main_clk	= "uart2_fck",
-	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.flags		= DEBUG_TI81XXUART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
 	.prcm		= {
 		.omap2 = {
 			.module_offs = CORE_MOD,
@@ -528,7 +528,8 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = {
 	.mpu_irqs	= omap2_uart3_mpu_irqs,
 	.sdma_reqs	= omap2_uart3_sdma_reqs,
 	.main_clk	= "uart3_fck",
-	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.flags		= DEBUG_OMAP3UART3_FLAGS | DEBUG_TI81XXUART3_FLAGS |
+			  HWMOD_SWSUP_SIDLE_ACT,
 	.prcm		= {
 		.omap2 = {
 			.module_offs = OMAP3430_PER_MOD,
@@ -558,7 +559,7 @@ static struct omap_hwmod omap36xx_uart4_hwmod = {
 	.mpu_irqs	= uart4_mpu_irqs,
 	.sdma_reqs	= uart4_sdma_reqs,
 	.main_clk	= "uart4_fck",
-	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.flags		= DEBUG_OMAP3UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
 	.prcm		= {
 		.omap2 = {
 			.module_offs = OMAP3430_PER_MOD,
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index d04b5e60fdbe..9c3b504477d7 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2858,8 +2858,7 @@ static struct omap_hwmod omap44xx_uart3_hwmod = {
 	.name		= "uart3",
 	.class		= &omap44xx_uart_hwmod_class,
 	.clkdm_name	= "l4_per_clkdm",
-	.flags		= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET |
-			  HWMOD_SWSUP_SIDLE_ACT,
+	.flags		= DEBUG_OMAP4UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
 	.main_clk	= "func_48m_fclk",
 	.prcm = {
 		.omap4 = {
@@ -2875,7 +2874,7 @@ static struct omap_hwmod omap44xx_uart4_hwmod = {
 	.name		= "uart4",
 	.class		= &omap44xx_uart_hwmod_class,
 	.clkdm_name	= "l4_per_clkdm",
-	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.flags		= DEBUG_OMAP4UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
 	.main_clk	= "func_48m_fclk",
 	.prcm = {
 		.omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index f37ae96b70a1..3c70f5c1860f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -1375,7 +1375,7 @@ static struct omap_hwmod omap54xx_uart3_hwmod = {
 	.name		= "uart3",
 	.class		= &omap54xx_uart_hwmod_class,
 	.clkdm_name	= "l4per_clkdm",
-	.flags		= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+	.flags		= DEBUG_OMAP4UART3_FLAGS,
 	.main_clk	= "func_48m_fclk",
 	.prcm = {
 		.omap4 = {
@@ -1391,6 +1391,7 @@ static struct omap_hwmod omap54xx_uart4_hwmod = {
 	.name		= "uart4",
 	.class		= &omap54xx_uart_hwmod_class,
 	.clkdm_name	= "l4per_clkdm",
+	.flags		= DEBUG_OMAP4UART4_FLAGS,
 	.main_clk	= "func_48m_fclk",
 	.prcm = {
 		.omap4 = {
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 3a674de6cb63..a388f8c1bcb3 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -208,17 +208,6 @@ static int __init omap_serial_early_init(void)
 			pr_info("%s used as console in debug mode: uart%d clocks will not be gated",
 				uart_name, uart->num);
 		}
-
-		/*
-		 * omap-uart can be used for earlyprintk logs
-		 * So if omap-uart is used as console then prevent
-		 * uart reset and idle to get logs from omap-uart
-		 * until uart console driver is available to take
-		 * care for console messages.
-		 * Idling or resetting omap-uart while printing logs
-		 * early boot logs can stall the boot-up.
-		 */
-		oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET;
 		}
 	} while (1);
 
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 8c4de2708cf2..bc897231bd10 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -38,11 +38,8 @@ static struct musb_hdrc_config musb_config = {
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
 	.mode		= MUSB_OTG,
-#else
-	.mode		= MUSB_HOST,
-#endif
+
 	/* .clock is set dynamically */
 	.config		= &musb_config,
 
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index f6726bb4eb95..3a3362fa793e 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -477,16 +477,24 @@ static int em_x270_usb_hub_init(void)
 	/* USB Hub power-on and reset */
 	gpio_direction_output(usb_hub_reset, 1);
 	gpio_direction_output(GPIO9_USB_VBUS_EN, 0);
-	regulator_enable(em_x270_usb_ldo);
+	err = regulator_enable(em_x270_usb_ldo);
+	if (err)
+		goto err_free_rst_gpio;
+
 	gpio_set_value(usb_hub_reset, 0);
 	gpio_set_value(usb_hub_reset, 1);
 	regulator_disable(em_x270_usb_ldo);
-	regulator_enable(em_x270_usb_ldo);
+	err = regulator_enable(em_x270_usb_ldo);
+	if (err)
+		goto err_free_rst_gpio;
+
 	gpio_set_value(usb_hub_reset, 0);
 	gpio_set_value(GPIO9_USB_VBUS_EN, 1);
 
 	return 0;
 
+err_free_rst_gpio:
+	gpio_free(usb_hub_reset);
 err_free_vbus_gpio:
 	gpio_free(GPIO9_USB_VBUS_EN);
 err_free_usb_ldo:
@@ -592,7 +600,7 @@ err_irq:
 	return err;
 }
 
-static void em_x270_mci_setpower(struct device *dev, unsigned int vdd)
+static int em_x270_mci_setpower(struct device *dev, unsigned int vdd)
 {
 	struct pxamci_platform_data* p_d = dev->platform_data;
 
@@ -600,10 +608,11 @@ static void em_x270_mci_setpower(struct device *dev, unsigned int vdd)
 		int vdd_uV = (2000 + (vdd - __ffs(MMC_VDD_20_21)) * 100) * 1000;
 
 		regulator_set_voltage(em_x270_sdio_ldo, vdd_uV, vdd_uV);
-		regulator_enable(em_x270_sdio_ldo);
+		return regulator_enable(em_x270_sdio_ldo);
 	} else {
 		regulator_disable(em_x270_sdio_ldo);
 	}
+	return 0;
 }
 
 static void em_x270_mci_exit(struct device *dev, void *data)
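The em-x270 hunks are one instance of a pattern repeated across the PXA boards below: regulator_enable() can fail (and is __must_check in the regulator consumer API), so the mci setpower hooks now return int and propagate that result instead of discarding it. A self-contained mock of the converted shape (hypothetical names; the real API lives in include/linux/regulator/consumer.h):

#include <stdio.h>

struct regulator { int broken; };			/* user-space stand-in */
static int regulator_enable(struct regulator *r)   { return r->broken ? -5 : 0; }
static void regulator_disable(struct regulator *r) { (void)r; }

static struct regulator sdio_ldo;

static int example_mci_setpower(unsigned int vdd)
{
	if (vdd)
		return regulator_enable(&sdio_ldo);	/* propagate the -errno */
	regulator_disable(&sdio_ldo);
	return 0;
}

int main(void)
{
	sdio_ldo.broken = 1;
	printf("%d\n", example_mci_setpower(1));	/* -5, i.e. -EIO */
	return 0;
}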
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index d2c652318376..dd70343c8708 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -408,7 +408,7 @@ static int mainstone_mci_init(struct device *dev, irq_handler_t mstone_detect_in
 	return err;
 }
 
-static void mainstone_mci_setpower(struct device *dev, unsigned int vdd)
+static int mainstone_mci_setpower(struct device *dev, unsigned int vdd)
 {
 	struct pxamci_platform_data* p_d = dev->platform_data;
 
@@ -420,6 +420,7 @@ static void mainstone_mci_setpower(struct device *dev, unsigned int vdd)
 		printk(KERN_DEBUG "%s: off\n", __func__);
 		MST_MSCWR1 &= ~MST_MSCWR1_MMC_ON;
 	}
+	return 0;
 }
 
 static void mainstone_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index fb7f1d1627dc..13e5b00eae90 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -335,7 +335,7 @@ static int pcm990_mci_init(struct device *dev, irq_handler_t mci_detect_int,
 	return err;
 }
 
-static void pcm990_mci_setpower(struct device *dev, unsigned int vdd)
+static int pcm990_mci_setpower(struct device *dev, unsigned int vdd)
 {
 	struct pxamci_platform_data *p_d = dev->platform_data;
 	u8 val;
@@ -348,6 +348,7 @@ static void pcm990_mci_setpower(struct device *dev, unsigned int vdd)
 		val &= ~PCM990_CTRL_MMC2PWR;
 
 	pcm990_cpld_writeb(PCM990_CTRL_MMC2PWR, PCM990_CTRL_REG5);
+	return 0;
 }
 
 static void pcm990_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index 711d37e26bd8..aedf053a1de5 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -258,7 +258,7 @@ err_free_2:
 	return err;
 }
 
-static void poodle_mci_setpower(struct device *dev, unsigned int vdd)
+static int poodle_mci_setpower(struct device *dev, unsigned int vdd)
 {
 	struct pxamci_platform_data* p_d = dev->platform_data;
 
@@ -270,6 +270,8 @@ static void poodle_mci_setpower(struct device *dev, unsigned int vdd)
 		gpio_set_value(POODLE_GPIO_SD_PWR1, 0);
 		gpio_set_value(POODLE_GPIO_SD_PWR, 0);
 	}
+
+	return 0;
 }
 
 static void poodle_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 2125df0444e7..4c29173026e8 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -598,7 +598,7 @@ static inline void spitz_spi_init(void) {}
  * NOTE: The card detect interrupt isn't debounced so we delay it by 250ms to
  * give the card a chance to fully insert/eject.
  */
-static void spitz_mci_setpower(struct device *dev, unsigned int vdd)
+static int spitz_mci_setpower(struct device *dev, unsigned int vdd)
 {
 	struct pxamci_platform_data* p_d = dev->platform_data;
 
@@ -606,6 +606,8 @@ static void spitz_mci_setpower(struct device *dev, unsigned int vdd)
 		spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, SCOOP_CPR_SD_3V);
 	else
 		spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, 0x0);
+
+	return 0;
 }
 
 static struct pxamci_platform_data spitz_mci_platform_data = {
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index 88fde43c948c..62aea3e835f3 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -734,9 +734,10 @@ static int stargate2_mci_init(struct device *dev,
  *
  * Very simple control. Either it is on or off and is controlled by
  * a gpio pin */
-static void stargate2_mci_setpower(struct device *dev, unsigned int vdd)
+static int stargate2_mci_setpower(struct device *dev, unsigned int vdd)
 {
 	gpio_set_value(SG2_SD_POWER_ENABLE, !!vdd);
+	return 0;
 }
 
 static void stargate2_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2410.c b/arch/arm/mach-s3c24xx/clock-s3c2410.c
index 34fffdf6fc1d..564553694b54 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2410.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2410.c
@@ -119,66 +119,101 @@ static struct clk init_clocks_off[] = {
 	}
 };
 
-static struct clk init_clocks[] = {
-	{
-		.name		= "lcd",
-		.parent		= &clk_h,
-		.enable		= s3c2410_clkcon_enable,
-		.ctrlbit	= S3C2410_CLKCON_LCDC,
-	}, {
-		.name		= "gpio",
-		.parent		= &clk_p,
-		.enable		= s3c2410_clkcon_enable,
-		.ctrlbit	= S3C2410_CLKCON_GPIO,
-	}, {
-		.name		= "usb-host",
-		.parent		= &clk_h,
-		.enable		= s3c2410_clkcon_enable,
-		.ctrlbit	= S3C2410_CLKCON_USBH,
-	}, {
-		.name		= "usb-device",
-		.parent		= &clk_h,
-		.enable		= s3c2410_clkcon_enable,
-		.ctrlbit	= S3C2410_CLKCON_USBD,
-	}, {
-		.name		= "timers",
-		.parent		= &clk_p,
-		.enable		= s3c2410_clkcon_enable,
-		.ctrlbit	= S3C2410_CLKCON_PWMT,
-	}, {
-		.name		= "uart",
-		.devname	= "s3c2410-uart.0",
-		.parent		= &clk_p,
-		.enable		= s3c2410_clkcon_enable,
-		.ctrlbit	= S3C2410_CLKCON_UART0,
-	}, {
-		.name		= "uart",
-		.devname	= "s3c2410-uart.1",
-		.parent		= &clk_p,
-		.enable		= s3c2410_clkcon_enable,
-		.ctrlbit	= S3C2410_CLKCON_UART1,
-	}, {
-		.name		= "uart",
-		.devname	= "s3c2410-uart.2",
-		.parent		= &clk_p,
-		.enable		= s3c2410_clkcon_enable,
-		.ctrlbit	= S3C2410_CLKCON_UART2,
-	}, {
-		.name		= "rtc",
-		.parent		= &clk_p,
-		.enable		= s3c2410_clkcon_enable,
-		.ctrlbit	= S3C2410_CLKCON_RTC,
-	}, {
-		.name		= "watchdog",
-		.parent		= &clk_p,
-		.ctrlbit	= 0,
-	}, {
-		.name		= "usb-bus-host",
-		.parent		= &clk_usb_bus,
-	}, {
-		.name		= "usb-bus-gadget",
-		.parent		= &clk_usb_bus,
-	},
+static struct clk clk_lcd = {
+	.name		= "lcd",
+	.parent		= &clk_h,
+	.enable		= s3c2410_clkcon_enable,
+	.ctrlbit	= S3C2410_CLKCON_LCDC,
+};
+
+static struct clk clk_gpio = {
+	.name		= "gpio",
+	.parent		= &clk_p,
+	.enable		= s3c2410_clkcon_enable,
+	.ctrlbit	= S3C2410_CLKCON_GPIO,
+};
+
+static struct clk clk_usb_host = {
+	.name		= "usb-host",
+	.parent		= &clk_h,
+	.enable		= s3c2410_clkcon_enable,
+	.ctrlbit	= S3C2410_CLKCON_USBH,
+};
+
+static struct clk clk_usb_device = {
+	.name		= "usb-device",
+	.parent		= &clk_h,
+	.enable		= s3c2410_clkcon_enable,
+	.ctrlbit	= S3C2410_CLKCON_USBD,
+};
+
+static struct clk clk_timers = {
+	.name		= "timers",
+	.parent		= &clk_p,
+	.enable		= s3c2410_clkcon_enable,
+	.ctrlbit	= S3C2410_CLKCON_PWMT,
+};
+
+struct clk s3c24xx_clk_uart0 = {
+	.name		= "uart",
+	.devname	= "s3c2410-uart.0",
+	.parent		= &clk_p,
+	.enable		= s3c2410_clkcon_enable,
+	.ctrlbit	= S3C2410_CLKCON_UART0,
+};
+
+struct clk s3c24xx_clk_uart1 = {
+	.name		= "uart",
+	.devname	= "s3c2410-uart.1",
+	.parent		= &clk_p,
+	.enable		= s3c2410_clkcon_enable,
+	.ctrlbit	= S3C2410_CLKCON_UART1,
+};
+
+struct clk s3c24xx_clk_uart2 = {
+	.name		= "uart",
+	.devname	= "s3c2410-uart.2",
+	.parent		= &clk_p,
+	.enable		= s3c2410_clkcon_enable,
+	.ctrlbit	= S3C2410_CLKCON_UART2,
+};
+
+static struct clk clk_rtc = {
+	.name		= "rtc",
+	.parent		= &clk_p,
+	.enable		= s3c2410_clkcon_enable,
+	.ctrlbit	= S3C2410_CLKCON_RTC,
+};
+
+static struct clk clk_watchdog = {
+	.name		= "watchdog",
+	.parent		= &clk_p,
+	.ctrlbit	= 0,
+};
+
+static struct clk clk_usb_bus_host = {
+	.name		= "usb-bus-host",
+	.parent		= &clk_usb_bus,
+};
+
+static struct clk clk_usb_bus_gadget = {
+	.name		= "usb-bus-gadget",
+	.parent		= &clk_usb_bus,
+};
+
+static struct clk *init_clocks[] = {
+	&clk_lcd,
+	&clk_gpio,
+	&clk_usb_host,
+	&clk_usb_device,
+	&clk_timers,
+	&s3c24xx_clk_uart0,
+	&s3c24xx_clk_uart1,
+	&s3c24xx_clk_uart2,
+	&clk_rtc,
+	&clk_watchdog,
+	&clk_usb_bus_host,
+	&clk_usb_bus_gadget,
 };
 
 /* s3c2410_baseclk_add()
@@ -195,7 +230,6 @@ int __init s3c2410_baseclk_add(void)
 {
 	unsigned long clkslow = __raw_readl(S3C2410_CLKSLOW);
 	unsigned long clkcon  = __raw_readl(S3C2410_CLKCON);
-	struct clk *clkp;
 	struct clk *xtal;
 	int ret;
 	int ptr;
@@ -207,8 +241,9 @@ int __init s3c2410_baseclk_add(void)
 
 	/* register clocks from clock array */
 
-	clkp = init_clocks;
-	for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++, clkp++) {
+	for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++) {
+		struct clk *clkp = init_clocks[ptr];
+
 		/* ensure that we note the clock state */
 
 		clkp->usage = clkcon & clkp->ctrlbit ? 1 : 0;
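Sketch of why the rework above helps: with an array of pointers, each struct clk is a named object whose address other files can take, which is what lets the s3c2440 lookup table below reference the UART clocks. Types here are simplified stand-ins, compilable on their own:

    /* Stand-alone sketch; struct clk is reduced to the fields used here. */
    struct clk { const char *name; int usage; unsigned ctrlbit; };

    static struct clk clk_a = { .name = "a", .ctrlbit = 1 << 0 };
    static struct clk clk_b = { .name = "b", .ctrlbit = 1 << 1 };

    static struct clk *init_clocks[] = { &clk_a, &clk_b };

    static void note_clock_state(unsigned clkcon)
    {
        for (unsigned i = 0; i < sizeof(init_clocks) / sizeof(init_clocks[0]); i++) {
            struct clk *clkp = init_clocks[i];
            clkp->usage = (clkcon & clkp->ctrlbit) ? 1 : 0;
        }
    }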
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2440.c b/arch/arm/mach-s3c24xx/clock-s3c2440.c
index 1069b5680826..aaf006d1d6dc 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2440.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2440.c
@@ -166,6 +166,9 @@ static struct clk_lookup s3c2440_clk_lookup[] = {
 	CLKDEV_INIT(NULL, "clk_uart_baud1", &s3c24xx_uclk),
 	CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
 	CLKDEV_INIT(NULL, "clk_uart_baud3", &s3c2440_clk_fclk_n),
+	CLKDEV_INIT("s3c2440-uart.0", "uart", &s3c24xx_clk_uart0),
+	CLKDEV_INIT("s3c2440-uart.1", "uart", &s3c24xx_clk_uart1),
+	CLKDEV_INIT("s3c2440-uart.2", "uart", &s3c24xx_clk_uart2),
 	CLKDEV_INIT("s3c2440-camif", "camera", &s3c2440_clk_cam_upll),
 };
 
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index e115f6742107..c5be60d85e4b 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -1162,9 +1162,6 @@ static void __init eva_init(void)
 	gpio_request_one(61, GPIOF_OUT_INIT_HIGH, NULL); /* LCDDON */
 	gpio_request_one(202, GPIOF_OUT_INIT_LOW,  NULL); /* LCD0_LED_CONT */
 
-	/* Touchscreen */
-	gpio_request_one(166, GPIOF_OUT_INIT_HIGH, NULL); /* TP_RST_B */
-
 	/* GETHER */
 	gpio_request_one(18, GPIOF_OUT_INIT_HIGH, NULL); /* PHY_RST */
 
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index d5554646916c..3354a85c90f7 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -167,7 +167,13 @@ static const struct pinctrl_map bockw_pinctrl_map[] = {
167 "usb1", "usb1"), 167 "usb1", "usb1"),
168 /* SDHI0 */ 168 /* SDHI0 */
169 PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778", 169 PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
170 "sdhi0", "sdhi0"), 170 "sdhi0_data4", "sdhi0"),
171 PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
172 "sdhi0_ctrl", "sdhi0"),
173 PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
174 "sdhi0_cd", "sdhi0"),
175 PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
176 "sdhi0_wp", "sdhi0"),
171}; 177};
172 178
173#define FPGA 0x18200000 179#define FPGA 0x18200000
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index d73e21d3ea8a..8d6bd5c5efb9 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -59,7 +59,7 @@ static __initdata struct gpio_led_platform_data lager_leds_pdata = {
 #define GPIO_KEY(c, g, d, ...) \
 	{ .code = c, .gpio = g, .desc = d, .active_low = 1 }
 
-static __initdata struct gpio_keys_button gpio_buttons[] = {
+static struct gpio_keys_button gpio_buttons[] = {
 	GPIO_KEY(KEY_4, RCAR_GP_PIN(1, 28), "SW2-pin4"),
 	GPIO_KEY(KEY_3, RCAR_GP_PIN(1, 26), "SW2-pin3"),
 	GPIO_KEY(KEY_2, RCAR_GP_PIN(1, 24), "SW2-pin2"),
diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig
index d04e3bfe1918..835833e3c4f8 100644
--- a/arch/arm/mach-sti/Kconfig
+++ b/arch/arm/mach-sti/Kconfig
@@ -11,8 +11,9 @@ menuconfig ARCH_STI
 	select HAVE_SMP
 	select HAVE_ARM_SCU if SMP
 	select ARCH_REQUIRE_GPIOLIB
-	select ARM_ERRATA_720789
 	select ARM_ERRATA_754322
+	select ARM_ERRATA_764369
+	select ARM_ERRATA_775420
 	select PL310_ERRATA_753970 if CACHE_PL310
 	select PL310_ERRATA_769419 if CACHE_PL310
 	help
diff --git a/arch/arm/mach-sti/headsmp.S b/arch/arm/mach-sti/headsmp.S
index 78ebc7559f53..4c09bae86edf 100644
--- a/arch/arm/mach-sti/headsmp.S
+++ b/arch/arm/mach-sti/headsmp.S
@@ -16,8 +16,6 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
-	__INIT
-
 /*
  * ST specific entry point for secondary CPUs. This provides
  * a "holding pen" into which all secondary cores are held until we're
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 5b799c29886e..5f252569c689 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -91,7 +91,7 @@ static void __init zynq_map_io(void)
 	zynq_scu_map_io();
 }
 
-static void zynq_system_reset(char mode, const char *cmd)
+static void zynq_system_reset(enum reboot_mode mode, const char *cmd)
 {
 	zynq_slcr_system_reset();
 }
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 6cacdc8dd654..cd2c88e7a8f7 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -421,24 +421,28 @@ config CPU_32v3
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v4
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v4T
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v5
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v6
 	bool
@@ -776,6 +780,7 @@ config CPU_BPREDICT_DISABLE
 
 config TLS_REG_EMUL
 	bool
+	select NEED_KUSER_HELPERS
 	help
 	  An SMP system using a pre-ARMv6 processor (there are apparently
 	  a few prototypes like that in existence) and therefore access to
@@ -783,11 +788,43 @@ config TLS_REG_EMUL
 
 config NEEDS_SYSCALL_FOR_CMPXCHG
 	bool
+	select NEED_KUSER_HELPERS
 	help
 	  SMP on a pre-ARMv6 processor? Well OK then.
 	  Forget about fast user space cmpxchg support.
 	  It is just not possible.
 
+config NEED_KUSER_HELPERS
+	bool
+
+config KUSER_HELPERS
+	bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+	default y
+	help
+	  Warning: disabling this option may break user programs.
+
+	  Provide kuser helpers in the vector page. The kernel provides
+	  helper code to userspace in read only form at a fixed location
+	  in the high vector page to allow userspace to be independent of
+	  the CPU type fitted to the system. This permits binaries to be
+	  run on ARMv4 through to ARMv7 without modification.
+
+	  See Documentation/arm/kernel_user_helpers.txt for details.
+
+	  However, the fixed address nature of these helpers can be used
+	  by ROP (return orientated programming) authors when creating
+	  exploits.
+
+	  If all of the binaries and libraries which run on your platform
+	  are built specifically for your platform, and make no use of
+	  these helpers, then you can turn this option off to hinder
+	  such exploits. However, in that case, if a binary or library
+	  relying on those helpers is run, it will receive a SIGILL signal,
+	  which will terminate the program.
+
+	  Say N here only if you are absolutely certain that you do not
+	  need these helpers; otherwise, the safe option is to say Y.
+
 config DMA_CACHE_RWFO
 	bool "Enable read/write for ownership DMA cache maintenance"
 	depends on CPU_V6K && SMP
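For context on what the new KUSER_HELPERS option gates: user space reaches these helpers through fixed addresses in the vector page, per Documentation/arm/kernel_user_helpers.txt, which is exactly the fixed-address property the help text warns can be abused. A sketch of the documented cmpxchg helper, with an illustrative caller:

    /* The address and prototype follow kernel_user_helpers.txt; the
     * helper returns 0 when the store succeeded, non-zero otherwise.
     * atomic_increment() is an invented example caller. */
    typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
    #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

    static int atomic_increment(int *counter)
    {
        int old, new;

        do {
            old = *counter;
            new = old + 1;
        } while (__kernel_cmpxchg(old, new, counter) != 0);

        return new;
    }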
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b55b1015724b..4a0544492f10 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -245,7 +245,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
 		local_flush_bp_all();
 		local_flush_tlb_all();
-		dummy_flush_tlb_a15_erratum();
+		if (erratum_a15_798181())
+			dummy_flush_tlb_a15_erratum();
 	}
 
 	atomic64_set(&per_cpu(active_asids, cpu), asid);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4f56617a2392..53cdbd39ec8e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -989,6 +989,7 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
 
 void __init sanity_check_meminfo(void)
 {
+	phys_addr_t memblock_limit = 0;
 	int i, j, highmem = 0;
 	phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
 
@@ -1052,9 +1053,32 @@ void __init sanity_check_meminfo(void)
 			bank->size = size_limit;
 		}
 #endif
-		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
-			arm_lowmem_limit = bank->start + bank->size;
+		if (!bank->highmem) {
+			phys_addr_t bank_end = bank->start + bank->size;
 
+			if (bank_end > arm_lowmem_limit)
+				arm_lowmem_limit = bank_end;
+
+			/*
+			 * Find the first non-section-aligned page, and point
+			 * memblock_limit at it. This relies on rounding the
+			 * limit down to be section-aligned, which happens at
+			 * the end of this function.
+			 *
+			 * With this algorithm, the start or end of almost any
+			 * bank can be non-section-aligned. The only exception
+			 * is that the start of the bank 0 must be section-
+			 * aligned, since otherwise memory would need to be
+			 * allocated when mapping the start of bank 0, which
+			 * occurs before any free memory is mapped.
+			 */
+			if (!memblock_limit) {
+				if (!IS_ALIGNED(bank->start, SECTION_SIZE))
+					memblock_limit = bank->start;
+				else if (!IS_ALIGNED(bank_end, SECTION_SIZE))
+					memblock_limit = bank_end;
+			}
+		}
 		j++;
 	}
 #ifdef CONFIG_HIGHMEM
@@ -1079,7 +1103,18 @@ void __init sanity_check_meminfo(void)
 #endif
 	meminfo.nr_banks = j;
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
-	memblock_set_current_limit(arm_lowmem_limit);
+
+	/*
+	 * Round the memblock limit down to a section size. This
+	 * helps to ensure that we will allocate memory from the
+	 * last full section, which should be mapped.
+	 */
+	if (memblock_limit)
+		memblock_limit = round_down(memblock_limit, SECTION_SIZE);
+	if (!memblock_limit)
+		memblock_limit = arm_lowmem_limit;
+
+	memblock_set_current_limit(memblock_limit);
 }
 
 static inline void prepare_page_table(void)
@@ -1160,7 +1195,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	/*
 	 * Allocate the vector page early.
 	 */
-	vectors = early_alloc(PAGE_SIZE);
+	vectors = early_alloc(PAGE_SIZE * 2);
 
 	early_trap_init(vectors);
 
@@ -1205,15 +1240,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
+#ifdef CONFIG_KUSER_HELPERS
 	map.type = MT_HIGH_VECTORS;
+#else
+	map.type = MT_LOW_VECTORS;
+#endif
 	create_mapping(&map);
 
 	if (!vectors_high()) {
 		map.virtual = 0;
+		map.length = PAGE_SIZE * 2;
 		map.type = MT_LOW_VECTORS;
 		create_mapping(&map);
 	}
 
+	/* Now create a kernel read-only mapping */
+	map.pfn += 1;
+	map.virtual = 0xffff0000 + PAGE_SIZE;
+	map.length = PAGE_SIZE;
+	map.type = MT_LOW_VECTORS;
+	create_mapping(&map);
+
 	/*
 	 * Ask the machine support to map in the statically mapped devices.
 	 */
@@ -1276,8 +1323,6 @@ void __init paging_init(struct machine_desc *mdesc)
 {
 	void *zero_page;
 
-	memblock_set_current_limit(arm_lowmem_limit);
-
 	build_mem_type_table();
 	prepare_page_table();
 	map_lowmem();
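The memblock_limit logic introduced above can be exercised in isolation: scan banks for the first non-section-aligned boundary, then round the limit down to a full section. A stand-alone sketch with 1 MiB sections and an invented bank layout (all addresses are illustrative):

    #include <stdio.h>

    #define SECTION_SIZE     (1UL << 20)                 /* 1 MiB sections */
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
    #define round_down(x, a) ((x) & ~((a) - 1))

    int main(void)
    {
        /* Invented layout: bank 1 ends mid-section. */
        unsigned long starts[] = { 0x80000000UL, 0xa0000000UL };
        unsigned long ends[]   = { 0xa0000000UL, 0xafe80000UL };
        unsigned long memblock_limit = 0, arm_lowmem_limit = 0;

        for (int i = 0; i < 2; i++) {
            if (ends[i] > arm_lowmem_limit)
                arm_lowmem_limit = ends[i];
            if (!memblock_limit) {
                if (!IS_ALIGNED(starts[i], SECTION_SIZE))
                    memblock_limit = starts[i];
                else if (!IS_ALIGNED(ends[i], SECTION_SIZE))
                    memblock_limit = ends[i];
            }
        }

        if (memblock_limit)
            memblock_limit = round_down(memblock_limit, SECTION_SIZE);
        else
            memblock_limit = arm_lowmem_limit;

        printf("limit = %#lx\n", memblock_limit);   /* prints 0xafe00000 */
        return 0;
    }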
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index f64afb9f1bd5..bdd3be4be77a 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -110,7 +110,7 @@ ENTRY(cpu_v7_set_pte_ext)
 ARM(	str	r3, [r0, #2048]! )
 THUMB(	add	r0, r0, #2048 )
 THUMB(	str	r3, [r0] )
-	ALT_SMP(mov	pc,lr)
+	ALT_SMP(W(nop))
 	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte
 #endif
 	mov	pc, lr
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index c36ac69488c8..01a719e18bb0 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -81,7 +81,7 @@ ENTRY(cpu_v7_set_pte_ext)
 	tst	r3, #1 << (55 - 32)		@ L_PTE_DIRTY
 	orreq	r2, #L_PTE_RDONLY
 1:	strd	r2, r3, [r0]
-	ALT_SMP(mov	pc, lr)
+	ALT_SMP(W(nop))
 	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte
 #endif
 	mov	pc, lr
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 5c6d5a3050ea..73398bcf9bd8 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -75,13 +75,14 @@ ENTRY(cpu_v7_do_idle)
 ENDPROC(cpu_v7_do_idle)
 
 ENTRY(cpu_v7_dcache_clean_area)
-	ALT_SMP(mov	pc, lr)			@ MP extensions imply L1 PTW
-	ALT_UP(W(nop))
-	dcache_line_size r2, r3
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	ALT_SMP(W(nop))			@ MP extensions imply L1 PTW
+	ALT_UP_B(1f)
+	mov	pc, lr
+1:	dcache_line_size r2, r3
+2:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, r2
 	subs	r1, r1, r2
-	bhi	1b
+	bhi	2b
 	dsb
 	mov	pc, lr
 ENDPROC(cpu_v7_dcache_clean_area)
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 3dc5cbea86cc..a5b5ff6e68d2 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -29,6 +29,13 @@ config PLAT_S5P
 	help
 	  Base platform code for Samsung's S5P series SoC.
 
+config SAMSUNG_PM
+	bool
+	depends on PM && (PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5P64X0 || S5P_PM)
+	default y
+	help
+	  Base platform power management code for samsung code
+
 if PLAT_SAMSUNG
 
 # boot configurations
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 98d07d8fc7a7..199bbe304d02 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -51,7 +51,7 @@ obj-$(CONFIG_SAMSUNG_DMADEV) += dma-ops.o
 
 # PM support
 
-obj-$(CONFIG_PM)		+= pm.o
+obj-$(CONFIG_SAMSUNG_PM)	+= pm.o
 obj-$(CONFIG_SAMSUNG_PM_GPIO)	+= pm-gpio.o
 obj-$(CONFIG_SAMSUNG_PM_CHECK)	+= pm-check.o
 
diff --git a/arch/arm/plat-samsung/include/plat/clock.h b/arch/arm/plat-samsung/include/plat/clock.h
index a62753dc15ba..df45d6edc98d 100644
--- a/arch/arm/plat-samsung/include/plat/clock.h
+++ b/arch/arm/plat-samsung/include/plat/clock.h
 extern struct clk clk_ext;
 extern struct clksrc_clk clk_epllref;
 extern struct clksrc_clk clk_esysclk;
 
+/* S3C24XX UART clocks */
+extern struct clk s3c24xx_clk_uart0;
+extern struct clk s3c24xx_clk_uart1;
+extern struct clk s3c24xx_clk_uart2;
+
 /* S3C64XX specific clocks */
 extern struct clk clk_h2;
 extern struct clk clk_27m;
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h
index 5d47ca35cabd..6bc1a8f471e3 100644
--- a/arch/arm/plat-samsung/include/plat/pm.h
+++ b/arch/arm/plat-samsung/include/plat/pm.h
@@ -19,7 +19,7 @@
 
 struct device;
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_SAMSUNG_PM
 
 extern __init int s3c_pm_init(void);
 extern __init int s3c64xx_pm_init(void);
@@ -58,8 +58,6 @@ extern unsigned char pm_uart_udivslot; /* true to save UART UDIVSLOT */
 
 /* from sleep.S */
 
-extern void s3c_cpu_resume(void);
-
 extern int s3c2410_cpu_suspend(unsigned long);
 
 /* sleep save info */
@@ -106,12 +104,14 @@ extern void s3c_pm_do_save(struct sleep_save *ptr, int count);
 extern void s3c_pm_do_restore(struct sleep_save *ptr, int count);
 extern void s3c_pm_do_restore_core(struct sleep_save *ptr, int count);
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_SAMSUNG_PM
 extern int s3c_irq_wake(struct irq_data *data, unsigned int state);
 extern int s3c_irqext_wake(struct irq_data *data, unsigned int state);
+extern void s3c_cpu_resume(void);
 #else
 #define s3c_irq_wake NULL
 #define s3c_irqext_wake NULL
+#define s3c_cpu_resume NULL
 #endif
 
 /* PM debug functions */
diff --git a/arch/arm/plat-samsung/init.c b/arch/arm/plat-samsung/init.c
index 3e5c4619caa5..50a3ea0037db 100644
--- a/arch/arm/plat-samsung/init.c
+++ b/arch/arm/plat-samsung/init.c
@@ -55,12 +55,13 @@ void __init s3c_init_cpu(unsigned long idcode,
 
 	printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode);
 
-	if (cpu->map_io == NULL || cpu->init == NULL) {
+	if (cpu->init == NULL) {
 		printk(KERN_ERR "CPU %s support not enabled\n", cpu->name);
 		panic("Unsupported Samsung CPU");
 	}
 
-	cpu->map_io();
+	if (cpu->map_io)
+		cpu->map_io();
 }
 
 /* s3c24xx_init_clocks
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index ea3613642451..d0c23010b693 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -80,7 +80,7 @@ unsigned char pm_uart_udivslot;
 
 #ifdef CONFIG_SAMSUNG_PM_DEBUG
 
-static struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS];
+static struct pm_uart_save uart_save;
 
 static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save)
 {
@@ -101,11 +101,7 @@ static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save)
 
 static void s3c_pm_save_uarts(void)
 {
-	struct pm_uart_save *save = uart_save;
-	unsigned int uart;
-
-	for (uart = 0; uart < CONFIG_SERIAL_SAMSUNG_UARTS; uart++, save++)
-		s3c_pm_save_uart(uart, save);
+	s3c_pm_save_uart(CONFIG_DEBUG_S3C_UART, &uart_save);
 }
 
 static void s3c_pm_restore_uart(unsigned int uart, struct pm_uart_save *save)
@@ -126,11 +122,7 @@ static void s3c_pm_restore_uart(unsigned int uart, struct pm_uart_save *save)
 
 static void s3c_pm_restore_uarts(void)
 {
-	struct pm_uart_save *save = uart_save;
-	unsigned int uart;
-
-	for (uart = 0; uart < CONFIG_SERIAL_SAMSUNG_UARTS; uart++, save++)
-		s3c_pm_restore_uart(uart, save);
+	s3c_pm_restore_uart(CONFIG_DEBUG_S3C_UART, &uart_save);
 }
 #else
 static void s3c_pm_save_uarts(void) { }
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index f71c37edca26..8a6295c86209 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -170,9 +170,10 @@ static void __init xen_percpu_init(void *unused)
 	per_cpu(xen_vcpu, cpu) = vcpup;
 
 	enable_percpu_irq(xen_events_irq, 0);
+	put_cpu();
 }
 
-static void xen_restart(char str, const char *cmd)
+static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
 {
 	struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
 	int rc;
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index c92de4163eba..b25763bc0ec4 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -42,14 +42,15 @@
 #define	TPIDR_EL1	18	/* Thread ID, Privileged */
 #define	AMAIR_EL1	19	/* Aux Memory Attribute Indirection Register */
 #define	CNTKCTL_EL1	20	/* Timer Control Register (EL1) */
+#define	PAR_EL1		21	/* Physical Address Register */
 /* 32bit specific registers. Keep them at the end of the range */
-#define	DACR32_EL2	21	/* Domain Access Control Register */
-#define	IFSR32_EL2	22	/* Instruction Fault Status Register */
-#define	FPEXC32_EL2	23	/* Floating-Point Exception Control Register */
-#define	DBGVCR32_EL2	24	/* Debug Vector Catch Register */
-#define	TEECR32_EL1	25	/* ThumbEE Configuration Register */
-#define	TEEHBR32_EL1	26	/* ThumbEE Handler Base Register */
-#define	NR_SYS_REGS	27
+#define	DACR32_EL2	22	/* Domain Access Control Register */
+#define	IFSR32_EL2	23	/* Instruction Fault Status Register */
+#define	FPEXC32_EL2	24	/* Floating-Point Exception Control Register */
+#define	DBGVCR32_EL2	25	/* Debug Vector Catch Register */
+#define	TEECR32_EL1	26	/* ThumbEE Configuration Register */
+#define	TEEHBR32_EL1	27	/* ThumbEE Handler Base Register */
+#define	NR_SYS_REGS	28
 
 /* 32bit mapping */
 #define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
@@ -69,6 +70,8 @@
 #define c5_AIFSR	(AFSR1_EL1 * 2)	/* Auxiliary Instr Fault Status R */
 #define c6_DFAR		(FAR_EL1 * 2)	/* Data Fault Address Register */
 #define c6_IFAR		(c6_DFAR + 1)	/* Instruction Fault Address Register */
+#define c7_PAR		(PAR_EL1 * 2)	/* Physical Address Register */
+#define c7_PAR_high	(c7_PAR + 1)	/* PAR top 32 bits */
 #define c10_PRRR	(MAIR_EL1 * 2)	/* Primary Region Remap Register */
 #define c10_NMRR	(c10_PRRR + 1)	/* Normal Memory Remap Register */
 #define c12_VBAR	(VBAR_EL1 * 2)	/* Vector Base Address Register */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 644d73956864..0859a4ddd1e7 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -129,7 +129,7 @@ struct kvm_vcpu_arch {
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
 	/* Target CPU and feature flags */
-	u32 target;
+	int target;
 	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
 
 	/* Detect first run of a vcpu */
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 3659e460071d..23a3c4791d86 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -24,10 +24,10 @@
 #include <linux/compiler.h>
 
 #ifndef CONFIG_ARM64_64K_PAGES
-#define THREAD_SIZE_ORDER	1
+#define THREAD_SIZE_ORDER	2
 #endif
 
-#define THREAD_SIZE		8192
+#define THREAD_SIZE		16384
 #define THREAD_START_SP		(THREAD_SIZE - 16)
 
 #ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 46b3beb4b773..717031a762c2 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -35,6 +35,7 @@ struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		fullmm;
 	struct vm_area_struct	*vma;
+	unsigned long		start, end;
 	unsigned long		range_start;
 	unsigned long		range_end;
 	unsigned int		nr;
@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = fullmm;
+	tlb->fullmm = !(start | (end+1));
+	tlb->start = start;
+	tlb->end = end;
 	tlb->vma = NULL;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
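The `!(start | (end+1))` test encodes the convention that a full-mm flush is requested as start == 0 and end == ~0UL; any other range leaves fullmm clear. A stand-alone check of that arithmetic:

    #include <assert.h>

    static int is_fullmm(unsigned long start, unsigned long end)
    {
        return !(start | (end + 1));
    }

    int main(void)
    {
        assert(is_fullmm(0, ~0UL));             /* 0 | (~0UL + 1) == 0 -> full flush */
        assert(!is_fullmm(0x1000, 0x2000));     /* a partial range is not full */
        return 0;
    }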
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 439827271e3d..26e310c54344 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -21,6 +21,7 @@
 #define BOOT_CPU_MODE_EL2	(0x0e12b007)
 
 #ifndef __ASSEMBLY__
+#include <asm/cacheflush.h>
 
 /*
  * __boot_cpu_mode records what mode CPUs were booted in.
@@ -36,9 +37,20 @@ extern u32 __boot_cpu_mode[2];
 void __hyp_set_vectors(phys_addr_t phys_vector_base);
 phys_addr_t __hyp_get_vectors(void);
 
+static inline void sync_boot_mode(void)
+{
+	/*
+	 * As secondaries write to __boot_cpu_mode with caches disabled, we
+	 * must flush the corresponding cache entries to ensure the visibility
+	 * of their writes.
+	 */
+	__flush_dcache_area(__boot_cpu_mode, sizeof(__boot_cpu_mode));
+}
+
 /* Reports the availability of HYP mode */
 static inline bool is_hyp_mode_available(void)
 {
+	sync_boot_mode();
 	return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
 		__boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
 }
@@ -46,6 +58,7 @@ static inline bool is_hyp_mode_available(void)
 /* Check if the bootloader has booted CPUs in different modes */
 static inline bool is_hyp_mode_mismatched(void)
 {
+	sync_boot_mode();
 	return __boot_cpu_mode[0] != __boot_cpu_mode[1];
 }
 
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 1d1314280a03..6ad781b21c08 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -121,7 +121,7 @@
 
 	.macro	get_thread_info, rd
 	mov	\rd, sp
-	and	\rd, \rd, #~((1 << 13) - 1)	// top of 8K stack
+	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
 	.endm
 
 /*
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 9ba33c40cdf8..12e6ccb88691 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -107,7 +107,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 static int
 armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-	int mapping = (*event_map)[config];
+	int mapping;
+
+	if (config >= PERF_COUNT_HW_MAX)
+		return -EINVAL;
+
+	mapping = (*event_map)[config];
 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
@@ -317,6 +322,9 @@ validate_event(struct pmu_hw_events *hw_events,
 	struct hw_perf_event fake_event = event->hw;
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
+	if (is_software_event(event))
+		return 1;
+
 	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
 		return 1;
 
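The first hunk closes an out-of-bounds read: `config` arrives from user space and previously indexed the event map before any range check. The same guard in miniature; table shape and contents are illustrative, not the real perf maps:

    #include <errno.h>

    #define PERF_COUNT_HW_MAX  10
    #define HW_OP_UNSUPPORTED  0xffff

    static const unsigned event_map[PERF_COUNT_HW_MAX] = { /* illustrative */ };

    static int map_event(unsigned long long config)
    {
        unsigned mapping;

        if (config >= PERF_COUNT_HW_MAX)    /* reject before indexing */
            return -EINVAL;

        mapping = event_map[config];
        return mapping == HW_OP_UNSUPPORTED ? -ENOENT : (int)mapping;
    }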
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 1788bf6b471f..57fb55c44c90 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -81,7 +81,7 @@ void soft_restart(unsigned long addr)
 void (*pm_power_off)(void);
 EXPORT_SYMBOL_GPL(pm_power_off);
 
-void (*arm_pm_restart)(char str, const char *cmd);
+void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 EXPORT_SYMBOL_GPL(arm_pm_restart);
 
 void arch_cpu_idle_prepare(void)
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index ff985e3d8b72..1ac0bbbdddb2 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -214,6 +214,7 @@ __kvm_hyp_code_start:
 	mrs	x21,	tpidr_el1
 	mrs	x22,	amair_el1
 	mrs	x23,	cntkctl_el1
+	mrs	x24,	par_el1
 
 	stp	x4, x5, [x3]
 	stp	x6, x7, [x3, #16]
@@ -225,6 +226,7 @@ __kvm_hyp_code_start:
 	stp	x18, x19, [x3, #112]
 	stp	x20, x21, [x3, #128]
 	stp	x22, x23, [x3, #144]
+	str	x24, [x3, #160]
 .endm
 
 .macro restore_sysregs
@@ -243,6 +245,7 @@ __kvm_hyp_code_start:
 	ldp	x18, x19, [x3, #112]
 	ldp	x20, x21, [x3, #128]
 	ldp	x22, x23, [x3, #144]
+	ldr	x24, [x3, #160]
 
 	msr	vmpidr_el2,	x4
 	msr	csselr_el1,	x5
@@ -264,6 +267,7 @@ __kvm_hyp_code_start:
 	msr	tpidr_el1,	x21
 	msr	amair_el1,	x22
 	msr	cntkctl_el1,	x23
+	msr	par_el1,	x24
 .endm
 
 .macro skip_32bit_state tmp, target
@@ -600,6 +604,8 @@ END(__kvm_vcpu_run)
 
 // void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 ENTRY(__kvm_tlb_flush_vmid_ipa)
+	dsb	ishst
+
 	kern_hyp_va	x0
 	ldr	x2, [x0, #KVM_VTTBR]
 	msr	vttbr_el2, x2
@@ -621,6 +627,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
 ENDPROC(__kvm_tlb_flush_vmid_ipa)
 
 ENTRY(__kvm_flush_vm_context)
+	dsb	ishst
 	tlbi	alle1is
 	ic	ialluis
 	dsb	sy
@@ -753,6 +760,10 @@ el1_trap:
 	 */
 	tbnz	x1, #7, 1f	// S1PTW is set
 
+	/* Preserve PAR_EL1 */
+	mrs	x3, par_el1
+	push	x3, xzr
+
 	/*
 	 * Permission fault, HPFAR_EL2 is invalid.
 	 * Resolve the IPA the hard way using the guest VA.
@@ -766,6 +777,8 @@ el1_trap:
 
 	/* Read result */
 	mrs	x3, par_el1
+	pop	x0, xzr		// Restore PAR_EL1 from the stack
+	msr	par_el1, x0
 	tbnz	x3, #0, 3f	// Bail out if we failed the translation
 	ubfx	x3, x3, #12, #36	// Extract IPA
 	lsl	x3, x3, #4	// and present it like HPFAR
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 94923609753b..02e9d09e1d80 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -211,6 +211,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	/* FAR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
 	  NULL, reset_unknown, FAR_EL1 },
+	/* PAR_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
+	  NULL, reset_unknown, PAR_EL1 },
 
 	/* PMINTENSET_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c
index f91431963452..7de083d19b7e 100644
--- a/arch/avr32/boards/atngw100/mrmt.c
+++ b/arch/avr32/boards/atngw100/mrmt.c
@@ -150,7 +150,6 @@ static struct ac97c_platform_data __initdata ac97c0_data = {
 static struct platform_device rmt_ts_device = {
 	.name	= "ucb1400_ts",
 	.id	= -1,
-	}
 };
 #endif
 
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 33a97929d055..77d442ab28c8 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -158,6 +158,7 @@ source "kernel/Kconfig.hz"
 endmenu
 
 source "init/Kconfig"
+source "kernel/Kconfig.freezer"
 source "drivers/Kconfig"
 source "fs/Kconfig"
 
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 7913695b2fcb..efbd2929aeb7 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -31,7 +31,7 @@ CONFIG_ACPI_FAN=m
 CONFIG_ACPI_DOCK=y
 CONFIG_ACPI_PROCESSOR=m
 CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=m
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index f8e913365423..f64980dd20c3 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -25,7 +25,7 @@ CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
 CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=m
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index a5a9e02e60a0..0f4e9e41f130 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -31,7 +31,7 @@ CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
 CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=m
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/ia64/configs/xen_domu_defconfig b/arch/ia64/configs/xen_domu_defconfig
index 37b9b422caad..b025acfde5c1 100644
--- a/arch/ia64/configs/xen_domu_defconfig
+++ b/arch/ia64/configs/xen_domu_defconfig
@@ -32,7 +32,7 @@ CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
 CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=m
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index ef3a9de01954..bc5efc7c3f3f 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -22,7 +22,7 @@
  * unmapping a portion of the virtual address space, these hooks are called according to
  * the following template:
  *
- *	tlb <- tlb_gather_mmu(mm, full_mm_flush);	// start unmap for address space MM
+ *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
  * {
  *   for each vma that needs a shootdown do {
  *     tlb_start_vma(tlb, vma);
@@ -58,6 +58,7 @@ struct mmu_gather {
 	unsigned int		max;
 	unsigned char		fullmm;		/* non-zero means full mm flush */
 	unsigned char		need_flush;	/* really unmapped some PTEs? */
+	unsigned long		start, end;
 	unsigned long		start_addr;
 	unsigned long		end_addr;
 	struct page		**pages;
@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
 	tlb->nr = 0;
-	tlb->fullmm = full_mm_flush;
+	tlb->fullmm = !(start | (end+1));
+	tlb->start = start;
+	tlb->end = end;
 	tlb->start_addr = ~0UL;
 }
 
diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c
index 2291a7d69d49..fa277aecfb78 100644
--- a/arch/m68k/emu/natfeat.c
+++ b/arch/m68k/emu/natfeat.c
@@ -18,9 +18,11 @@
 #include <asm/machdep.h>
 #include <asm/natfeat.h>
 
+extern long nf_get_id2(const char *feature_name);
+
 asm("\n"
-"	.global nf_get_id,nf_call\n"
-"nf_get_id:\n"
+"	.global nf_get_id2,nf_call\n"
+"nf_get_id2:\n"
 "	.short	0x7300\n"
 "	rts\n"
 "nf_call:\n"
@@ -29,12 +31,25 @@ asm("\n"
29"1: moveq.l #0,%d0\n" 31"1: moveq.l #0,%d0\n"
30" rts\n" 32" rts\n"
31" .section __ex_table,\"a\"\n" 33" .section __ex_table,\"a\"\n"
32" .long nf_get_id,1b\n" 34" .long nf_get_id2,1b\n"
33" .long nf_call,1b\n" 35" .long nf_call,1b\n"
34" .previous"); 36" .previous");
35EXPORT_SYMBOL_GPL(nf_get_id);
36EXPORT_SYMBOL_GPL(nf_call); 37EXPORT_SYMBOL_GPL(nf_call);
37 38
39long nf_get_id(const char *feature_name)
40{
41 /* feature_name may be in vmalloc()ed memory, so make a copy */
42 char name_copy[32];
43 size_t n;
44
45 n = strlcpy(name_copy, feature_name, sizeof(name_copy));
46 if (n >= sizeof(name_copy))
47 return 0;
48
49 return nf_get_id2(name_copy);
50}
51EXPORT_SYMBOL_GPL(nf_get_id);
52
38void nfprint(const char *fmt, ...) 53void nfprint(const char *fmt, ...)
39{ 54{
40 static char buf[256]; 55 static char buf[256];
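The nf_get_id() wrapper exists because the NatFeat trap hands the emulator a pointer it dereferences directly, and a vmalloc()ed string may not be physically contiguous; copying into a small on-stack buffer both bounds and linearizes the name. The same defensive pattern in plain C, with an illustrative signature standing in for the trap:

    #include <string.h>

    /* raw_query stands in for the trap-based nf_get_id2(); illustrative only. */
    static long query_feature(long (*raw_query)(const char *), const char *name)
    {
        char name_copy[32];

        if (strlen(name) >= sizeof(name_copy))
            return 0;               /* fail closed, like the strlcpy check */
        strcpy(name_copy, name);

        return raw_query(name_copy);
    }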
diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h
index 444ea8a09e9f..ef881cfbbca9 100644
--- a/arch/m68k/include/asm/div64.h
+++ b/arch/m68k/include/asm/div64.h
@@ -15,16 +15,17 @@
 		unsigned long long n64;				\
 	} __n;							\
 	unsigned long __rem, __upper;				\
+	unsigned long __base = (base);				\
 								\
 	__n.n64 = (n);						\
 	if ((__upper = __n.n32[0])) {				\
 		asm ("divul.l %2,%1:%0"				\
 		     : "=d" (__n.n32[0]), "=d" (__upper)	\
-		     : "d" (base), "0" (__n.n32[0]));		\
+		     : "d" (__base), "0" (__n.n32[0]));		\
 	}							\
 	asm ("divu.l %2,%1:%0"					\
 	     : "=d" (__n.n32[1]), "=d" (__rem)			\
-	     : "d" (base), "1" (__upper), "0" (__n.n32[1]));	\
+	     : "d" (__base), "1" (__upper), "0" (__n.n32[1]));	\
 	(n) = __n.n64;						\
 	__rem;							\
 })
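The new `__base` temporary guards against the classic macro hazard: `base` was previously substituted into two asm statements and therefore evaluated twice. A stand-alone demonstration of the hazard with an ordinary macro:

    #include <stdio.h>

    /* DIV_MOD evaluates its divisor argument twice, just as "base" was
     * evaluated in two asm statements before the __base temporary. */
    #define DIV_MOD(n, d)  ((n) / (d) + (n) % (d))

    static int calls;
    static int next_divisor(void) { return ++calls, 2; }

    int main(void)
    {
        (void)DIV_MOD(10, next_divisor());
        printf("divisor evaluated %d times\n", calls);  /* prints 2 */
        return 0;
    }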
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index d22a4ecffff4..4fab52294d98 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -28,7 +28,7 @@ config MICROBLAZE
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_IDLE_POLL_SETUP
 	select MODULES_USE_ELF_RELA
-	select CLONE_BACKWARDS
+	select CLONE_BACKWARDS3
 
 config SWAP
 	def_bool n
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index c3abed332301..e12764c2a9d0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -114,6 +114,7 @@ config BCM47XX
 	select FW_CFE
 	select HW_HAS_PCI
 	select IRQ_CPU
+	select SYS_HAS_CPU_MIPS32_R1
 	select NO_EXCEPT_FILL
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
index ba611927749b..2b8b118398c4 100644
--- a/arch/mips/bcm47xx/Kconfig
+++ b/arch/mips/bcm47xx/Kconfig
@@ -2,7 +2,6 @@ if BCM47XX
 
 config BCM47XX_SSB
 	bool "SSB Support for Broadcom BCM47XX"
-	select SYS_HAS_CPU_MIPS32_R1
 	select SSB
 	select SSB_DRIVER_MIPS
 	select SSB_DRIVER_EXTIF
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 1dc086087a72..fa44f3ec5302 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -17,6 +17,8 @@
 #define current_cpu_type()	current_cpu_data.cputype
 #endif
 
+#define boot_cpu_type()		cpu_data[0].cputype
+
 /*
  * SMP assumption: Options of CPU 0 are a superset of all processors.
  * This is true for all known MIPS systems.
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index 5b2f2e68e57f..9488fa5f8866 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -25,8 +25,12 @@
 #else
 #define CAC_BASE		_AC(0x80000000, UL)
 #endif
+#ifndef IO_BASE
 #define IO_BASE			_AC(0xa0000000, UL)
+#endif
+#ifndef UNCAC_BASE
 #define UNCAC_BASE		_AC(0xa0000000, UL)
+#endif
 
 #ifndef MAP_BASE
 #ifdef CONFIG_KVM_GUEST
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
index b7a23064841f..88e292b7719e 100644
--- a/arch/mips/include/uapi/asm/siginfo.h
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -25,11 +25,12 @@ struct siginfo;
 /*
  * Careful to keep union _sifields from shifting ...
  */
-#if __SIZEOF_LONG__ == 4
+#if _MIPS_SZLONG == 32
 #define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int))
-#endif
-#if __SIZEOF_LONG__ == 8
+#elif _MIPS_SZLONG == 64
 #define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
+#else
+#error _MIPS_SZLONG neither 32 nor 64
 #endif
 
 #include <asm-generic/siginfo.h>
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index f739aedcb509..bd79c4f9bff4 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -54,7 +54,11 @@ LEAF(bmips_smp_movevec)
 	/* set up CPU1 CBR; move BASE to 0xa000_0000 */
 	li	k0, 0xff400000
 	mtc0	k0, $22, 6
-	li	k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_1
+	/* set up relocation vector address based on thread ID */
+	mfc0	k1, $22, 3
+	srl	k1, 16
+	andi	k1, 0x8000
+	or	k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_0
 	or	k0, k1
 	li	k1, 0xa0080000
 	sw	k1, 0(k0)
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index c0bb4d59076a..126da74d4c55 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -66,6 +66,8 @@ static void __init bmips_smp_setup(void)
 	int i, cpu = 1, boot_cpu = 0;
 
 #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
+	int cpu_hw_intr;
+
 	/* arbitration priority */
 	clear_c0_brcm_cmt_ctrl(0x30);
 
@@ -79,15 +81,13 @@ static void __init bmips_smp_setup(void)
79 * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread 81 * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread
80 * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output 82 * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
81 * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output 83 * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
82 *
83 * If booting from TP1, leave the existing CMT interrupt routing
84 * such that TP0 responds to SW1 and TP1 responds to SW0.
85 */ 84 */
86 if (boot_cpu == 0) 85 if (boot_cpu == 0)
87 change_c0_brcm_cmt_intr(0xf8018000, 86 cpu_hw_intr = 0x02;
88 (0x02 << 27) | (0x03 << 15));
89 else 87 else
90 change_c0_brcm_cmt_intr(0xf8018000, (0x1d << 27)); 88 cpu_hw_intr = 0x1d;
89
90 change_c0_brcm_cmt_intr(0xf8018000, (cpu_hw_intr << 27) | (0x03 << 15));
91 91
92 /* single core, 2 threads (2 pipelines) */ 92 /* single core, 2 threads (2 pipelines) */
93 max_cpus = 2; 93 max_cpus = 2;
@@ -202,9 +202,15 @@ static void bmips_init_secondary(void)
 #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
 	void __iomem *cbr = BMIPS_GET_CBR();
 	unsigned long old_vec;
+	unsigned long relo_vector;
+	int boot_cpu;
+
+	boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
+	relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 :
+				 BMIPS_RELO_VECTOR_CONTROL_1;
 
-	old_vec = __raw_readl(cbr + BMIPS_RELO_VECTOR_CONTROL_1);
-	__raw_writel(old_vec & ~0x20000000, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
+	old_vec = __raw_readl(cbr + relo_vector);
+	__raw_writel(old_vec & ~0x20000000, cbr + relo_vector);
 
 	clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
 #elif defined(CONFIG_CPU_BMIPS5000)
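Both bmips hunks key off the same idea: bit 31 of the CMT local register identifies the thread the core booted from, and that selects which relocation vector register to program. A stand-alone paraphrase of the selection; the accessor is stubbed and the register offsets are illustrative, not the real hardware values:

    #define BMIPS_RELO_VECTOR_CONTROL_0 0x20    /* illustrative offset */
    #define BMIPS_RELO_VECTOR_CONTROL_1 0x24    /* illustrative offset */

    static unsigned long read_c0_brcm_cmt_local(void);  /* platform-provided */

    static unsigned long pick_relo_vector(void)
    {
        int boot_cpu = !!(read_c0_brcm_cmt_local() & (1UL << 31));

        /* the boot thread keeps vector 0; the other thread gets vector 1 */
        return boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0
                        : BMIPS_RELO_VECTOR_CONTROL_1;
    }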
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index e773659ccf9f..46048d24328c 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -803,6 +803,32 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
 					dec_insn.next_pc_inc;
 		return 1;
 		break;
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	case lwc2_op: /* This is bbit0 on Octeon */
+		if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
+			*contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			*contpc = regs->cp0_epc + 8;
+		return 1;
+	case ldc2_op: /* This is bbit032 on Octeon */
+		if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0)
+			*contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			*contpc = regs->cp0_epc + 8;
+		return 1;
+	case swc2_op: /* This is bbit1 on Octeon */
+		if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+			*contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			*contpc = regs->cp0_epc + 8;
+		return 1;
+	case sdc2_op: /* This is bbit132 on Octeon */
+		if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32)))
+			*contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			*contpc = regs->cp0_epc + 8;
+		return 1;
+#endif
 	case cop0_op:
 	case cop1_op:
 	case cop2_op:
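
All four Octeon bbit cases above share the MIPS I-type branch arithmetic: a taken branch goes to EPC + 4 + (sign-extended immediate << 2), and a not-taken branch skips past the delay slot to EPC + 8. A self-contained sketch of that computation (hypothetical helper, not part of the patch):

	#include <stdint.h>

	/* Continuation PC for a bit-test branch at 'epc'; mirrors the
	 * bbit0/bbit1 emulation above. */
	static uint64_t bbit_cont_pc(uint64_t epc, uint64_t rs_val,
				     unsigned int bit, int16_t simm,
				     int branch_if_set)
	{
		int bit_set = (rs_val >> bit) & 1ull;

		if (!!bit_set == !!branch_if_set)
			return epc + 4 + ((int64_t)simm << 2);	/* taken */
		return epc + 8;		/* fall through past the delay slot */
	}
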
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index e4b1140cdae0..3a2b6e9f25cf 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -166,7 +166,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)
 			reg.control[i] |= M_PERFCTL_USER;
 		if (ctr[i].exl)
 			reg.control[i] |= M_PERFCTL_EXL;
-		if (current_cpu_type() == CPU_XLR)
+		if (boot_cpu_type() == CPU_XLR)
 			reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS;
 		reg.counter[i] = 0x80000000 - ctr[i].count;
 	}
diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c
index d22dc0d6f289..2b7e837dc2e2 100644
--- a/arch/mips/pnx833x/common/platform.c
+++ b/arch/mips/pnx833x/common/platform.c
@@ -206,11 +206,13 @@ static struct resource pnx833x_ethernet_resources[] = {
 		.end   = PNX8335_IP3902_PORTS_END,
 		.flags = IORESOURCE_MEM,
 	},
+#ifdef CONFIG_SOC_PNX8335
 	[1] = {
 		.start = PNX8335_PIC_ETHERNET_INT,
 		.end   = PNX8335_PIC_ETHERNET_INT,
 		.flags = IORESOURCE_IRQ,
 	},
+#endif
 };
 
 static struct platform_device pnx833x_ethernet_device = {
diff --git a/arch/mips/powertv/asic/asic_devices.c b/arch/mips/powertv/asic/asic_devices.c
index 9f64c2387808..0238af1ba503 100644
--- a/arch/mips/powertv/asic/asic_devices.c
+++ b/arch/mips/powertv/asic/asic_devices.c
@@ -529,8 +529,7 @@ EXPORT_SYMBOL(asic_resource_get);
  */
 void platform_release_memory(void *ptr, int size)
 {
-	free_reserved_area((unsigned long)ptr, (unsigned long)(ptr + size),
-			   -1, NULL);
+	free_reserved_area(ptr, ptr + size, -1, NULL);
 }
 EXPORT_SYMBOL(platform_release_memory);
 
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 99dbab1c59ac..d60bf98fa5cf 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -55,6 +55,7 @@ config GENERIC_CSUM
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
 
 menu "Processor type and features"
 
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
new file mode 100644
index 000000000000..f11006361297
--- /dev/null
+++ b/arch/parisc/configs/c8000_defconfig
@@ -0,0 +1,279 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_PA8X00=y
+CONFIG_MLONGCALLS=y
+CONFIG_64BIT=y
+CONFIG_SMP=y
+CONFIG_PREEMPT=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_IOMMU_CCIO=y
+CONFIG_PCI=y
+CONFIG_PCI_LBA=y
+# CONFIG_SUPERIO is not set
+# CONFIG_CHASSIS_LCD_LED is not set
+# CONFIG_PDC_CHASSIS is not set
+# CONFIG_PDC_CHASSIS_WARN is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+# CONFIG_IPV6 is not set
+CONFIG_IP_DCCP=m
+# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_TIPC=m
+CONFIG_LLC2=m
+CONFIG_DNS_RESOLVER=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_STANDALONE is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=y
+CONFIG_PARPORT_PC_FIFO=y
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_SX8=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=6144
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_WCACHE=y
+CONFIG_ATA_OVER_ETH=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_PLATFORM=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_SIIMAGE=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_FC_ATTRS=y
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=y
+CONFIG_FUSION_SAS=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_NETCONSOLE=m
+CONFIG_TUN=y
+CONFIG_E1000=y
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_HIL_OLD is not set
+# CONFIG_KEYBOARD_HIL is not set
+CONFIG_MOUSE_PS2=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_CM109=m
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_PARKBD=m
+CONFIG_SERIO_GSCPS2=m
+# CONFIG_HP_SDC is not set
+CONFIG_SERIO_PCIPS2=m
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=8
+CONFIG_SERIAL_8250_RUNTIME_UARTS=8
+CONFIG_SERIAL_8250_EXTENDED=y
+# CONFIG_SERIAL_MUX is not set
+CONFIG_SERIAL_JSM=m
+CONFIG_PRINTER=y
+CONFIG_HW_RANDOM=y
+CONFIG_RAW_DRIVER=m
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_SSB=m
+CONFIG_SSB_DRIVER_PCICORE=y
+CONFIG_AGP=y
+CONFIG_AGP_PARISC=y
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_FOREIGN_ENDIAN=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+# CONFIG_FB_STI is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_STI_CONSOLE is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=m
+CONFIG_SND=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_VERBOSE_PRINTK=y
+CONFIG_SND_AD1889=m
+# CONFIG_SND_USB is not set
+# CONFIG_SND_GSC is not set
+CONFIG_HID_A4TECH=m
+CONFIG_HID_APPLE=m
+CONFIG_HID_BELKIN=m
+CONFIG_HID_CHERRY=m
+CONFIG_HID_CHICONY=m
+CONFIG_HID_CYPRESS=m
+CONFIG_HID_DRAGONRISE=m
+CONFIG_HID_EZKEY=m
+CONFIG_HID_KYE=m
+CONFIG_HID_GYRATION=m
+CONFIG_HID_TWINHAN=m
+CONFIG_HID_KENSINGTON=m
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MONTEREY=m
+CONFIG_HID_NTRIG=m
+CONFIG_HID_ORTEK=m
+CONFIG_HID_PANTHERLORD=m
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_SAMSUNG=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_GREENASIA=m
+CONFIG_HID_SMARTJOYPLUS=m
+CONFIG_HID_TOPSEED=m
+CONFIG_HID_THRUSTMASTER=m
+CONFIG_HID_ZEROPLUS=m
+CONFIG_USB_HID=m
+CONFIG_USB=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=m
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_NFS_FS=m
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_SLAB=y
+CONFIG_DEBUG_SLAB_LEAK=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_RT_MUTEX_TESTER=y
+CONFIG_PROVE_RCU_DELAY=y
+CONFIG_DEBUG_BLOCK_EXT_DEVT=y
+CONFIG_LATENCYTOP=y
+CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
+CONFIG_KEYS=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_FONTS=y
diff --git a/arch/parisc/include/asm/parisc-device.h b/arch/parisc/include/asm/parisc-device.h
index 9afdad6c2ffb..eaf4dc1c7294 100644
--- a/arch/parisc/include/asm/parisc-device.h
+++ b/arch/parisc/include/asm/parisc-device.h
@@ -23,6 +23,7 @@ struct parisc_device {
 	/* generic info returned from pdc_pat_cell_module() */
 	unsigned long	mod_info;	/* PAT specific - Misc Module info */
 	unsigned long	pmod_loc;	/* physical Module location */
+	unsigned long	mod0;
 #endif
 	u64		dma_mask;	/* DMA mask for I/O */
 	struct device 	dev;
@@ -61,4 +62,6 @@ parisc_get_drvdata(struct parisc_device *d)
 
 extern struct bus_type parisc_bus_type;
 
+int iosapic_serial_irq(struct parisc_device *dev);
+
 #endif /*_ASM_PARISC_PARISC_DEVICE_H_*/
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 2e65aa54bd10..c035673209f7 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -71,18 +71,27 @@ flush_cache_all_local(void)
 }
 EXPORT_SYMBOL(flush_cache_all_local);
 
+/* Virtual address of pfn. */
+#define pfn_va(pfn)	__va(PFN_PHYS(pfn))
+
 void
 update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
-	struct page *page = pte_page(*ptep);
+	unsigned long pfn = pte_pfn(*ptep);
+	struct page *page;
 
-	if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
-	    test_bit(PG_dcache_dirty, &page->flags)) {
+	/* We don't have pte special.  As a result, we can be called with
+	   an invalid pfn and we don't need to flush the kernel dcache page.
+	   This occurs with FireGL card in C8000.  */
+	if (!pfn_valid(pfn))
+		return;
 
-		flush_kernel_dcache_page(page);
+	page = pfn_to_page(pfn);
+	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
+		flush_kernel_dcache_page_addr(pfn_va(pfn));
 		clear_bit(PG_dcache_dirty, &page->flags);
 	} else if (parisc_requires_coherency())
-		flush_kernel_dcache_page(page);
+		flush_kernel_dcache_page_addr(pfn_va(pfn));
 }
 
 void
@@ -495,44 +504,42 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
 
 void flush_cache_mm(struct mm_struct *mm)
 {
+	struct vm_area_struct *vma;
+	pgd_t *pgd;
+
 	/* Flushing the whole cache on each cpu takes forever on
 	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
-	if (mm_total_size(mm) < parisc_cache_flush_threshold) {
-		struct vm_area_struct *vma;
-
-		if (mm->context == mfsp(3)) {
-			for (vma = mm->mmap; vma; vma = vma->vm_next) {
-				flush_user_dcache_range_asm(vma->vm_start,
-					vma->vm_end);
-				if (vma->vm_flags & VM_EXEC)
-					flush_user_icache_range_asm(
-					  vma->vm_start, vma->vm_end);
-			}
-		} else {
-			pgd_t *pgd = mm->pgd;
-
-			for (vma = mm->mmap; vma; vma = vma->vm_next) {
-				unsigned long addr;
-
-				for (addr = vma->vm_start; addr < vma->vm_end;
-				     addr += PAGE_SIZE) {
-					pte_t *ptep = get_ptep(pgd, addr);
-					if (ptep != NULL) {
-						pte_t pte = *ptep;
-						__flush_cache_page(vma, addr,
-						  page_to_phys(pte_page(pte)));
-					}
-				}
-			}
-		}
-		return;
-	}
-
-#ifdef CONFIG_SMP
-	flush_cache_all();
-#else
-	flush_cache_all_local();
-#endif
+	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+		flush_cache_all();
+		return;
+	}
+
+	if (mm->context == mfsp(3)) {
+		for (vma = mm->mmap; vma; vma = vma->vm_next) {
+			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
+			if ((vma->vm_flags & VM_EXEC) == 0)
+				continue;
+			flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+		}
+		return;
+	}
+
+	pgd = mm->pgd;
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		unsigned long addr;
+
+		for (addr = vma->vm_start; addr < vma->vm_end;
+		     addr += PAGE_SIZE) {
+			unsigned long pfn;
+			pte_t *ptep = get_ptep(pgd, addr);
+			if (!ptep)
+				continue;
+			pfn = pte_pfn(*ptep);
+			if (!pfn_valid(pfn))
+				continue;
+			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+		}
+	}
 }
 
 void
@@ -556,33 +563,32 @@ flush_user_icache_range(unsigned long start, unsigned long end)
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
+	unsigned long addr;
+	pgd_t *pgd;
+
 	BUG_ON(!vma->vm_mm->context);
 
-	if ((end - start) < parisc_cache_flush_threshold) {
-		if (vma->vm_mm->context == mfsp(3)) {
-			flush_user_dcache_range_asm(start, end);
-			if (vma->vm_flags & VM_EXEC)
-				flush_user_icache_range_asm(start, end);
-		} else {
-			unsigned long addr;
-			pgd_t *pgd = vma->vm_mm->pgd;
-
-			for (addr = start & PAGE_MASK; addr < end;
-			     addr += PAGE_SIZE) {
-				pte_t *ptep = get_ptep(pgd, addr);
-				if (ptep != NULL) {
-					pte_t pte = *ptep;
-					flush_cache_page(vma,
-						addr, pte_pfn(pte));
-				}
-			}
-		}
-	} else {
-#ifdef CONFIG_SMP
-		flush_cache_all();
-#else
-		flush_cache_all_local();
-#endif
+	if ((end - start) >= parisc_cache_flush_threshold) {
+		flush_cache_all();
+		return;
+	}
+
+	if (vma->vm_mm->context == mfsp(3)) {
+		flush_user_dcache_range_asm(start, end);
+		if (vma->vm_flags & VM_EXEC)
+			flush_user_icache_range_asm(start, end);
+		return;
+	}
+
+	pgd = vma->vm_mm->pgd;
+	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+		unsigned long pfn;
+		pte_t *ptep = get_ptep(pgd, addr);
+		if (!ptep)
+			continue;
+		pfn = pte_pfn(*ptep);
+		if (pfn_valid(pfn))
+			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
 	}
 }
 
588 594
@@ -591,9 +597,10 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 {
 	BUG_ON(!vma->vm_mm->context);
 
-	flush_tlb_page(vma, vmaddr);
-	__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
-
+	if (pfn_valid(pfn)) {
+		flush_tlb_page(vma, vmaddr);
+		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+	}
 }
 
 #ifdef CONFIG_PARISC_TMPALIAS
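
The rewritten parisc flush paths above share one loop shape: walk the range a page at a time, look up the PTE, skip holes and invalid pfns, and flush by physical address. Condensed, using the same helpers the patch uses (a sketch, not a drop-in function):

	/* Per-page flush loop as used by flush_cache_mm()/flush_cache_range()
	 * after this patch. */
	static void flush_vma_pages(struct vm_area_struct *vma, pgd_t *pgd)
	{
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
			unsigned long pfn;
			pte_t *ptep = get_ptep(pgd, addr);	/* NULL for holes */

			if (!ptep)
				continue;
			pfn = pte_pfn(*ptep);
			if (!pfn_valid(pfn))	/* e.g. the C8000 FireGL case */
				continue;
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
		}
	}
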
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 3295ef4a185d..f0b6722fc706 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -211,6 +211,7 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
 	/* REVISIT: who is the consumer of this? not sure yet... */
 	dev->mod_info = pa_pdc_cell->mod_info;	/* pass to PAT_GET_ENTITY() */
 	dev->pmod_loc = pa_pdc_cell->mod_location;
+	dev->mod0 = pa_pdc_cell->mod[0];
 
 	register_parisc_device(dev);	/* advertise device */
 
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 940188d1942c..07349b002687 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -56,13 +56,6 @@
 #define A(__x)	((unsigned long)(__x))
 
 /*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-#ifdef CONFIG_64BIT
-#include "sys32.h"
-#endif
-
-/*
  * Do a signal return - restore sigcontext.
  */
 
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index 33eca1b04926..6c6a271a6140 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -34,7 +34,6 @@
 #include <asm/uaccess.h>
 
 #include "signal32.h"
-#include "sys32.h"
 
 #define DEBUG_COMPAT_SIG 0
 #define DEBUG_COMPAT_SIG_LEVEL 2
diff --git a/arch/parisc/kernel/sys32.h b/arch/parisc/kernel/sys32.h
deleted file mode 100644
index 60dd470f39f8..000000000000
--- a/arch/parisc/kernel/sys32.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2002 Richard Hirst <rhirst@parisc-linux.org>
- * Copyright (C) 2003 James Bottomley <jejb@parisc-linux.org>
- * Copyright (C) 2003 Randolph Chung <tausq@parisc-linux.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#ifndef _PARISC64_KERNEL_SYS32_H
-#define _PARISC64_KERNEL_SYS32_H
-
-#include <linux/compat.h>
-
-/* Call a kernel syscall which will use kernel space instead of user
- * space for its copy_to/from_user.
- */
-#define KERNEL_SYSCALL(ret, syscall, args...) \
-{ \
-	mm_segment_t old_fs = get_fs(); \
-	set_fs(KERNEL_DS); \
-	ret = syscall(args); \
-	set_fs (old_fs); \
-}
-
-#endif
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index a134ff4da12e..bb9f3b64de55 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -42,8 +42,6 @@
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 
-#include "sys32.h"
-
 #undef DEBUG
 
 #ifdef DEBUG
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3bf72cd2c8fc..dbd9d3c991e8 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -566,7 +566,7 @@ config SCHED_SMT
 config PPC_DENORMALISATION
 	bool "PowerPC denormalisation exception handling"
 	depends on PPC_BOOK3S_64
-	default "n"
+	default "y" if PPC_POWERNV
 	---help---
 	  Add support for handling denormalisation of single precision
 	  values.  Useful for bare metal only.  If unsure say Y here.
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index c86fcb92358e..0e8cfd09da2f 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -58,7 +58,7 @@ CONFIG_SCHED_SMT=y
 CONFIG_PPC_DENORMALISATION=y
 CONFIG_PCCARD=y
 CONFIG_ELECTRA_CF=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_RPA=m
 CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
 CONFIG_PACKET=y
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 4b20f76172e2..0085dc4642c5 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -32,7 +32,7 @@ CONFIG_IRQ_ALL_CPUS=y
 CONFIG_SPARSEMEM_MANUAL=y
 CONFIG_PCI_MSI=y
 CONFIG_PCCARD=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index bea8587c3af5..1d4b9763895d 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -53,7 +53,7 @@ CONFIG_PPC_64K_PAGES=y
 CONFIG_PPC_SUBPAGE_PROT=y
 CONFIG_SCHED_SMT=y
 CONFIG_PPC_DENORMALISATION=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_RPA=m
 CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
 CONFIG_PACKET=y
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 09a8743143f3..d3e5e9bc8f94 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -55,6 +55,8 @@ struct device_node;
 #define EEH_PE_RECOVERING	(1 << 1)	/* Recovering PE	*/
 #define EEH_PE_PHB_DEAD		(1 << 2)	/* Dead PHB		*/
 
+#define EEH_PE_KEEP		(1 << 8)	/* Keep PE on hotplug	*/
+
 struct eeh_pe {
 	int type;			/* PE type: PHB/Bus/Device	*/
 	int state;			/* PE EEH dependent mode	*/
@@ -72,8 +74,8 @@ struct eeh_pe {
 	struct list_head child;		/* Child PEs		*/
 };
 
-#define eeh_pe_for_each_dev(pe, edev) \
-		list_for_each_entry(edev, &pe->edevs, list)
+#define eeh_pe_for_each_dev(pe, edev, tmp) \
+		list_for_each_entry_safe(edev, tmp, &pe->edevs, list)
 
 /*
  * The struct is used to trace EEH state for the associated
@@ -82,7 +84,13 @@ struct eeh_pe {
  * another tree except the currently existing tree of PCI
  * buses and PCI devices
  */
-#define EEH_DEV_IRQ_DISABLED	(1<<0)		/* Interrupt disabled	*/
+#define EEH_DEV_BRIDGE		(1 << 0)	/* PCI bridge		*/
+#define EEH_DEV_ROOT_PORT	(1 << 1)	/* PCIe root port	*/
+#define EEH_DEV_DS_PORT		(1 << 2)	/* Downstream port	*/
+#define EEH_DEV_IRQ_DISABLED	(1 << 3)	/* Interrupt disabled	*/
+#define EEH_DEV_DISCONNECTED	(1 << 4)	/* Removing from PE	*/
+
+#define EEH_DEV_SYSFS		(1 << 8)	/* Sysfs created	*/
 
 struct eeh_dev {
 	int mode;			/* EEH mode		*/
@@ -90,11 +98,13 @@ struct eeh_dev {
 	int config_addr;		/* Config address	*/
 	int pe_config_addr;		/* PE config address	*/
 	u32 config_space[16];		/* Saved PCI config space */
+	u8 pcie_cap;			/* Saved PCIe capability */
 	struct eeh_pe *pe;		/* Associated PE	*/
 	struct list_head list;		/* Form link list in the PE */
 	struct pci_controller *phb;	/* Associated PHB	*/
 	struct device_node *dn;		/* Associated device node	*/
 	struct pci_dev *pdev;		/* Associated PCI device	*/
+	struct pci_bus *bus;		/* PCI bus for partial hotplug	*/
 };
 
 static inline struct device_node *eeh_dev_to_of_node(struct eeh_dev *edev)
@@ -193,8 +203,10 @@ int eeh_phb_pe_create(struct pci_controller *phb);
 struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb);
 struct eeh_pe *eeh_pe_get(struct eeh_dev *edev);
 int eeh_add_to_parent_pe(struct eeh_dev *edev);
-int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe);
+int eeh_rmv_from_parent_pe(struct eeh_dev *edev);
 void eeh_pe_update_time_stamp(struct eeh_pe *pe);
+void *eeh_pe_traverse(struct eeh_pe *root,
+		eeh_traverse_func fn, void *flag);
 void *eeh_pe_dev_traverse(struct eeh_pe *root,
 		eeh_traverse_func fn, void *flag);
 void eeh_pe_restore_bars(struct eeh_pe *pe);
@@ -209,10 +221,12 @@ unsigned long eeh_check_failure(const volatile void __iomem *token,
 		unsigned long val);
 int eeh_dev_check_failure(struct eeh_dev *edev);
 void eeh_addr_cache_build(void);
+void eeh_add_device_early(struct device_node *);
 void eeh_add_device_tree_early(struct device_node *);
+void eeh_add_device_late(struct pci_dev *);
 void eeh_add_device_tree_late(struct pci_bus *);
 void eeh_add_sysfs_files(struct pci_bus *);
-void eeh_remove_bus_device(struct pci_dev *, int);
+void eeh_remove_device(struct pci_dev *);
 
 /**
  * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
@@ -252,13 +266,17 @@ static inline unsigned long eeh_check_failure(const volatile void __iomem *token
 
 static inline void eeh_addr_cache_build(void) { }
 
+static inline void eeh_add_device_early(struct device_node *dn) { }
+
 static inline void eeh_add_device_tree_early(struct device_node *dn) { }
 
+static inline void eeh_add_device_late(struct pci_dev *dev) { }
+
 static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
 
 static inline void eeh_add_sysfs_files(struct pci_bus *bus) { }
 
-static inline void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe) { }
+static inline void eeh_remove_device(struct pci_dev *dev) { }
 
 #define EEH_POSSIBLE_ERROR(val, type) (0)
 #define EEH_IO_ERROR_VALUE(size) (-1UL)
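
The extra 'tmp' argument makes eeh_pe_for_each_dev() deletion-safe: list_for_each_entry_safe() caches the next node before the loop body runs, so the current eeh_dev may unlink itself mid-walk, which the partial-hotplug paths later in this series rely on. The general pattern, for illustration:

	struct eeh_dev *edev, *tmp;

	/* 'edev' may be removed from pe->edevs inside the body; 'tmp'
	 * already holds the next entry, so iteration continues safely. */
	list_for_each_entry_safe(edev, tmp, &pe->edevs, list) {
		if (edev->mode & EEH_DEV_DISCONNECTED)
			list_del(&edev->list);
	}
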
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index ba713f166fa5..10be1dd01c6b 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -96,10 +96,11 @@ static inline bool arch_irqs_disabled(void)
 #endif
 
 #define hard_irq_disable()	do {			\
-	u8 _was_enabled = get_paca()->soft_enabled;	\
+	u8 _was_enabled;				\
 	__hard_irq_disable();				\
-	get_paca()->soft_enabled = 0;			\
-	get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;	\
+	_was_enabled = local_paca->soft_enabled;	\
+	local_paca->soft_enabled = 0;			\
+	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;	\
 	if (_was_enabled)				\
 		trace_hardirqs_off();			\
 } while(0)
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index c1df590ec444..49fa55bfbac4 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -82,10 +82,9 @@ struct exception_table_entry;
 void sort_ex_table(struct exception_table_entry *start,
 		   struct exception_table_entry *finish);
 
-#ifdef CONFIG_MODVERSIONS
+#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
 #define ARCH_RELOCATES_KCRCTAB
-
-extern const unsigned long reloc_start[];
+#define reloc_start PHYSICAL_START
 #endif
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 2c1d8cb9b265..32d0d2018faf 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -209,7 +209,6 @@ static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn)
 extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn);
 
 /** Remove all of the PCI devices under this bus */
-extern void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe);
 extern void pcibios_remove_pci_devices(struct pci_bus *bus);
 
 /** Discover new pci devices under this bus, and add them */
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 2dd7bfc459be..8b2492644754 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -12,6 +12,7 @@
 #include <linux/types.h>
 #include <asm/hw_irq.h>
 #include <linux/device.h>
+#include <uapi/asm/perf_event.h>
 
 #define MAX_HWEVENTS		8
 #define MAX_EVENT_ALTERNATIVES	8
@@ -69,11 +70,6 @@ struct power_pmu {
 #define PPMU_LIMITED_PMC_REQD	2	/* have to put this on a limited PMC */
 #define PPMU_ONLY_COUNT_RUN	4	/* only counting in run state */
 
-/*
- * We use the event config bit 63 as a flag to request EBB.
- */
-#define EVENT_CONFIG_EBB_SHIFT	63
-
 extern int register_power_pmu(struct power_pmu *);
 
 struct pt_regs;
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 47a35b08b963..e378cccfca55 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -247,6 +247,10 @@ struct thread_struct {
 	unsigned long	tm_orig_msr;	/* Thread's MSR on ctx switch */
 	struct pt_regs	ckpt_regs;	/* Checkpointed registers */
 
+	unsigned long	tm_tar;
+	unsigned long	tm_ppr;
+	unsigned long	tm_dscr;
+
 	/*
 	 * Transactional FP and VSX 0-31 register set.
 	 * NOTE: the sense of these is the opposite of the integer ckpt_regs!
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 5d7d9c2a5473..99222e27f173 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -254,19 +254,28 @@
 #define SPRN_HRMOR	0x139	/* Real mode offset register */
 #define SPRN_HSRR0	0x13A	/* Hypervisor Save/Restore 0 */
 #define SPRN_HSRR1	0x13B	/* Hypervisor Save/Restore 1 */
+/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_TAR_LG	8	/* Enable Target Address Register */
+#define FSCR_EBB_LG	7	/* Enable Event Based Branching */
+#define FSCR_TM_LG	5	/* Enable Transactional Memory */
+#define FSCR_PM_LG	4	/* Enable prob/priv access to PMU SPRs */
+#define FSCR_BHRB_LG	3	/* Enable Branch History Rolling Buffer*/
+#define FSCR_DSCR_LG	2	/* Enable Data Stream Control Register */
+#define FSCR_VECVSX_LG	1	/* Enable VMX/VSX */
+#define FSCR_FP_LG	0	/* Enable Floating Point */
 #define SPRN_FSCR	0x099	/* Facility Status & Control Register */
-#define   FSCR_TAR	(1 << (63-55)) /* Enable Target Address Register */
-#define   FSCR_EBB	(1 << (63-56)) /* Enable Event Based Branching */
-#define   FSCR_DSCR	(1 << (63-61)) /* Enable Data Stream Control Register */
+#define   FSCR_TAR	__MASK(FSCR_TAR_LG)
+#define   FSCR_EBB	__MASK(FSCR_EBB_LG)
+#define   FSCR_DSCR	__MASK(FSCR_DSCR_LG)
 #define SPRN_HFSCR	0xbe	/* HV=1 Facility Status & Control Register */
-#define   HFSCR_TAR	(1 << (63-55)) /* Enable Target Address Register */
-#define   HFSCR_EBB	(1 << (63-56)) /* Enable Event Based Branching */
-#define   HFSCR_TM	(1 << (63-58)) /* Enable Transactional Memory */
-#define   HFSCR_PM	(1 << (63-60)) /* Enable prob/priv access to PMU SPRs */
-#define   HFSCR_BHRB	(1 << (63-59)) /* Enable Branch History Rolling Buffer*/
-#define   HFSCR_DSCR	(1 << (63-61)) /* Enable Data Stream Control Register */
-#define   HFSCR_VECVSX	(1 << (63-62)) /* Enable VMX/VSX */
-#define   HFSCR_FP	(1 << (63-63)) /* Enable Floating Point */
+#define   HFSCR_TAR	__MASK(FSCR_TAR_LG)
+#define   HFSCR_EBB	__MASK(FSCR_EBB_LG)
+#define   HFSCR_TM	__MASK(FSCR_TM_LG)
+#define   HFSCR_PM	__MASK(FSCR_PM_LG)
+#define   HFSCR_BHRB	__MASK(FSCR_BHRB_LG)
+#define   HFSCR_DSCR	__MASK(FSCR_DSCR_LG)
+#define   HFSCR_VECVSX	__MASK(FSCR_VECVSX_LG)
+#define   HFSCR_FP	__MASK(FSCR_FP_LG)
 #define SPRN_TAR	0x32f	/* Target Address Register */
 #define SPRN_LPCR	0x13E	/* LPAR Control Register */
 #define   LPCR_VPM0	(1ul << (63-0))
@@ -1088,7 +1097,8 @@
 #define PVR_970MP	0x0044
 #define PVR_970GX	0x0045
 #define PVR_POWER7p	0x004A
-#define PVR_POWER8	0x004B
+#define PVR_POWER8E	0x004B
+#define PVR_POWER8	0x004D
 #define PVR_BE		0x0070
 #define PVR_PA6T	0x0090
 
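
The __MASK() conversion above is value-preserving: IBM bit numbering counts from the most significant end, so IBM bit 55 of a 64-bit register is conventional bit 63 - 55 = 8, exactly the FSCR_TAR_LG value introduced here. A quick compile-time check, assuming reg.h's usual __MASK(X) (1UL << (X)) definition:

	#define __MASK(X)	(1UL << (X))
	#define FSCR_TAR_LG	8

	/* Both spellings of the TAR enable bit are 0x100. */
	_Static_assert(__MASK(FSCR_TAR_LG) == (1UL << (63 - 55)),
		       "IBM-to-conventional bit conversion preserved");
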
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index ffbaabebcdca..48cfc858abd6 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -145,6 +145,10 @@ extern void __cpu_die(unsigned int cpu);
 #define smp_setup_cpu_maps()
 static inline void inhibit_secondary_onlining(void) {}
 static inline void uninhibit_secondary_onlining(void) {}
+static inline const struct cpumask *cpu_sibling_mask(int cpu)
+{
+	return cpumask_of(cpu);
+}
 
 #endif /* CONFIG_SMP */
 
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 49a13e0ef234..294c2cedcf7a 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -15,6 +15,15 @@ extern struct task_struct *__switch_to(struct task_struct *,
 struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void save_tar(struct thread_struct *prev)
+{
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		prev->tar = mfspr(SPRN_TAR);
+}
+#else
+static inline void save_tar(struct thread_struct *prev) {}
+#endif
 
 extern void giveup_fpu(struct task_struct *);
 extern void load_up_fpu(void);
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index 5182c8622b54..48be855ef37b 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -20,6 +20,7 @@ header-y += mman.h
 header-y += msgbuf.h
 header-y += nvram.h
 header-y += param.h
+header-y += perf_event.h
 header-y += poll.h
 header-y += posix_types.h
 header-y += ps3fb.h
diff --git a/arch/powerpc/include/uapi/asm/perf_event.h b/arch/powerpc/include/uapi/asm/perf_event.h
new file mode 100644
index 000000000000..80a4d40cf5bc
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/perf_event.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2013 Michael Ellerman, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 of the
+ * License.
+ */
+
+#ifndef _UAPI_ASM_POWERPC_PERF_EVENT_H
+#define _UAPI_ASM_POWERPC_PERF_EVENT_H
+
+/*
+ * We use bit 63 of perf_event_attr.config as a flag to request EBB.
+ */
+#define PERF_EVENT_CONFIG_EBB_SHIFT	63
+
+#endif /* _UAPI_ASM_POWERPC_PERF_EVENT_H */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index c7e8afc2ead0..8207459efe56 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -138,6 +138,9 @@ int main(void)
 	DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
 	DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr));
 	DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar));
+	DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar));
+	DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
+	DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
 	DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
 	DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
 					transact_vr[0]));
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 2a45d0f04385..22973a74df73 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -494,9 +494,27 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_restore		= __restore_cpu_power7,
 		.platform		= "power7+",
 	},
-	{	/* Power8 */
+	{	/* Power8E */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x004b0000,
+		.cpu_name		= "POWER8E (raw)",
+		.cpu_features		= CPU_FTRS_POWER8,
+		.cpu_user_features	= COMMON_USER_POWER8,
+		.cpu_user_features2	= COMMON_USER2_POWER8,
+		.mmu_features		= MMU_FTRS_POWER8,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 6,
+		.pmc_type		= PPC_PMC_IBM,
+		.oprofile_cpu_type	= "ppc64/power8",
+		.oprofile_type		= PPC_OPROFILE_INVALID,
+		.cpu_setup		= __setup_cpu_power8,
+		.cpu_restore		= __restore_cpu_power8,
+		.platform		= "power8",
+	},
+	{	/* Power8 */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x004d0000,
 		.cpu_name		= "POWER8 (raw)",
 		.cpu_features		= CPU_FTRS_POWER8,
 		.cpu_user_features	= COMMON_USER_POWER8,
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 39954fe941b8..55593ee2d5aa 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -231,7 +231,7 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
 void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
 {
 	size_t loglen = 0;
-	struct eeh_dev *edev;
+	struct eeh_dev *edev, *tmp;
 	bool valid_cfg_log = true;
 
 	/*
@@ -251,7 +251,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
 	eeh_pe_restore_bars(pe);
 
 	pci_regs_buf[0] = 0;
-	eeh_pe_for_each_dev(pe, edev) {
+	eeh_pe_for_each_dev(pe, edev, tmp) {
 		loglen += eeh_gather_pci_data(edev, pci_regs_buf + loglen,
 				EEH_PCI_REGS_LOG_LEN - loglen);
 	}
@@ -499,8 +499,6 @@ unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned lon
 	}
 
 	eeh_dev_check_failure(edev);
-
-	pci_dev_put(eeh_dev_to_pci_dev(edev));
 	return val;
 }
 
@@ -838,7 +836,7 @@ core_initcall_sync(eeh_init);
  * on the CEC architecture, type of the device, on earlier boot
  * command-line arguments & etc.
  */
-static void eeh_add_device_early(struct device_node *dn)
+void eeh_add_device_early(struct device_node *dn)
 {
 	struct pci_controller *phb;
 
@@ -886,7 +884,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
  * This routine must be used to complete EEH initialization for PCI
  * devices that were added after system boot (e.g. hotplug, dlpar).
  */
-static void eeh_add_device_late(struct pci_dev *dev)
+void eeh_add_device_late(struct pci_dev *dev)
 {
 	struct device_node *dn;
 	struct eeh_dev *edev;
@@ -902,9 +900,23 @@ static void eeh_add_device_late(struct pci_dev *dev)
 		pr_debug("EEH: Already referenced !\n");
 		return;
 	}
-	WARN_ON(edev->pdev);
 
-	pci_dev_get(dev);
+	/*
+	 * The EEH cache might not be removed correctly because of
+	 * unbalanced kref to the device during unplug time, which
+	 * relies on pcibios_release_device(). So we have to remove
+	 * that here explicitly.
+	 */
+	if (edev->pdev) {
+		eeh_rmv_from_parent_pe(edev);
+		eeh_addr_cache_rmv_dev(edev->pdev);
+		eeh_sysfs_remove_device(edev->pdev);
+		edev->mode &= ~EEH_DEV_SYSFS;
+
+		edev->pdev = NULL;
+		dev->dev.archdata.edev = NULL;
+	}
+
 	edev->pdev = dev;
 	dev->dev.archdata.edev = edev;
 
@@ -967,7 +979,6 @@ EXPORT_SYMBOL_GPL(eeh_add_sysfs_files);
 /**
  * eeh_remove_device - Undo EEH setup for the indicated pci device
  * @dev: pci device to be removed
- * @purge_pe: remove the PE or not
  *
  * This routine should be called when a device is removed from
  * a running system (e.g. by hotplug or dlpar). It unregisters
@@ -975,7 +986,7 @@ EXPORT_SYMBOL_GPL(eeh_add_sysfs_files);
  * this device will no longer be detected after this call; thus,
  * i/o errors affecting this slot may leave this device unusable.
  */
-static void eeh_remove_device(struct pci_dev *dev, int purge_pe)
+void eeh_remove_device(struct pci_dev *dev)
 {
 	struct eeh_dev *edev;
 
@@ -986,42 +997,29 @@ static void eeh_remove_device(struct pci_dev *dev, int purge_pe)
 	/* Unregister the device with the EEH/PCI address search system */
 	pr_debug("EEH: Removing device %s\n", pci_name(dev));
 
-	if (!edev || !edev->pdev) {
+	if (!edev || !edev->pdev || !edev->pe) {
 		pr_debug("EEH: Not referenced !\n");
 		return;
 	}
+
+	/*
+	 * During the hotplug for EEH error recovery, we need the EEH
+	 * device attached to the parent PE in order for BAR restore
+	 * a bit later. So we keep it for BAR restore and remove it
+	 * from the parent PE during the BAR resotre.
+	 */
 	edev->pdev = NULL;
 	dev->dev.archdata.edev = NULL;
-	pci_dev_put(dev);
+	if (!(edev->pe->state & EEH_PE_KEEP))
+		eeh_rmv_from_parent_pe(edev);
+	else
+		edev->mode |= EEH_DEV_DISCONNECTED;
 
-	eeh_rmv_from_parent_pe(edev, purge_pe);
 	eeh_addr_cache_rmv_dev(dev);
 	eeh_sysfs_remove_device(dev);
+	edev->mode &= ~EEH_DEV_SYSFS;
 }
 
-/**
- * eeh_remove_bus_device - Undo EEH setup for the indicated PCI device
- * @dev: PCI device
- * @purge_pe: remove the corresponding PE or not
- *
- * This routine must be called when a device is removed from the
- * running system through hotplug or dlpar. The corresponding
- * PCI address cache will be removed.
- */
-void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe)
-{
-	struct pci_bus *bus = dev->subordinate;
-	struct pci_dev *child, *tmp;
-
-	eeh_remove_device(dev, purge_pe);
-
-	if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
-		list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
-			eeh_remove_bus_device(child, purge_pe);
-	}
-}
-EXPORT_SYMBOL_GPL(eeh_remove_bus_device);
-
 static int proc_eeh_show(struct seq_file *m, void *v)
 {
 	if (0 == eeh_subsystem_enabled) {
@@ -1063,7 +1061,7 @@ static const struct file_operations proc_eeh_operations = {
 
 static int __init eeh_init_proc(void)
 {
-	if (machine_is(pseries))
+	if (machine_is(pseries) || machine_is(powernv))
 		proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
 	return 0;
 }
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c
index f9ac1232a746..e8c9fd546a5c 100644
--- a/arch/powerpc/kernel/eeh_cache.c
+++ b/arch/powerpc/kernel/eeh_cache.c
@@ -68,16 +68,12 @@ static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr)
 		struct pci_io_addr_range *piar;
 		piar = rb_entry(n, struct pci_io_addr_range, rb_node);
 
-		if (addr < piar->addr_lo) {
+		if (addr < piar->addr_lo)
 			n = n->rb_left;
-		} else {
-			if (addr > piar->addr_hi) {
-				n = n->rb_right;
-			} else {
-				pci_dev_get(piar->pcidev);
-				return piar->edev;
-			}
-		}
+		else if (addr > piar->addr_hi)
+			n = n->rb_right;
+		else
+			return piar->edev;
 	}
 
 	return NULL;
@@ -156,7 +152,6 @@ eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
 	if (!piar)
 		return NULL;
 
-	pci_dev_get(dev);
 	piar->addr_lo = alo;
 	piar->addr_hi = ahi;
 	piar->edev = pci_dev_to_eeh_dev(dev);
@@ -250,7 +245,6 @@ restart:
 
 		if (piar->pcidev == dev) {
 			rb_erase(n, &pci_io_addr_cache_root.rb_root);
-			pci_dev_put(piar->pcidev);
 			kfree(piar);
 			goto restart;
 		}
@@ -302,12 +296,10 @@ void eeh_addr_cache_build(void)
 		if (!edev)
 			continue;
 
-		pci_dev_get(dev);  /* matching put is in eeh_remove_device() */
 		dev->dev.archdata.edev = edev;
 		edev->pdev = dev;
 
 		eeh_addr_cache_insert_dev(dev);
-
 		eeh_sysfs_add_device(dev);
 	}
 
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 2b1ce17cae50..36bed5a12750 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -143,10 +143,14 @@ static void eeh_disable_irq(struct pci_dev *dev)
 static void eeh_enable_irq(struct pci_dev *dev)
 {
 	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
+	struct irq_desc *desc;
 
 	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
 		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
-		enable_irq(dev->irq);
+
+		desc = irq_to_desc(dev->irq);
+		if (desc && desc->depth > 0)
+			enable_irq(dev->irq);
 	}
 }
 
@@ -338,6 +342,54 @@ static void *eeh_report_failure(void *data, void *userdata)
 	return NULL;
 }
 
+static void *eeh_rmv_device(void *data, void *userdata)
+{
+	struct pci_driver *driver;
+	struct eeh_dev *edev = (struct eeh_dev *)data;
+	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
+	int *removed = (int *)userdata;
+
+	/*
+	 * Actually, we should remove the PCI bridges as well.
+	 * However, that's lots of complexity to do that,
+	 * particularly some of devices under the bridge might
+	 * support EEH. So we just care about PCI devices for
+	 * simplicity here.
+	 */
+	if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
+		return NULL;
+	driver = eeh_pcid_get(dev);
+	if (driver && driver->err_handler)
+		return NULL;
+
+	/* Remove it from PCI subsystem */
+	pr_debug("EEH: Removing %s without EEH sensitive driver\n",
+		 pci_name(dev));
+	edev->bus = dev->bus;
+	edev->mode |= EEH_DEV_DISCONNECTED;
+	(*removed)++;
+
+	pci_stop_and_remove_bus_device(dev);
+
+	return NULL;
+}
+
+static void *eeh_pe_detach_dev(void *data, void *userdata)
+{
+	struct eeh_pe *pe = (struct eeh_pe *)data;
+	struct eeh_dev *edev, *tmp;
+
+	eeh_pe_for_each_dev(pe, edev, tmp) {
+		if (!(edev->mode & EEH_DEV_DISCONNECTED))
+			continue;
+
+		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
+		eeh_rmv_from_parent_pe(edev);
+	}
+
+	return NULL;
+}
+
 /**
  * eeh_reset_device - Perform actual reset of a pci slot
  * @pe: EEH PE
@@ -349,8 +401,9 @@ static void *eeh_report_failure(void *data, void *userdata)
  */
 static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
 {
+	struct pci_bus *frozen_bus = eeh_pe_bus_get(pe);
 	struct timeval tstamp;
-	int cnt, rc;
+	int cnt, rc, removed = 0;
 
 	/* pcibios will clear the counter; save the value */
 	cnt = pe->freeze_count;
@@ -362,8 +415,11 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
 	 * devices are expected to be attached soon when calling
 	 * into pcibios_add_pci_devices().
 	 */
+	eeh_pe_state_mark(pe, EEH_PE_KEEP);
 	if (bus)
-		__pcibios_remove_pci_devices(bus, 0);
+		pcibios_remove_pci_devices(bus);
+	else if (frozen_bus)
+		eeh_pe_dev_traverse(pe, eeh_rmv_device, &removed);
 
 	/* Reset the pci controller. (Asserts RST#; resets config space).
 	 * Reconfigure bridges and devices. Don't try to bring the system
@@ -384,9 +440,24 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
384 * potentially weird things happen. 440 * potentially weird things happen.
385 */ 441 */
386 if (bus) { 442 if (bus) {
443 pr_info("EEH: Sleep 5s ahead of complete hotplug\n");
387 ssleep(5); 444 ssleep(5);
445
446 /*
 447 * The EEH device is still connected to its parent
448 * PE. We should disconnect it so the binding can be
449 * rebuilt when adding PCI devices.
450 */
451 eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
388 pcibios_add_pci_devices(bus); 452 pcibios_add_pci_devices(bus);
453 } else if (frozen_bus && removed) {
454 pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
455 ssleep(5);
456
457 eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
458 pcibios_add_pci_devices(frozen_bus);
389 } 459 }
460 eeh_pe_state_clear(pe, EEH_PE_KEEP);
390 461
391 pe->tstamp = tstamp; 462 pe->tstamp = tstamp;
392 pe->freeze_count = cnt; 463 pe->freeze_count = cnt;
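Taken together, eeh_reset_device() now has two recovery shapes. A schematic of the control flow, with opaque stand-in types (the real work is in the calls shown in the hunks above):

struct eeh_pe;			/* opaque here; real definitions in eeh.h */
struct pci_bus;

static void recovery_shape(struct eeh_pe *pe, struct pci_bus *bus,
			   struct pci_bus *frozen_bus, int removed)
{
	if (bus) {
		/* Full hotplug: the whole bus went away, so detach
		 * every EEH device from its PE and re-probe the bus. */
	} else if (frozen_bus && removed) {
		/* Partial hotplug: only devices whose drivers lack EEH
		 * error handlers were removed; re-add just those. */
	}
	/* Either way, EEH_PE_KEEP held the PE alive across removal
	 * and is cleared once the devices are back. */
	(void)pe;
}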
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 016588a6f5ed..f9450537e335 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -149,8 +149,8 @@ static struct eeh_pe *eeh_pe_next(struct eeh_pe *pe,
149 * callback returns something other than NULL, or no more PEs 149 * callback returns something other than NULL, or no more PEs
150 * to be traversed. 150 * to be traversed.
151 */ 151 */
152static void *eeh_pe_traverse(struct eeh_pe *root, 152void *eeh_pe_traverse(struct eeh_pe *root,
153 eeh_traverse_func fn, void *flag) 153 eeh_traverse_func fn, void *flag)
154{ 154{
155 struct eeh_pe *pe; 155 struct eeh_pe *pe;
156 void *ret; 156 void *ret;
@@ -176,7 +176,7 @@ void *eeh_pe_dev_traverse(struct eeh_pe *root,
176 eeh_traverse_func fn, void *flag) 176 eeh_traverse_func fn, void *flag)
177{ 177{
178 struct eeh_pe *pe; 178 struct eeh_pe *pe;
179 struct eeh_dev *edev; 179 struct eeh_dev *edev, *tmp;
180 void *ret; 180 void *ret;
181 181
182 if (!root) { 182 if (!root) {
@@ -186,7 +186,7 @@ void *eeh_pe_dev_traverse(struct eeh_pe *root,
186 186
187 /* Traverse root PE */ 187 /* Traverse root PE */
188 for (pe = root; pe; pe = eeh_pe_next(pe, root)) { 188 for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
189 eeh_pe_for_each_dev(pe, edev) { 189 eeh_pe_for_each_dev(pe, edev, tmp) {
190 ret = fn(edev, flag); 190 ret = fn(edev, flag);
191 if (ret) 191 if (ret)
192 return ret; 192 return ret;
@@ -333,7 +333,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
333 while (parent) { 333 while (parent) {
334 if (!(parent->type & EEH_PE_INVALID)) 334 if (!(parent->type & EEH_PE_INVALID))
335 break; 335 break;
336 parent->type &= ~EEH_PE_INVALID; 336 parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP);
337 parent = parent->parent; 337 parent = parent->parent;
338 } 338 }
339 pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n", 339 pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
@@ -397,21 +397,20 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
397/** 397/**
398 * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE 398 * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE
399 * @edev: EEH device 399 * @edev: EEH device
400 * @purge_pe: remove PE or not
401 * 400 *
402 * The PE hierarchy tree might be changed when doing PCI hotplug. 401 * The PE hierarchy tree might be changed when doing PCI hotplug.
403 * Also, the PCI devices or buses could be removed from the system 402 * Also, the PCI devices or buses could be removed from the system
404 * during EEH recovery. So we have to call the function to remove the 403 * during EEH recovery. So we have to call the function to remove the
405 * corresponding PE accordingly if necessary. 404 * corresponding PE accordingly if necessary.
406 */ 405 */
407int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe) 406int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
408{ 407{
409 struct eeh_pe *pe, *parent, *child; 408 struct eeh_pe *pe, *parent, *child;
410 int cnt; 409 int cnt;
411 410
412 if (!edev->pe) { 411 if (!edev->pe) {
413 pr_warning("%s: No PE found for EEH device %s\n", 412 pr_debug("%s: No PE found for EEH device %s\n",
414 __func__, edev->dn->full_name); 413 __func__, edev->dn->full_name);
415 return -EEXIST; 414 return -EEXIST;
416 } 415 }
417 416
@@ -431,7 +430,7 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe)
431 if (pe->type & EEH_PE_PHB) 430 if (pe->type & EEH_PE_PHB)
432 break; 431 break;
433 432
434 if (purge_pe) { 433 if (!(pe->state & EEH_PE_KEEP)) {
435 if (list_empty(&pe->edevs) && 434 if (list_empty(&pe->edevs) &&
436 list_empty(&pe->child_list)) { 435 list_empty(&pe->child_list)) {
437 list_del(&pe->child); 436 list_del(&pe->child);
@@ -502,7 +501,7 @@ static void *__eeh_pe_state_mark(void *data, void *flag)
502{ 501{
503 struct eeh_pe *pe = (struct eeh_pe *)data; 502 struct eeh_pe *pe = (struct eeh_pe *)data;
504 int state = *((int *)flag); 503 int state = *((int *)flag);
505 struct eeh_dev *tmp; 504 struct eeh_dev *edev, *tmp;
506 struct pci_dev *pdev; 505 struct pci_dev *pdev;
507 506
508 /* 507 /*
@@ -512,8 +511,8 @@ static void *__eeh_pe_state_mark(void *data, void *flag)
512 * the PCI device driver. 511 * the PCI device driver.
513 */ 512 */
514 pe->state |= state; 513 pe->state |= state;
515 eeh_pe_for_each_dev(pe, tmp) { 514 eeh_pe_for_each_dev(pe, edev, tmp) {
516 pdev = eeh_dev_to_pci_dev(tmp); 515 pdev = eeh_dev_to_pci_dev(edev);
517 if (pdev) 516 if (pdev)
518 pdev->error_state = pci_channel_io_frozen; 517 pdev->error_state = pci_channel_io_frozen;
519 } 518 }
@@ -579,7 +578,7 @@ void eeh_pe_state_clear(struct eeh_pe *pe, int state)
579 * blocked on normal path during the stage. So we need to utilize 578 * blocked on normal path during the stage. So we need to utilize
580 * eeh operations, which are always permitted. 579 * eeh operations, which are always permitted.
581 */ 580 */
582static void eeh_bridge_check_link(struct pci_dev *pdev, 581static void eeh_bridge_check_link(struct eeh_dev *edev,
583 struct device_node *dn) 582 struct device_node *dn)
584{ 583{
585 int cap; 584 int cap;
@@ -590,16 +589,17 @@ static void eeh_bridge_check_link(struct pci_dev *pdev,
590 * We only check root port and downstream ports of 589 * We only check root port and downstream ports of
591 * PCIe switches 590 * PCIe switches
592 */ 591 */
593 if (!pci_is_pcie(pdev) || 592 if (!(edev->mode & (EEH_DEV_ROOT_PORT | EEH_DEV_DS_PORT)))
594 (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
595 pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM))
596 return; 593 return;
597 594
598 pr_debug("%s: Check PCIe link for %s ...\n", 595 pr_debug("%s: Check PCIe link for %04x:%02x:%02x.%01x ...\n",
599 __func__, pci_name(pdev)); 596 __func__, edev->phb->global_number,
597 edev->config_addr >> 8,
598 PCI_SLOT(edev->config_addr & 0xFF),
599 PCI_FUNC(edev->config_addr & 0xFF));
600 600
601 /* Check slot status */ 601 /* Check slot status */
602 cap = pdev->pcie_cap; 602 cap = edev->pcie_cap;
603 eeh_ops->read_config(dn, cap + PCI_EXP_SLTSTA, 2, &val); 603 eeh_ops->read_config(dn, cap + PCI_EXP_SLTSTA, 2, &val);
604 if (!(val & PCI_EXP_SLTSTA_PDS)) { 604 if (!(val & PCI_EXP_SLTSTA_PDS)) {
605 pr_debug(" No card in the slot (0x%04x) !\n", val); 605 pr_debug(" No card in the slot (0x%04x) !\n", val);
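The pr_debug() rewrite above prints the address from edev->config_addr instead of pci_name(). config_addr packs (bus << 8) | devfn, and the standard PCI_SLOT()/PCI_FUNC() macros split devfn into the 5-bit device and 3-bit function numbers. A worked example:

#include <stdio.h>

#define PCI_SLOT(devfn)	(((devfn) >> 3) & 0x1f)	/* 5-bit device */
#define PCI_FUNC(devfn)	((devfn) & 0x07)	/* 3-bit function */

int main(void)
{
	unsigned int config_addr = (0x01 << 8) | (0x0a << 3) | 0x2;

	/* Prints "01:0a.2": bus 01, slot 0a, function 2 */
	printf("%02x:%02x.%01x\n",
	       config_addr >> 8,
	       PCI_SLOT(config_addr & 0xff),
	       PCI_FUNC(config_addr & 0xff));
	return 0;
}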
@@ -653,8 +653,7 @@ static void eeh_bridge_check_link(struct pci_dev *pdev,
653#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF)) 653#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
654#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)]) 654#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
655 655
656static void eeh_restore_bridge_bars(struct pci_dev *pdev, 656static void eeh_restore_bridge_bars(struct eeh_dev *edev,
657 struct eeh_dev *edev,
658 struct device_node *dn) 657 struct device_node *dn)
659{ 658{
660 int i; 659 int i;
@@ -680,7 +679,7 @@ static void eeh_restore_bridge_bars(struct pci_dev *pdev,
680 eeh_ops->write_config(dn, PCI_COMMAND, 4, edev->config_space[1]); 679 eeh_ops->write_config(dn, PCI_COMMAND, 4, edev->config_space[1]);
681 680
682 /* Check the PCIe link is ready */ 681 /* Check the PCIe link is ready */
683 eeh_bridge_check_link(pdev, dn); 682 eeh_bridge_check_link(edev, dn);
684} 683}
685 684
686static void eeh_restore_device_bars(struct eeh_dev *edev, 685static void eeh_restore_device_bars(struct eeh_dev *edev,
@@ -729,19 +728,12 @@ static void eeh_restore_device_bars(struct eeh_dev *edev,
729 */ 728 */
730static void *eeh_restore_one_device_bars(void *data, void *flag) 729static void *eeh_restore_one_device_bars(void *data, void *flag)
731{ 730{
732 struct pci_dev *pdev = NULL;
733 struct eeh_dev *edev = (struct eeh_dev *)data; 731 struct eeh_dev *edev = (struct eeh_dev *)data;
734 struct device_node *dn = eeh_dev_to_of_node(edev); 732 struct device_node *dn = eeh_dev_to_of_node(edev);
735 733
736 /* Trace the PCI bridge */ 734 /* Do special restore for bridges */
737 if (eeh_probe_mode_dev()) { 735 if (edev->mode & EEH_DEV_BRIDGE)
738 pdev = eeh_dev_to_pci_dev(edev); 736 eeh_restore_bridge_bars(edev, dn);
739 if (pdev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
740 pdev = NULL;
741 }
742
743 if (pdev)
744 eeh_restore_bridge_bars(pdev, edev, dn);
745 else 737 else
746 eeh_restore_device_bars(edev, dn); 738 eeh_restore_device_bars(edev, dn);
747 739
diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c
index e7ae3484918c..5d753d4f2c75 100644
--- a/arch/powerpc/kernel/eeh_sysfs.c
+++ b/arch/powerpc/kernel/eeh_sysfs.c
@@ -56,19 +56,40 @@ EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x");
56 56
57void eeh_sysfs_add_device(struct pci_dev *pdev) 57void eeh_sysfs_add_device(struct pci_dev *pdev)
58{ 58{
59 struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
59 int rc=0; 60 int rc=0;
60 61
62 if (edev && (edev->mode & EEH_DEV_SYSFS))
63 return;
64
61 rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode); 65 rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode);
62 rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr); 66 rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr);
63 rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); 67 rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
64 68
65 if (rc) 69 if (rc)
66 printk(KERN_WARNING "EEH: Unable to create sysfs entries\n"); 70 printk(KERN_WARNING "EEH: Unable to create sysfs entries\n");
71 else if (edev)
72 edev->mode |= EEH_DEV_SYSFS;
67} 73}
68 74
69void eeh_sysfs_remove_device(struct pci_dev *pdev) 75void eeh_sysfs_remove_device(struct pci_dev *pdev)
70{ 76{
77 struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
78
79 /*
80 * The parent directory might have been removed. We needn't
81 * continue for that case.
82 */
83 if (!pdev->dev.kobj.sd) {
84 if (edev)
85 edev->mode &= ~EEH_DEV_SYSFS;
86 return;
87 }
88
71 device_remove_file(&pdev->dev, &dev_attr_eeh_mode); 89 device_remove_file(&pdev->dev, &dev_attr_eeh_mode);
72 device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr); 90 device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr);
73 device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); 91 device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
92
93 if (edev)
94 edev->mode &= ~EEH_DEV_SYSFS;
74} 95}
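The EEH_DEV_SYSFS flag makes these two entry points safe to call more than once, and safe when the parent sysfs directory has already gone away. The pattern in miniature (stand-in names, not the driver-core API):

#include <stdbool.h>

#define DEV_SYSFS 0x1

struct fake_dev { unsigned int mode; bool parent_alive; };

static void sysfs_add(struct fake_dev *d)
{
	if (d->mode & DEV_SYSFS)
		return;			/* already registered: no-op */
	/* ... create attribute files here ... */
	d->mode |= DEV_SYSFS;
}

static void sysfs_remove(struct fake_dev *d)
{
	if (!d->parent_alive) {
		/* Parent directory already removed; just drop the flag
		 * rather than touching stale sysfs state. */
		d->mode &= ~DEV_SYSFS;
		return;
	}
	/* ... remove attribute files here ... */
	d->mode &= ~DEV_SYSFS;
}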
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index ab15b8d057ad..2bd0b885b0fe 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -449,15 +449,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
449 449
450#ifdef CONFIG_PPC_BOOK3S_64 450#ifdef CONFIG_PPC_BOOK3S_64
451BEGIN_FTR_SECTION 451BEGIN_FTR_SECTION
452 /*
453 * Back up the TAR across context switches. Note that the TAR is not
454 * available for use in the kernel. (To provide this, the TAR should
455 * be backed up/restored on exception entry/exit instead, and be in
456 * pt_regs. FIXME, this should be in pt_regs anyway (for debug).)
457 */
458 mfspr r0,SPRN_TAR
459 std r0,THREAD_TAR(r3)
460
461 /* Event based branch registers */ 452 /* Event based branch registers */
462 mfspr r0, SPRN_BESCR 453 mfspr r0, SPRN_BESCR
463 std r0, THREAD_BESCR(r3) 454 std r0, THREAD_BESCR(r3)
@@ -584,9 +575,34 @@ BEGIN_FTR_SECTION
584 ld r7,DSCR_DEFAULT@toc(2) 575 ld r7,DSCR_DEFAULT@toc(2)
585 ld r0,THREAD_DSCR(r4) 576 ld r0,THREAD_DSCR(r4)
586 cmpwi r6,0 577 cmpwi r6,0
578 li r8, FSCR_DSCR
587 bne 1f 579 bne 1f
588 ld r0,0(r7) 580 ld r0,0(r7)
5891: cmpd r0,r25 581 b 3f
5821:
583 BEGIN_FTR_SECTION_NESTED(70)
584 mfspr r6, SPRN_FSCR
585 or r6, r6, r8
586 mtspr SPRN_FSCR, r6
587 BEGIN_FTR_SECTION_NESTED(69)
588 mfspr r6, SPRN_HFSCR
589 or r6, r6, r8
590 mtspr SPRN_HFSCR, r6
591 END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
592 b 4f
593 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
5943:
595 BEGIN_FTR_SECTION_NESTED(70)
596 mfspr r6, SPRN_FSCR
597 andc r6, r6, r8
598 mtspr SPRN_FSCR, r6
599 BEGIN_FTR_SECTION_NESTED(69)
600 mfspr r6, SPRN_HFSCR
601 andc r6, r6, r8
602 mtspr SPRN_HFSCR, r6
603 END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
604 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
6054: cmpd r0,r25
590 beq 2f 606 beq 2f
591 mtspr SPRN_DSCR,r0 607 mtspr SPRN_DSCR,r0
5922: 6082:
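The nested feature sections above boil down to a small decision made on every switch-in. A C restatement for readability only (the real code must stay in assembly, and FSCR_DSCR being bit 2 is an assumption of this sketch):

#include <stdbool.h>
#include <stdint.h>

#define FSCR_DSCR (1ULL << 2)	/* assumed DSCR facility bit */

struct thread { bool dscr_inherit; uint64_t dscr; };

static uint64_t fscr;		/* models SPRN_FSCR (and HFSCR in HV mode) */

static uint64_t switch_in_dscr(const struct thread *next,
			       uint64_t dscr_default)
{
	if (next->dscr_inherit) {
		/* Thread manages its own DSCR: let user space write
		 * SPRN_DSCR directly without trapping. */
		fscr |= FSCR_DSCR;
		return next->dscr;
	}
	/* Default DSCR: clear the facility bit so a user-space DSCR
	 * access traps to facility_unavailable_exception(). */
	fscr &= ~FSCR_DSCR;
	return dscr_default;	/* value the asm writes to SPRN_DSCR */
}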
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 4e00d223b2e3..902ca3c6b4b6 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -848,7 +848,7 @@ hv_facility_unavailable_relon_trampoline:
848 . = 0x4f80 848 . = 0x4f80
849 SET_SCRATCH0(r13) 849 SET_SCRATCH0(r13)
850 EXCEPTION_PROLOG_0(PACA_EXGEN) 850 EXCEPTION_PROLOG_0(PACA_EXGEN)
851 b facility_unavailable_relon_hv 851 b hv_facility_unavailable_relon_hv
852 852
853 STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint) 853 STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
854#ifdef CONFIG_PPC_DENORMALISATION 854#ifdef CONFIG_PPC_DENORMALISATION
@@ -1175,6 +1175,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1175 b .ret_from_except 1175 b .ret_from_except
1176 1176
1177 STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception) 1177 STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
1178 STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
1178 1179
1179 .align 7 1180 .align 7
1180 .globl __end_handlers 1181 .globl __end_handlers
@@ -1188,7 +1189,7 @@ __end_handlers:
1188 STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable) 1189 STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
1189 STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable) 1190 STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
1190 STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable) 1191 STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
1191 STD_RELON_EXCEPTION_HV_OOL(0xf80, facility_unavailable) 1192 STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
1192 1193
1193#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) 1194#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1194/* 1195/*
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 2e51cde616d2..c69440cef7af 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -362,7 +362,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
362 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs); 362 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
363 seq_printf(p, " Spurious interrupts\n"); 363 seq_printf(p, " Spurious interrupts\n");
364 364
365 seq_printf(p, "%*s: ", prec, "CNT"); 365 seq_printf(p, "%*s: ", prec, "PMI");
366 for_each_online_cpu(j) 366 for_each_online_cpu(j)
367 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); 367 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
368 seq_printf(p, " Performance monitoring interrupts\n"); 368 seq_printf(p, " Performance monitoring interrupts\n");
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index f46914a0f33e..7d22a675fe1a 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1462,6 +1462,8 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus)
1462 /* Allocate bus and devices resources */ 1462 /* Allocate bus and devices resources */
1463 pcibios_allocate_bus_resources(bus); 1463 pcibios_allocate_bus_resources(bus);
1464 pcibios_claim_one_bus(bus); 1464 pcibios_claim_one_bus(bus);
1465 if (!pci_has_flag(PCI_PROBE_ONLY))
1466 pci_assign_unassigned_bus_resources(bus);
1465 1467
1466 /* Fixup EEH */ 1468 /* Fixup EEH */
1467 eeh_add_device_tree_late(bus); 1469 eeh_add_device_tree_late(bus);
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index 3f608800c06b..c1e17ae68a08 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -22,45 +22,40 @@
22#include <asm/eeh.h> 22#include <asm/eeh.h>
23 23
24/** 24/**
25 * __pcibios_remove_pci_devices - remove all devices under this bus 25 * pcibios_release_device - release PCI device
26 * @dev: PCI device
27 *
28 * The function is called before releasing the indicated PCI device.
29 */
30void pcibios_release_device(struct pci_dev *dev)
31{
32 eeh_remove_device(dev);
33}
34
35/**
36 * pcibios_remove_pci_devices - remove all devices under this bus
26 * @bus: the indicated PCI bus 37 * @bus: the indicated PCI bus
27 * @purge_pe: destroy the PE on removal of PCI devices
28 * 38 *
29 * Remove all of the PCI devices under this bus both from the 39 * Remove all of the PCI devices under this bus both from the
30 * linux pci device tree, and from the powerpc EEH address cache. 40 * linux pci device tree, and from the powerpc EEH address cache.
31 * By default, the corresponding PE will be destroied during the
32 * normal PCI hotplug path. For PCI hotplug during EEH recovery,
33 * the corresponding PE won't be destroied and deallocated.
34 */ 41 */
35void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe) 42void pcibios_remove_pci_devices(struct pci_bus *bus)
36{ 43{
37 struct pci_dev *dev, *tmp; 44 struct pci_dev *dev, *tmp;
38 struct pci_bus *child_bus; 45 struct pci_bus *child_bus;
39 46
40 /* First go down child busses */ 47 /* First go down child busses */
41 list_for_each_entry(child_bus, &bus->children, node) 48 list_for_each_entry(child_bus, &bus->children, node)
42 __pcibios_remove_pci_devices(child_bus, purge_pe); 49 pcibios_remove_pci_devices(child_bus);
43 50
44 pr_debug("PCI: Removing devices on bus %04x:%02x\n", 51 pr_debug("PCI: Removing devices on bus %04x:%02x\n",
45 pci_domain_nr(bus), bus->number); 52 pci_domain_nr(bus), bus->number);
46 list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) { 53 list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
47 pr_debug(" * Removing %s...\n", pci_name(dev)); 54 pr_debug(" Removing %s...\n", pci_name(dev));
48 eeh_remove_bus_device(dev, purge_pe);
49 pci_stop_and_remove_bus_device(dev); 55 pci_stop_and_remove_bus_device(dev);
50 } 56 }
51} 57}
52 58
53/**
54 * pcibios_remove_pci_devices - remove all devices under this bus
55 * @bus: the indicated PCI bus
56 *
57 * Remove all of the PCI devices under this bus both from the
58 * linux pci device tree, and from the powerpc EEH address cache.
59 */
60void pcibios_remove_pci_devices(struct pci_bus *bus)
61{
62 __pcibios_remove_pci_devices(bus, 1);
63}
64EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices); 59EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
65 60
66/** 61/**
@@ -76,7 +71,7 @@ EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
76 */ 71 */
77void pcibios_add_pci_devices(struct pci_bus * bus) 72void pcibios_add_pci_devices(struct pci_bus * bus)
78{ 73{
79 int slotno, num, mode, pass, max; 74 int slotno, mode, pass, max;
80 struct pci_dev *dev; 75 struct pci_dev *dev;
81 struct device_node *dn = pci_bus_to_OF_node(bus); 76 struct device_node *dn = pci_bus_to_OF_node(bus);
82 77
@@ -90,11 +85,15 @@ void pcibios_add_pci_devices(struct pci_bus * bus)
90 /* use ofdt-based probe */ 85 /* use ofdt-based probe */
91 of_rescan_bus(dn, bus); 86 of_rescan_bus(dn, bus);
92 } else if (mode == PCI_PROBE_NORMAL) { 87 } else if (mode == PCI_PROBE_NORMAL) {
93 /* use legacy probe */ 88 /*
89 * Use legacy probe. In the partial hotplug case, we
90 * probably have grandchildren devices unplugged. So
91 * we don't check the return value from pci_scan_slot() in
 92 * order to fully rescan all the way down and pick them up.
 93 * They may have been removed during partial hotplug.
94 */
94 slotno = PCI_SLOT(PCI_DN(dn->child)->devfn); 95 slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
95 num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0)); 96 pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
96 if (!num)
97 return;
98 pcibios_setup_bus_devices(bus); 97 pcibios_setup_bus_devices(bus);
99 max = bus->busn_res.start; 98 max = bus->busn_res.start;
100 for (pass = 0; pass < 2; pass++) { 99 for (pass = 0; pass < 2; pass++) {
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 6b0ba5854d99..15d9105323bf 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -230,11 +230,14 @@ void of_scan_pci_bridge(struct pci_dev *dev)
230 return; 230 return;
231 } 231 }
232 232
233 bus = pci_add_new_bus(dev->bus, dev, busrange[0]); 233 bus = pci_find_bus(pci_domain_nr(dev->bus), busrange[0]);
234 if (!bus) { 234 if (!bus) {
235 printk(KERN_ERR "Failed to create pci bus for %s\n", 235 bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
236 node->full_name); 236 if (!bus) {
237 return; 237 printk(KERN_ERR "Failed to create pci bus for %s\n",
238 node->full_name);
239 return;
240 }
238 } 241 }
239 242
240 bus->primary = dev->bus->number; 243 bus->primary = dev->bus->number;
@@ -292,6 +295,38 @@ void of_scan_pci_bridge(struct pci_dev *dev)
292} 295}
293EXPORT_SYMBOL(of_scan_pci_bridge); 296EXPORT_SYMBOL(of_scan_pci_bridge);
294 297
298static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus,
299 struct device_node *dn)
300{
301 struct pci_dev *dev = NULL;
302 const u32 *reg;
303 int reglen, devfn;
304
305 pr_debug(" * %s\n", dn->full_name);
306 if (!of_device_is_available(dn))
307 return NULL;
308
309 reg = of_get_property(dn, "reg", &reglen);
310 if (reg == NULL || reglen < 20)
311 return NULL;
312 devfn = (reg[0] >> 8) & 0xff;
313
314 /* Check if the PCI device is already there */
315 dev = pci_get_slot(bus, devfn);
316 if (dev) {
317 pci_dev_put(dev);
318 return dev;
319 }
320
321 /* create a new pci_dev for this device */
322 dev = of_create_pci_dev(dn, bus, devfn);
323 if (!dev)
324 return NULL;
325
326 pr_debug(" dev header type: %x\n", dev->hdr_type);
327 return dev;
328}
329
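The devfn extraction in of_scan_pci_dev() relies on the OF PCI bus binding: the first cell of a "reg" entry is phys.hi, laid out as npt000ss bbbbbbbb dddddfff rrrrrrrr, so bits 8-15 carry the device/function number. A worked example:

#include <stdio.h>

int main(void)
{
	/* phys.hi for bus 0x00, device 0x11, function 0x2:
	 * devfn = (0x11 << 3) | 0x2 = 0x8a, stored in bits 8-15 */
	unsigned int phys_hi = 0x8a << 8;
	unsigned int devfn = (phys_hi >> 8) & 0xff;

	printf("slot %x func %x\n", (devfn >> 3) & 0x1f, devfn & 0x7);
	return 0;			/* prints "slot 11 func 2" */
}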
295/** 330/**
296 * __of_scan_bus - given a PCI bus node, setup bus and scan for child devices 331 * __of_scan_bus - given a PCI bus node, setup bus and scan for child devices
297 * @node: device tree node for the PCI bus 332 * @node: device tree node for the PCI bus
@@ -302,8 +337,6 @@ static void __of_scan_bus(struct device_node *node, struct pci_bus *bus,
302 int rescan_existing) 337 int rescan_existing)
303{ 338{
304 struct device_node *child; 339 struct device_node *child;
305 const u32 *reg;
306 int reglen, devfn;
307 struct pci_dev *dev; 340 struct pci_dev *dev;
308 341
309 pr_debug("of_scan_bus(%s) bus no %d...\n", 342 pr_debug("of_scan_bus(%s) bus no %d...\n",
@@ -311,16 +344,7 @@ static void __of_scan_bus(struct device_node *node, struct pci_bus *bus,
311 344
312 /* Scan direct children */ 345 /* Scan direct children */
313 for_each_child_of_node(node, child) { 346 for_each_child_of_node(node, child) {
314 pr_debug(" * %s\n", child->full_name); 347 dev = of_scan_pci_dev(bus, child);
315 if (!of_device_is_available(child))
316 continue;
317 reg = of_get_property(child, "reg", &reglen);
318 if (reg == NULL || reglen < 20)
319 continue;
320 devfn = (reg[0] >> 8) & 0xff;
321
322 /* create a new pci_dev for this device */
323 dev = of_create_pci_dev(child, bus, devfn);
324 if (!dev) 348 if (!dev)
325 continue; 349 continue;
326 pr_debug(" dev header type: %x\n", dev->hdr_type); 350 pr_debug(" dev header type: %x\n", dev->hdr_type);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index c517dbe705fd..8083be20fe5e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -600,6 +600,16 @@ struct task_struct *__switch_to(struct task_struct *prev,
600 struct ppc64_tlb_batch *batch; 600 struct ppc64_tlb_batch *batch;
601#endif 601#endif
602 602
603 /* Back up the TAR across context switches.
604 * Note that the TAR is not available for use in the kernel. (To
605 * provide this, the TAR should be backed up/restored on exception
606 * entry/exit instead, and be in pt_regs. FIXME, this should be in
607 * pt_regs anyway (for debug).)
608 * Save the TAR here before we do treclaim/trecheckpoint as these
609 * will change the TAR.
610 */
611 save_tar(&prev->thread);
612
603 __switch_to_tm(prev); 613 __switch_to_tm(prev);
604 614
605#ifdef CONFIG_SMP 615#ifdef CONFIG_SMP
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 5eccda9fd33f..607902424e73 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -644,7 +644,8 @@ unsigned char ibm_architecture_vec[] = {
644 W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */ 644 W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
645 W(0xffff0000), W(0x003e0000), /* POWER6 */ 645 W(0xffff0000), W(0x003e0000), /* POWER6 */
646 W(0xffff0000), W(0x003f0000), /* POWER7 */ 646 W(0xffff0000), W(0x003f0000), /* POWER7 */
647 W(0xffff0000), W(0x004b0000), /* POWER8 */ 647 W(0xffff0000), W(0x004b0000), /* POWER8E */
648 W(0xffff0000), W(0x004d0000), /* POWER8 */
648 W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */ 649 W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */
649 W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */ 650 W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
650 W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */ 651 W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */
@@ -706,7 +707,7 @@ unsigned char ibm_architecture_vec[] = {
706 * must match by the macro below. Update the definition if 707 * must match by the macro below. Update the definition if
707 * the structure layout changes. 708 * the structure layout changes.
708 */ 709 */
709#define IBM_ARCH_VEC_NRCORES_OFFSET 117 710#define IBM_ARCH_VEC_NRCORES_OFFSET 125
710 W(NR_CPUS), /* number of cores supported */ 711 W(NR_CPUS), /* number of cores supported */
711 0, 712 0,
712 0, 713 0,
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 51be8fb24803..0554d1f6d70d 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -233,6 +233,16 @@ dont_backup_fp:
233 std r5, _CCR(r7) 233 std r5, _CCR(r7)
234 std r6, _XER(r7) 234 std r6, _XER(r7)
235 235
236
237 /* ******************** TAR, PPR, DSCR ********** */
238 mfspr r3, SPRN_TAR
239 mfspr r4, SPRN_PPR
240 mfspr r5, SPRN_DSCR
241
242 std r3, THREAD_TM_TAR(r12)
243 std r4, THREAD_TM_PPR(r12)
244 std r5, THREAD_TM_DSCR(r12)
245
236 /* MSR and flags: We don't change CRs, and we don't need to alter 246 /* MSR and flags: We don't change CRs, and we don't need to alter
237 * MSR. 247 * MSR.
238 */ 248 */
@@ -347,6 +357,16 @@ dont_restore_fp:
347 mtmsr r6 /* FP/Vec off again! */ 357 mtmsr r6 /* FP/Vec off again! */
348 358
349restore_gprs: 359restore_gprs:
360
361 /* ******************** TAR, PPR, DSCR ********** */
362 ld r4, THREAD_TM_TAR(r3)
363 ld r5, THREAD_TM_PPR(r3)
364 ld r6, THREAD_TM_DSCR(r3)
365
366 mtspr SPRN_TAR, r4
367 mtspr SPRN_PPR, r5
368 mtspr SPRN_DSCR, r6
369
350 /* ******************** CR,LR,CCR,MSR ********** */ 370 /* ******************** CR,LR,CCR,MSR ********** */
351 ld r3, _CTR(r7) 371 ld r3, _CTR(r7)
352 ld r4, _LINK(r7) 372 ld r4, _LINK(r7)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index bf33c22e38a4..e435bc089ea3 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -44,9 +44,7 @@
44#include <asm/machdep.h> 44#include <asm/machdep.h>
45#include <asm/rtas.h> 45#include <asm/rtas.h>
46#include <asm/pmc.h> 46#include <asm/pmc.h>
47#ifdef CONFIG_PPC32
48#include <asm/reg.h> 47#include <asm/reg.h>
49#endif
50#ifdef CONFIG_PMAC_BACKLIGHT 48#ifdef CONFIG_PMAC_BACKLIGHT
51#include <asm/backlight.h> 49#include <asm/backlight.h>
52#endif 50#endif
@@ -1296,43 +1294,54 @@ void vsx_unavailable_exception(struct pt_regs *regs)
1296 die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); 1294 die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
1297} 1295}
1298 1296
1297#ifdef CONFIG_PPC64
1299void facility_unavailable_exception(struct pt_regs *regs) 1298void facility_unavailable_exception(struct pt_regs *regs)
1300{ 1299{
1301 static char *facility_strings[] = { 1300 static char *facility_strings[] = {
1302 "FPU", 1301 [FSCR_FP_LG] = "FPU",
1303 "VMX/VSX", 1302 [FSCR_VECVSX_LG] = "VMX/VSX",
1304 "DSCR", 1303 [FSCR_DSCR_LG] = "DSCR",
1305 "PMU SPRs", 1304 [FSCR_PM_LG] = "PMU SPRs",
1306 "BHRB", 1305 [FSCR_BHRB_LG] = "BHRB",
1307 "TM", 1306 [FSCR_TM_LG] = "TM",
1308 "AT", 1307 [FSCR_EBB_LG] = "EBB",
1309 "EBB", 1308 [FSCR_TAR_LG] = "TAR",
1310 "TAR",
1311 }; 1309 };
1312 char *facility, *prefix; 1310 char *facility = "unknown";
1313 u64 value; 1311 u64 value;
1312 u8 status;
1313 bool hv;
1314 1314
1315 if (regs->trap == 0xf60) { 1315 hv = (regs->trap == 0xf80);
1316 value = mfspr(SPRN_FSCR); 1316 if (hv)
1317 prefix = "";
1318 } else {
1319 value = mfspr(SPRN_HFSCR); 1317 value = mfspr(SPRN_HFSCR);
1320 prefix = "Hypervisor "; 1318 else
1319 value = mfspr(SPRN_FSCR);
1320
1321 status = value >> 56;
1322 if (status == FSCR_DSCR_LG) {
1323 /* User is accessing the DSCR. Set the inherit bit and allow
1324 * direct DSCR writes in future by setting the DSCR bit in
1325 * the FSCR (or HFSCR).
1326 */
1327 current->thread.dscr_inherit = 1;
1328 if (hv)
1329 mtspr(SPRN_HFSCR, value | HFSCR_DSCR);
1330 else
1331 mtspr(SPRN_FSCR, value | FSCR_DSCR);
1332 return;
1321 } 1333 }
1322 1334
1323 value = value >> 56; 1335 if ((status < ARRAY_SIZE(facility_strings)) &&
1336 facility_strings[status])
1337 facility = facility_strings[status];
1324 1338
1325 /* We restore the interrupt state now */ 1339 /* We restore the interrupt state now */
1326 if (!arch_irq_disabled_regs(regs)) 1340 if (!arch_irq_disabled_regs(regs))
1327 local_irq_enable(); 1341 local_irq_enable();
1328 1342
1329 if (value < ARRAY_SIZE(facility_strings))
1330 facility = facility_strings[value];
1331 else
1332 facility = "unknown";
1333
1334 pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n", 1343 pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
1335 prefix, facility, regs->nip, regs->msr); 1344 hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
1336 1345
1337 if (user_mode(regs)) { 1346 if (user_mode(regs)) {
1338 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1347 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
@@ -1341,6 +1350,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
1341 1350
1342 die("Unexpected facility unavailable exception", regs, SIGABRT); 1351 die("Unexpected facility unavailable exception", regs, SIGABRT);
1343} 1352}
1353#endif
1344 1354
1345#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1355#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1346 1356
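The status decode above works because the top byte of the (H)FSCR holds the log2 facility number that caused the trap, so a designated-initializer table indexed by that byte names the facility. A sketch, using the kernel's FSCR_DSCR_LG = 2 and FSCR_TAR_LG = 8:

#include <stdio.h>
#include <stdint.h>

#define FSCR_DSCR_LG 2
#define FSCR_TAR_LG  8

static const char *facility_strings[] = {
	[FSCR_DSCR_LG] = "DSCR",
	[FSCR_TAR_LG]  = "TAR",
};

int main(void)
{
	uint64_t fscr_at_trap = (uint64_t)FSCR_DSCR_LG << 56;
	uint8_t status = fscr_at_trap >> 56;	/* IC field: top byte */
	const char *facility = "unknown";

	if (status < sizeof(facility_strings) / sizeof(facility_strings[0]) &&
	    facility_strings[status])
		facility = facility_strings[status];
	printf("Facility '%s' unavailable\n", facility);	/* DSCR */
	return 0;
}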
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 654e479802f2..f096e72262f4 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -38,9 +38,6 @@ jiffies = jiffies_64 + 4;
38#endif 38#endif
39SECTIONS 39SECTIONS
40{ 40{
41 . = 0;
42 reloc_start = .;
43
44 . = KERNELBASE; 41 . = KERNELBASE;
45 42
46/* 43/*
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2efa9dde741a..7629cd3eb91a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1809,7 +1809,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
1809 rma_size <<= PAGE_SHIFT; 1809 rma_size <<= PAGE_SHIFT;
1810 rmls = lpcr_rmls(rma_size); 1810 rmls = lpcr_rmls(rma_size);
1811 err = -EINVAL; 1811 err = -EINVAL;
1812 if (rmls < 0) { 1812 if ((long)rmls < 0) {
1813 pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size); 1813 pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
1814 goto out_srcu; 1814 goto out_srcu;
1815 } 1815 }
@@ -1874,7 +1874,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
1874 /* Allocate the guest's logical partition ID */ 1874 /* Allocate the guest's logical partition ID */
1875 1875
1876 lpid = kvmppc_alloc_lpid(); 1876 lpid = kvmppc_alloc_lpid();
1877 if (lpid < 0) 1877 if ((long)lpid < 0)
1878 return -ENOMEM; 1878 return -ENOMEM;
1879 kvm->arch.lpid = lpid; 1879 kvm->arch.lpid = lpid;
1880 1880
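Both hunks work around the same pitfall: rmls and lpid are unsigned, so a plain `< 0` test is always false and the error path was dead code. The cast restores the signed view of the error sentinel. In miniature:

#include <stdio.h>

static long fallible(void) { return -1; }	/* error sentinel */

int main(void)
{
	unsigned long rmls = fallible();	/* -1 becomes a huge value */

	if (rmls < 0)				/* always false! */
		printf("never reached\n");
	if ((long)rmls < 0)			/* the fix: signed view */
		printf("error detected\n");
	return 0;
}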
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 19498a567a81..c6e13d9a9e15 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1047,11 +1047,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1047 if (err) 1047 if (err)
1048 goto free_shadow_vcpu; 1048 goto free_shadow_vcpu;
1049 1049
1050 err = -ENOMEM;
1050 p = __get_free_page(GFP_KERNEL|__GFP_ZERO); 1051 p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
1051 /* the real shared page fills the last 4k of our page */
1052 vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
1053 if (!p) 1052 if (!p)
1054 goto uninit_vcpu; 1053 goto uninit_vcpu;
1054 /* the real shared page fills the last 4k of our page */
1055 vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
1055 1056
1056#ifdef CONFIG_PPC_BOOK3S_64 1057#ifdef CONFIG_PPC_BOOK3S_64
1057 /* default to book3s_64 (970fx) */ 1058 /* default to book3s_64 (970fx) */
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 3f0c30ae4791..c33d939120c9 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -43,6 +43,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
43{ 43{
44 unsigned long va; 44 unsigned long va;
45 unsigned int penc; 45 unsigned int penc;
46 unsigned long sllp;
46 47
47 /* 48 /*
48 * We need 14 to 65 bits of va for a tlibe of 4K page 49 * We need 14 to 65 bits of va for a tlibe of 4K page
@@ -64,7 +65,9 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
64 /* clear out bits after (52) [0....52.....63] */ 65 /* clear out bits after (52) [0....52.....63] */
65 va &= ~((1ul << (64 - 52)) - 1); 66 va &= ~((1ul << (64 - 52)) - 1);
66 va |= ssize << 8; 67 va |= ssize << 8;
67 va |= mmu_psize_defs[apsize].sllp << 6; 68 sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
69 ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
70 va |= sllp << 5;
68 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) 71 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
69 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) 72 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
70 : "memory"); 73 : "memory");
@@ -98,6 +101,7 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
98{ 101{
99 unsigned long va; 102 unsigned long va;
100 unsigned int penc; 103 unsigned int penc;
104 unsigned long sllp;
101 105
102 /* VPN_SHIFT can be atmost 12 */ 106 /* VPN_SHIFT can be atmost 12 */
103 va = vpn << VPN_SHIFT; 107 va = vpn << VPN_SHIFT;
@@ -113,7 +117,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
113 /* clear out bits after(52) [0....52.....63] */ 117 /* clear out bits after(52) [0....52.....63] */
114 va &= ~((1ul << (64 - 52)) - 1); 118 va &= ~((1ul << (64 - 52)) - 1);
115 va |= ssize << 8; 119 va |= ssize << 8;
116 va |= mmu_psize_defs[apsize].sllp << 6; 120 sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
121 ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
122 va |= sllp << 5;
117 asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)" 123 asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
118 : : "r"(va) : "memory"); 124 : : "r"(va) : "memory");
119 break; 125 break;
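A worked example of the sllp repack in both hunks, assuming the kernel's SLB_VSID_L = 0x100 and SLB_VSID_LP = 0x30. The old code shifted the raw sllp field left by 6; the fix first extracts L into bit 2 and LP into bits 0-1, then places the 3-bit combination at bits 5-7 of the tlbie VA:

#include <stdio.h>

#define SLB_VSID_L	0x100UL		/* assumed, per kernel headers */
#define SLB_VSID_LP	0x030UL

int main(void)
{
	unsigned long sllp_field = SLB_VSID_L | 0x10;	/* L=1, LP=01 */
	unsigned long packed = ((sllp_field & SLB_VSID_L) >> 6) |
			       ((sllp_field & SLB_VSID_LP) >> 4);

	printf("va bits: 0x%lx\n", packed << 5);	/* prints 0xa0 */
	return 0;
}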
@@ -554,6 +560,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
554 seg_off |= vpi << shift; 560 seg_off |= vpi << shift;
555 } 561 }
556 *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT; 562 *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
563 break;
557 case MMU_SEGSIZE_1T: 564 case MMU_SEGSIZE_1T:
558 /* We only have 40 - 23 bits of seg_off in avpn */ 565 /* We only have 40 - 23 bits of seg_off in avpn */
559 seg_off = (avpn & 0x1ffff) << 23; 566 seg_off = (avpn & 0x1ffff) << 23;
@@ -563,6 +570,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
563 seg_off |= vpi << shift; 570 seg_off |= vpi << shift;
564 } 571 }
565 *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT; 572 *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
573 break;
566 default: 574 default:
567 *vpn = size = 0; 575 *vpn = size = 0;
568 } 576 }
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 08397217e8ac..5850798826cd 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -27,6 +27,7 @@
27#include <linux/seq_file.h> 27#include <linux/seq_file.h>
28#include <linux/uaccess.h> 28#include <linux/uaccess.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <asm/cputhreads.h>
30#include <asm/sparsemem.h> 31#include <asm/sparsemem.h>
31#include <asm/prom.h> 32#include <asm/prom.h>
32#include <asm/smp.h> 33#include <asm/smp.h>
@@ -1318,7 +1319,8 @@ static int update_cpu_associativity_changes_mask(void)
1318 } 1319 }
1319 } 1320 }
1320 if (changed) { 1321 if (changed) {
1321 cpumask_set_cpu(cpu, changes); 1322 cpumask_or(changes, changes, cpu_sibling_mask(cpu));
1323 cpu = cpu_last_thread_sibling(cpu);
1322 } 1324 }
1323 } 1325 }
1324 1326
@@ -1426,7 +1428,7 @@ static int update_cpu_topology(void *data)
1426 if (!data) 1428 if (!data)
1427 return -EINVAL; 1429 return -EINVAL;
1428 1430
1429 cpu = get_cpu(); 1431 cpu = smp_processor_id();
1430 1432
1431 for (update = data; update; update = update->next) { 1433 for (update = data; update; update = update->next) {
1432 if (cpu != update->cpu) 1434 if (cpu != update->cpu)
@@ -1446,12 +1448,12 @@ static int update_cpu_topology(void *data)
1446 */ 1448 */
1447int arch_update_cpu_topology(void) 1449int arch_update_cpu_topology(void)
1448{ 1450{
1449 unsigned int cpu, changed = 0; 1451 unsigned int cpu, sibling, changed = 0;
1450 struct topology_update_data *updates, *ud; 1452 struct topology_update_data *updates, *ud;
1451 unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; 1453 unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
1452 cpumask_t updated_cpus; 1454 cpumask_t updated_cpus;
1453 struct device *dev; 1455 struct device *dev;
1454 int weight, i = 0; 1456 int weight, new_nid, i = 0;
1455 1457
1456 weight = cpumask_weight(&cpu_associativity_changes_mask); 1458 weight = cpumask_weight(&cpu_associativity_changes_mask);
1457 if (!weight) 1459 if (!weight)
@@ -1464,19 +1466,46 @@ int arch_update_cpu_topology(void)
1464 cpumask_clear(&updated_cpus); 1466 cpumask_clear(&updated_cpus);
1465 1467
1466 for_each_cpu(cpu, &cpu_associativity_changes_mask) { 1468 for_each_cpu(cpu, &cpu_associativity_changes_mask) {
1467 ud = &updates[i++]; 1469 /*
1468 ud->cpu = cpu; 1470 * If siblings aren't flagged for changes, updates list
1469 vphn_get_associativity(cpu, associativity); 1471 * will be too short. Skip on this update and set for next
1470 ud->new_nid = associativity_to_nid(associativity); 1472 * update.
1471 1473 */
1472 if (ud->new_nid < 0 || !node_online(ud->new_nid)) 1474 if (!cpumask_subset(cpu_sibling_mask(cpu),
1473 ud->new_nid = first_online_node; 1475 &cpu_associativity_changes_mask)) {
1476 pr_info("Sibling bits not set for associativity "
1477 "change, cpu%d\n", cpu);
1478 cpumask_or(&cpu_associativity_changes_mask,
1479 &cpu_associativity_changes_mask,
1480 cpu_sibling_mask(cpu));
1481 cpu = cpu_last_thread_sibling(cpu);
1482 continue;
1483 }
1474 1484
1475 ud->old_nid = numa_cpu_lookup_table[cpu]; 1485 /* Use associativity from first thread for all siblings */
1476 cpumask_set_cpu(cpu, &updated_cpus); 1486 vphn_get_associativity(cpu, associativity);
1487 new_nid = associativity_to_nid(associativity);
1488 if (new_nid < 0 || !node_online(new_nid))
1489 new_nid = first_online_node;
1490
1491 if (new_nid == numa_cpu_lookup_table[cpu]) {
1492 cpumask_andnot(&cpu_associativity_changes_mask,
1493 &cpu_associativity_changes_mask,
1494 cpu_sibling_mask(cpu));
1495 cpu = cpu_last_thread_sibling(cpu);
1496 continue;
1497 }
1477 1498
1478 if (i < weight) 1499 for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
1479 ud->next = &updates[i]; 1500 ud = &updates[i++];
1501 ud->cpu = sibling;
1502 ud->new_nid = new_nid;
1503 ud->old_nid = numa_cpu_lookup_table[sibling];
1504 cpumask_set_cpu(sibling, &updated_cpus);
1505 if (i < weight)
1506 ud->next = &updates[i];
1507 }
1508 cpu = cpu_last_thread_sibling(cpu);
1480 } 1509 }
1481 1510
1482 stop_machine(update_cpu_topology, &updates[0], &updated_cpus); 1511 stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
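The core idea of this hunk: all hardware threads of a core must move between NUMA nodes together, so if any thread is flagged for an associativity change the whole sibling set gets flagged and receives one shared lookup. A bitmask sketch of that sibling closure (8 threads per core, as on POWER8, is an assumption here; the kernel uses cpumask_t):

#include <stdint.h>
#include <stdio.h>

#define THREADS_PER_CORE 8

static uint64_t sibling_mask(int cpu)
{
	int first = cpu & ~(THREADS_PER_CORE - 1);	/* first thread of core */
	return ((1ULL << THREADS_PER_CORE) - 1) << first;
}

int main(void)
{
	uint64_t changes = 1ULL << 10;		/* only thread 10 flagged */

	changes |= sibling_mask(10);		/* flag its whole core */
	printf("mask 0x%llx\n", (unsigned long long)changes);	/* 0xff00 */
	return 0;
}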
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index a3985aee77fe..eeae308cf982 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -484,7 +484,7 @@ static bool is_ebb_event(struct perf_event *event)
484 * use bit 63 of the event code for something else if they wish. 484 * use bit 63 of the event code for something else if they wish.
485 */ 485 */
486 return (ppmu->flags & PPMU_EBB) && 486 return (ppmu->flags & PPMU_EBB) &&
487 ((event->attr.config >> EVENT_CONFIG_EBB_SHIFT) & 1); 487 ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
488} 488}
489 489
490static int ebb_event_check(struct perf_event *event) 490static int ebb_event_check(struct perf_event *event)
@@ -1252,8 +1252,11 @@ nocheck:
1252 1252
1253 ret = 0; 1253 ret = 0;
1254 out: 1254 out:
1255 if (has_branch_stack(event)) 1255 if (has_branch_stack(event)) {
1256 power_pmu_bhrb_enable(event); 1256 power_pmu_bhrb_enable(event);
1257 cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
1258 event->attr.branch_sample_type);
1259 }
1257 1260
1258 perf_pmu_enable(event->pmu); 1261 perf_pmu_enable(event->pmu);
1259 local_irq_restore(flags); 1262 local_irq_restore(flags);
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 96a64d6a8bdf..2ee4a707f0df 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -118,7 +118,7 @@
118 (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \ 118 (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \
119 (EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \ 119 (EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \
120 (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \ 120 (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \
121 (EVENT_EBB_MASK << EVENT_CONFIG_EBB_SHIFT) | \ 121 (EVENT_EBB_MASK << PERF_EVENT_CONFIG_EBB_SHIFT) | \
122 EVENT_PSEL_MASK) 122 EVENT_PSEL_MASK)
123 123
124/* MMCRA IFM bits - POWER8 */ 124/* MMCRA IFM bits - POWER8 */
@@ -233,10 +233,10 @@ static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long
233 pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; 233 pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
234 unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK; 234 unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
235 cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK; 235 cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
236 ebb = (event >> EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK; 236 ebb = (event >> PERF_EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK;
237 237
238 /* Clear the EBB bit in the event, so event checks work below */ 238 /* Clear the EBB bit in the event, so event checks work below */
239 event &= ~(EVENT_EBB_MASK << EVENT_CONFIG_EBB_SHIFT); 239 event &= ~(EVENT_EBB_MASK << PERF_EVENT_CONFIG_EBB_SHIFT);
240 240
241 if (pmc) { 241 if (pmc) {
242 if (pmc > 6) 242 if (pmc > 6)
@@ -561,18 +561,13 @@ static int power8_generic_events[] = {
561static u64 power8_bhrb_filter_map(u64 branch_sample_type) 561static u64 power8_bhrb_filter_map(u64 branch_sample_type)
562{ 562{
563 u64 pmu_bhrb_filter = 0; 563 u64 pmu_bhrb_filter = 0;
564 u64 br_privilege = branch_sample_type & ONLY_PLM;
565 564
566 /* BHRB and regular PMU events share the same prvillege state 565 /* BHRB and regular PMU events share the same privilege state
567 * filter configuration. BHRB is always recorded along with a 566 * filter configuration. BHRB is always recorded along with a
568 * regular PMU event. So privilege state filter criteria for BHRB 567 * regular PMU event. As the privilege state filter is handled
569 * and the companion PMU events has to be the same. As a default 568 * in the basic PMC configuration of the accompanying regular
570 * "perf record" tool sets all privillege bits ON when no filter 569 * PMU event, we ignore any separate BHRB specific request.
571 * criteria is provided in the command line. So as along as all
572 * privillege bits are ON or they are OFF, we are good to go.
573 */ 570 */
574 if ((br_privilege != 7) && (br_privilege != 0))
575 return -1;
576 571
577 /* No branch filter requested */ 572 /* No branch filter requested */
578 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY) 573 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
@@ -621,10 +616,19 @@ static struct power_pmu power8_pmu = {
621 616
622static int __init init_power8_pmu(void) 617static int __init init_power8_pmu(void)
623{ 618{
619 int rc;
620
624 if (!cur_cpu_spec->oprofile_cpu_type || 621 if (!cur_cpu_spec->oprofile_cpu_type ||
625 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8")) 622 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
626 return -ENODEV; 623 return -ENODEV;
627 624
628 return register_power_pmu(&power8_pmu); 625 rc = register_power_pmu(&power8_pmu);
626 if (rc)
627 return rc;
628
629 /* Tell userspace that EBB is supported */
630 cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
631
632 return 0;
629} 633}
630early_initcall(init_power8_pmu); 634early_initcall(init_power8_pmu);
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 969cce73055a..79663d26e6ea 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -114,7 +114,7 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
114 * the root bridge. So it's not reasonable to continue 114 * the root bridge. So it's not reasonable to continue
115 * the probing. 115 * the probing.
116 */ 116 */
117 if (!dn || !edev) 117 if (!dn || !edev || edev->pe)
118 return 0; 118 return 0;
119 119
120 /* Skip for PCI-ISA bridge */ 120 /* Skip for PCI-ISA bridge */
@@ -122,8 +122,19 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
122 return 0; 122 return 0;
123 123
124 /* Initialize eeh device */ 124 /* Initialize eeh device */
125 edev->class_code = dev->class; 125 edev->class_code = dev->class;
126 edev->mode = 0; 126 edev->mode &= 0xFFFFFF00;
127 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
128 edev->mode |= EEH_DEV_BRIDGE;
129 if (pci_is_pcie(dev)) {
130 edev->pcie_cap = pci_pcie_cap(dev);
131
132 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
133 edev->mode |= EEH_DEV_ROOT_PORT;
134 else if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
135 edev->mode |= EEH_DEV_DS_PORT;
136 }
137
127 edev->config_addr = ((dev->bus->number << 8) | dev->devfn); 138 edev->config_addr = ((dev->bus->number << 8) | dev->devfn);
128 edev->pe_config_addr = phb->bdfn_to_pe(phb, dev->bus, dev->devfn & 0xff); 139 edev->pe_config_addr = phb->bdfn_to_pe(phb, dev->bus, dev->devfn & 0xff);
129 140
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 49b57b9f835d..d8140b125e62 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1266,7 +1266,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
1266 opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1 , OPAL_MAP_PE); 1266 opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1 , OPAL_MAP_PE);
1267} 1267}
1268 1268
1269void pnv_pci_init_ioda2_phb(struct device_node *np) 1269void __init pnv_pci_init_ioda2_phb(struct device_node *np)
1270{ 1270{
1271 pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2); 1271 pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
1272} 1272}
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 1bd3399146ed..62b4f8025de0 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -19,7 +19,6 @@ config PPC_PSERIES
19 select ZLIB_DEFLATE 19 select ZLIB_DEFLATE
20 select PPC_DOORBELL 20 select PPC_DOORBELL
21 select HAVE_CONTEXT_TRACKING 21 select HAVE_CONTEXT_TRACKING
22 select HOTPLUG if SMP
23 select HOTPLUG_CPU if SMP 22 select HOTPLUG_CPU if SMP
24 default y 23 default y
25 24
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index b456b157d33d..7fbc25b1813f 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -133,6 +133,48 @@ static int pseries_eeh_init(void)
133 return 0; 133 return 0;
134} 134}
135 135
136static int pseries_eeh_cap_start(struct device_node *dn)
137{
138 struct pci_dn *pdn = PCI_DN(dn);
139 u32 status;
140
141 if (!pdn)
142 return 0;
143
144 rtas_read_config(pdn, PCI_STATUS, 2, &status);
145 if (!(status & PCI_STATUS_CAP_LIST))
146 return 0;
147
148 return PCI_CAPABILITY_LIST;
149}
150
151
152static int pseries_eeh_find_cap(struct device_node *dn, int cap)
153{
154 struct pci_dn *pdn = PCI_DN(dn);
155 int pos = pseries_eeh_cap_start(dn);
156 int cnt = 48; /* Maximal number of capabilities */
157 u32 id;
158
159 if (!pos)
160 return 0;
161
162 while (cnt--) {
163 rtas_read_config(pdn, pos, 1, &pos);
164 if (pos < 0x40)
165 break;
166 pos &= ~3;
167 rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
168 if (id == 0xff)
169 break;
170 if (id == cap)
171 return pos;
172 pos += PCI_CAP_LIST_NEXT;
173 }
174
175 return 0;
176}
177
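pseries_eeh_find_cap() is a by-hand pci_find_capability() built on RTAS config reads. The same walk against an in-memory config space makes the pointer chase explicit (the 48-iteration bound guards against a malformed, looping list):

#include <stdint.h>

#define PCI_STATUS		0x06
#define PCI_STATUS_CAP_LIST	0x10
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_LIST_ID		0
#define PCI_CAP_LIST_NEXT	1

static int find_cap(const uint8_t cfg[256], uint8_t cap)
{
	int cnt = 48;			/* maximal number of capabilities */
	int pos;

	if (!(cfg[PCI_STATUS] & PCI_STATUS_CAP_LIST))
		return 0;		/* no capability list at all */

	pos = cfg[PCI_CAPABILITY_LIST];
	while (cnt-- && pos >= 0x40) {	/* caps live above the header */
		pos &= ~3;		/* pointers are dword aligned */
		if (cfg[pos + PCI_CAP_LIST_ID] == 0xff)
			break;
		if (cfg[pos + PCI_CAP_LIST_ID] == cap)
			return pos;
		pos = cfg[pos + PCI_CAP_LIST_NEXT];
	}
	return 0;
}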
136/** 178/**
137 * pseries_eeh_of_probe - EEH probe on the given device 179 * pseries_eeh_of_probe - EEH probe on the given device
138 * @dn: OF node 180 * @dn: OF node
@@ -146,14 +188,16 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
146{ 188{
147 struct eeh_dev *edev; 189 struct eeh_dev *edev;
148 struct eeh_pe pe; 190 struct eeh_pe pe;
191 struct pci_dn *pdn = PCI_DN(dn);
149 const u32 *class_code, *vendor_id, *device_id; 192 const u32 *class_code, *vendor_id, *device_id;
150 const u32 *regs; 193 const u32 *regs;
194 u32 pcie_flags;
151 int enable = 0; 195 int enable = 0;
152 int ret; 196 int ret;
153 197
154 /* Retrieve OF node and eeh device */ 198 /* Retrieve OF node and eeh device */
155 edev = of_node_to_eeh_dev(dn); 199 edev = of_node_to_eeh_dev(dn);
156 if (!of_device_is_available(dn)) 200 if (edev->pe || !of_device_is_available(dn))
157 return NULL; 201 return NULL;
158 202
159 /* Retrieve class/vendor/device IDs */ 203 /* Retrieve class/vendor/device IDs */
@@ -167,9 +211,26 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
167 if (dn->type && !strcmp(dn->type, "isa")) 211 if (dn->type && !strcmp(dn->type, "isa"))
168 return NULL; 212 return NULL;
169 213
170 /* Update class code and mode of eeh device */ 214 /*
 215 * Update class code and mode of eeh device. We need to
 216 * correctly reflect whether the current device is a root
 217 * port or a PCIe switch downstream port.
218 */
171 edev->class_code = *class_code; 219 edev->class_code = *class_code;
172 edev->mode = 0; 220 edev->pcie_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_EXP);
221 edev->mode &= 0xFFFFFF00;
222 if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
223 edev->mode |= EEH_DEV_BRIDGE;
224 if (edev->pcie_cap) {
225 rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
226 2, &pcie_flags);
227 pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
228 if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
229 edev->mode |= EEH_DEV_ROOT_PORT;
230 else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
231 edev->mode |= EEH_DEV_DS_PORT;
232 }
233 }
173 234
174 /* Retrieve the device address */ 235 /* Retrieve the device address */
175 regs = of_get_property(dn, "reg", NULL); 236 regs = of_get_property(dn, "reg", NULL);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 02d6e21619bb..8bad880bd177 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -146,7 +146,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
146 flags = 0; 146 flags = 0;
147 147
148 /* Make pHyp happy */ 148 /* Make pHyp happy */
149 if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU)) 149 if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU))
150 hpte_r &= ~_PAGE_COHERENT; 150 hpte_r &= ~_PAGE_COHERENT;
151 if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N)) 151 if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
152 flags |= H_COALESCE_CAND; 152 flags |= H_COALESCE_CAND;
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 9f8671a44551..6a5f2b1f32ca 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -569,35 +569,6 @@ error:
569 return ret; 569 return ret;
570} 570}
571 571
572static int unzip_oops(char *oops_buf, char *big_buf)
573{
574 struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
575 u64 timestamp = oops_hdr->timestamp;
576 char *big_oops_data = NULL;
577 char *oops_data_buf = NULL;
578 size_t big_oops_data_sz;
579 int unzipped_len;
580
581 big_oops_data = big_buf + sizeof(struct oops_log_info);
582 big_oops_data_sz = big_oops_buf_sz - sizeof(struct oops_log_info);
583 oops_data_buf = oops_buf + sizeof(struct oops_log_info);
584
585 unzipped_len = nvram_decompress(oops_data_buf, big_oops_data,
586 oops_hdr->report_length,
587 big_oops_data_sz);
588
589 if (unzipped_len < 0) {
590 pr_err("nvram: decompression failed; returned %d\n",
591 unzipped_len);
592 return -1;
593 }
594 oops_hdr = (struct oops_log_info *)big_buf;
595 oops_hdr->version = OOPS_HDR_VERSION;
596 oops_hdr->report_length = (u16) unzipped_len;
597 oops_hdr->timestamp = timestamp;
598 return 0;
599}
600
601static int nvram_pstore_open(struct pstore_info *psi) 572static int nvram_pstore_open(struct pstore_info *psi)
602{ 573{
603 /* Reset the iterator to start reading partitions again */ 574 /* Reset the iterator to start reading partitions again */
@@ -685,10 +656,9 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
685 unsigned int err_type, id_no, size = 0; 656 unsigned int err_type, id_no, size = 0;
686 struct nvram_os_partition *part = NULL; 657 struct nvram_os_partition *part = NULL;
687 char *buff = NULL, *big_buff = NULL; 658 char *buff = NULL, *big_buff = NULL;
688 int rc, sig = 0; 659 int sig = 0;
689 loff_t p; 660 loff_t p;
690 661
691read_partition:
692 read_type++; 662 read_type++;
693 663
694 switch (nvram_type_ids[read_type]) { 664 switch (nvram_type_ids[read_type]) {
@@ -749,30 +719,46 @@ read_partition:
749 *id = id_no; 719 *id = id_no;
750 720
751 if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) { 721 if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
722 int length, unzipped_len;
723 size_t hdr_size;
724
752 oops_hdr = (struct oops_log_info *)buff; 725 oops_hdr = (struct oops_log_info *)buff;
753 *buf = buff + sizeof(*oops_hdr); 726 if (oops_hdr->version < OOPS_HDR_VERSION) {
727 /* Old format oops header had 2-byte record size */
728 hdr_size = sizeof(u16);
729 length = oops_hdr->version;
730 time->tv_sec = 0;
731 time->tv_nsec = 0;
732 } else {
733 hdr_size = sizeof(*oops_hdr);
734 length = oops_hdr->report_length;
735 time->tv_sec = oops_hdr->timestamp;
736 time->tv_nsec = 0;
737 }
738 *buf = kmalloc(length, GFP_KERNEL);
739 if (*buf == NULL)
740 return -ENOMEM;
741 memcpy(*buf, buff + hdr_size, length);
742 kfree(buff);
754 743
755 if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) { 744 if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) {
756 big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL); 745 big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL);
757 if (!big_buff) 746 if (!big_buff)
758 return -ENOMEM; 747 return -ENOMEM;
759 748
760 rc = unzip_oops(buff, big_buff); 749 unzipped_len = nvram_decompress(*buf, big_buff,
750 length, big_oops_buf_sz);
761 751
762 if (rc != 0) { 752 if (unzipped_len < 0) {
763 kfree(buff); 753 pr_err("nvram: decompression failed, returned "
754 "rc %d\n", unzipped_len);
764 kfree(big_buff); 755 kfree(big_buff);
765 goto read_partition; 756 } else {
757 *buf = big_buff;
758 length = unzipped_len;
766 } 759 }
767
768 oops_hdr = (struct oops_log_info *)big_buff;
769 *buf = big_buff + sizeof(*oops_hdr);
770 kfree(buff);
771 } 760 }
772 761 return length;
773 time->tv_sec = oops_hdr->timestamp;
774 time->tv_nsec = 0;
775 return oops_hdr->report_length;
776 } 762 }
777 763
778 *buf = buff; 764 *buf = buff;
@@ -816,6 +802,7 @@ static int nvram_pstore_init(void)
816static void __init nvram_init_oops_partition(int rtas_partition_exists) 802static void __init nvram_init_oops_partition(int rtas_partition_exists)
817{ 803{
818 int rc; 804 int rc;
805 size_t size;
819 806
820 rc = pseries_nvram_init_os_partition(&oops_log_partition); 807 rc = pseries_nvram_init_os_partition(&oops_log_partition);
821 if (rc != 0) { 808 if (rc != 0) {
@@ -844,8 +831,9 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
844 big_oops_buf_sz = (oops_data_sz * 100) / 45; 831 big_oops_buf_sz = (oops_data_sz * 100) / 45;
845 big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL); 832 big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
846 if (big_oops_buf) { 833 if (big_oops_buf) {
847 stream.workspace = kmalloc(zlib_deflate_workspacesize( 834 size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
848 WINDOW_BITS, MEM_LEVEL), GFP_KERNEL); 835 zlib_inflate_workspacesize());
836 stream.workspace = kmalloc(size, GFP_KERNEL);
849 if (!stream.workspace) { 837 if (!stream.workspace) {
850 pr_err("nvram: No memory for compression workspace; " 838 pr_err("nvram: No memory for compression workspace; "
851 "skipping compression of %s partition data\n", 839 "skipping compression of %s partition data\n",
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 7b3cbde8c783..721c0586b284 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -287,6 +287,9 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
287 unsigned long *savep; 287 unsigned long *savep;
288 struct rtas_error_log *h, *errhdr = NULL; 288 struct rtas_error_log *h, *errhdr = NULL;
289 289
290 /* Mask top two bits */
291 regs->gpr[3] &= ~(0x3UL << 62);
292
290 if (!VALID_FWNMI_BUFFER(regs->gpr[3])) { 293 if (!VALID_FWNMI_BUFFER(regs->gpr[3])) {
291 printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]); 294 printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]);
292 return NULL; 295 return NULL;
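
The added masking keeps the VALID_FWNMI_BUFFER() check working when firmware sets the top two bits of r3; only the lower bits carry the buffer address. A standalone user-space illustration of the mask, with a made-up register value:

#include <stdint.h>
#include <stdio.h>

/* Clear the two most significant bits of a 64-bit value, as the
 * hunk above does before validating the FWNMI buffer address. */
static uint64_t mask_top_two_bits(uint64_t v)
{
	return v & ~(0x3ULL << 62);
}

int main(void)
{
	uint64_t r3 = 0xC000000000001234ULL;	/* hypothetical */

	printf("0x%016llx\n", (unsigned long long)mask_top_two_bits(r3));
	/* prints 0x0000000000001234 */
	return 0;
}
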
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 22f75b504f7f..8a4cae78f03c 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -118,6 +118,7 @@ config S390
118 select HAVE_FUNCTION_TRACE_MCOUNT_TEST 118 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
119 select HAVE_KERNEL_BZIP2 119 select HAVE_KERNEL_BZIP2
120 select HAVE_KERNEL_GZIP 120 select HAVE_KERNEL_GZIP
121 select HAVE_KERNEL_LZ4
121 select HAVE_KERNEL_LZMA 122 select HAVE_KERNEL_LZMA
122 select HAVE_KERNEL_LZO 123 select HAVE_KERNEL_LZO
123 select HAVE_KERNEL_XZ 124 select HAVE_KERNEL_XZ
@@ -227,11 +228,12 @@ config MARCH_Z196
227 not work on older machines. 228 not work on older machines.
228 229
229config MARCH_ZEC12 230config MARCH_ZEC12
230 bool "IBM zEC12" 231 bool "IBM zBC12 and zEC12"
231 select HAVE_MARCH_ZEC12_FEATURES if 64BIT 232 select HAVE_MARCH_ZEC12_FEATURES if 64BIT
232 help 233 help
233 Select this to enable optimizations for IBM zEC12 (2827 series). The 234 Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and
234 kernel will be slightly faster but will not work on older machines. 235 2827 series). The kernel will be slightly faster but will not work on
236 older machines.
235 237
236endchoice 238endchoice
237 239
@@ -709,6 +711,7 @@ config S390_GUEST
709 def_bool y 711 def_bool y
710 prompt "s390 support for virtio devices" 712 prompt "s390 support for virtio devices"
711 depends on 64BIT 713 depends on 64BIT
714 select TTY
712 select VIRTUALIZATION 715 select VIRTUALIZATION
713 select VIRTIO 716 select VIRTIO
714 select VIRTIO_CONSOLE 717 select VIRTIO_CONSOLE
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 3ad8f61c9985..866ecbe670e4 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -6,9 +6,9 @@
6 6
7BITS := $(if $(CONFIG_64BIT),64,31) 7BITS := $(if $(CONFIG_64BIT),64,31)
8 8
9targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \ 9targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
10 vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o \ 10targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
11 sizes.h head$(BITS).o 11targets += misc.o piggy.o sizes.h head$(BITS).o
12 12
13KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 13KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
14KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING 14KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -48,6 +48,7 @@ vmlinux.bin.all-y := $(obj)/vmlinux.bin
48 48
49suffix-$(CONFIG_KERNEL_GZIP) := gz 49suffix-$(CONFIG_KERNEL_GZIP) := gz
50suffix-$(CONFIG_KERNEL_BZIP2) := bz2 50suffix-$(CONFIG_KERNEL_BZIP2) := bz2
51suffix-$(CONFIG_KERNEL_LZ4) := lz4
51suffix-$(CONFIG_KERNEL_LZMA) := lzma 52suffix-$(CONFIG_KERNEL_LZMA) := lzma
52suffix-$(CONFIG_KERNEL_LZO) := lzo 53suffix-$(CONFIG_KERNEL_LZO) := lzo
53suffix-$(CONFIG_KERNEL_XZ) := xz 54suffix-$(CONFIG_KERNEL_XZ) := xz
@@ -56,6 +57,8 @@ $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
56 $(call if_changed,gzip) 57 $(call if_changed,gzip)
57$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) 58$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
58 $(call if_changed,bzip2) 59 $(call if_changed,bzip2)
60$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y)
61 $(call if_changed,lz4)
59$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) 62$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
60 $(call if_changed,lzma) 63 $(call if_changed,lzma)
61$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) 64$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index c4c6a1cf221b..57cbaff1f397 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -47,6 +47,10 @@ static unsigned long free_mem_end_ptr;
47#include "../../../../lib/decompress_bunzip2.c" 47#include "../../../../lib/decompress_bunzip2.c"
48#endif 48#endif
49 49
50#ifdef CONFIG_KERNEL_LZ4
51#include "../../../../lib/decompress_unlz4.c"
52#endif
53
50#ifdef CONFIG_KERNEL_LZMA 54#ifdef CONFIG_KERNEL_LZMA
51#include "../../../../lib/decompress_unlzma.c" 55#include "../../../../lib/decompress_unlzma.c"
52#endif 56#endif
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 4d8604e311f3..7d4676758733 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -693,7 +693,7 @@ static inline int find_next_bit_left(const unsigned long *addr,
693 size -= offset; 693 size -= offset;
694 p = addr + offset / BITS_PER_LONG; 694 p = addr + offset / BITS_PER_LONG;
695 if (bit) { 695 if (bit) {
696 set = __flo_word(0, *p & (~0UL << bit)); 696 set = __flo_word(0, *p & (~0UL >> bit));
697 if (set >= size) 697 if (set >= size)
698 return size + offset; 698 return size + offset;
699 if (set < BITS_PER_LONG) 699 if (set < BITS_PER_LONG)
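
The one-character fix matters because find_next_bit_left() scans from the most significant bit down: skipping the first `bit` positions means clearing the high-order bits, which is ~0UL >> bit, not ~0UL << bit. A quick user-space demonstration, assuming a 64-bit unsigned long:

#include <stdio.h>

int main(void)
{
	unsigned long bit = 8;

	/* keeps bits below the top 8: correct for a left-to-right scan */
	printf("~0UL >> bit = %016lx\n", ~0UL >> bit); /* 00ffffffffffffff */
	/* the old code instead dropped the *low* 8 bits */
	printf("~0UL << bit = %016lx\n", ~0UL << bit); /* ffffffffffffff00 */
	return 0;
}
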
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index b75d7d686684..6d6d92b4ea11 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -32,6 +32,7 @@ struct mmu_gather {
32 struct mm_struct *mm; 32 struct mm_struct *mm;
33 struct mmu_table_batch *batch; 33 struct mmu_table_batch *batch;
34 unsigned int fullmm; 34 unsigned int fullmm;
35 unsigned long start, end;
35}; 36};
36 37
37struct mmu_table_batch { 38struct mmu_table_batch {
@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
48 49
49static inline void tlb_gather_mmu(struct mmu_gather *tlb, 50static inline void tlb_gather_mmu(struct mmu_gather *tlb,
50 struct mm_struct *mm, 51 struct mm_struct *mm,
51 unsigned int full_mm_flush) 52 unsigned long start,
53 unsigned long end)
52{ 54{
53 tlb->mm = mm; 55 tlb->mm = mm;
54 tlb->fullmm = full_mm_flush; 56 tlb->start = start;
57 tlb->end = end;
58 tlb->fullmm = !(start | (end+1));
55 tlb->batch = NULL; 59 tlb->batch = NULL;
56 if (tlb->fullmm) 60 if (tlb->fullmm)
57 __tlb_flush_mm(mm); 61 __tlb_flush_mm(mm);
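
The new tlb_gather_mmu() signature encodes a full-mm flush as the range (0, -1UL): start | (end + 1) is zero only for that pair, so no separate flag argument is needed. The same convention appears in the sh and um hunks further down. A user-space sketch of the encoding:

#include <stdio.h>

/* A full-mm flush is requested as the range (0, -1UL); any bounded
 * (start, end) pair makes start | (end + 1) non-zero. */
static int is_fullmm(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));
}

int main(void)
{
	printf("%d\n", is_fullmm(0, ~0UL));        /* 1: whole address space */
	printf("%d\n", is_fullmm(0x1000, 0x2000)); /* 0: bounded range */
	return 0;
}
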
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index a6fc037671b1..500aa1029bcb 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -52,12 +52,13 @@ static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
52 52
53static bool is_in_guest(struct pt_regs *regs) 53static bool is_in_guest(struct pt_regs *regs)
54{ 54{
55 unsigned long ip = instruction_pointer(regs);
56
57 if (user_mode(regs)) 55 if (user_mode(regs))
58 return false; 56 return false;
59 57#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
60 return ip == (unsigned long) &sie_exit; 58 return instruction_pointer(regs) == (unsigned long) &sie_exit;
59#else
60 return false;
61#endif
61} 62}
62 63
63static unsigned long guest_is_user_mode(struct pt_regs *regs) 64static unsigned long guest_is_user_mode(struct pt_regs *regs)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 497451ec5e26..aeed8a61fa0d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -994,6 +994,7 @@ static void __init setup_hwcaps(void)
994 strcpy(elf_platform, "z196"); 994 strcpy(elf_platform, "z196");
995 break; 995 break;
996 case 0x2827: 996 case 0x2827:
997 case 0x2828:
997 strcpy(elf_platform, "zEC12"); 998 strcpy(elf_platform, "zEC12");
998 break; 999 break;
999 } 1000 }
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ba694d2ba51e..34c1c9a90be2 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -702,14 +702,25 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
702 return rc; 702 return rc;
703 703
704 vcpu->arch.sie_block->icptcode = 0; 704 vcpu->arch.sie_block->icptcode = 0;
705 preempt_disable();
706 kvm_guest_enter();
707 preempt_enable();
708 VCPU_EVENT(vcpu, 6, "entering sie flags %x", 705 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
709 atomic_read(&vcpu->arch.sie_block->cpuflags)); 706 atomic_read(&vcpu->arch.sie_block->cpuflags));
710 trace_kvm_s390_sie_enter(vcpu, 707 trace_kvm_s390_sie_enter(vcpu,
711 atomic_read(&vcpu->arch.sie_block->cpuflags)); 708 atomic_read(&vcpu->arch.sie_block->cpuflags));
709
710 /*
 711 * Since PF_VCPU is used in the fault handler, no uaccess may
 712 * happen between guest_enter and guest_exit.
713 */
714 preempt_disable();
715 kvm_guest_enter();
716 preempt_enable();
712 rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs); 717 rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
718 kvm_guest_exit();
719
720 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
721 vcpu->arch.sie_block->icptcode);
722 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
723
713 if (rc > 0) 724 if (rc > 0)
714 rc = 0; 725 rc = 0;
715 if (rc < 0) { 726 if (rc < 0) {
@@ -721,10 +732,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
721 rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 732 rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
722 } 733 }
723 } 734 }
724 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
725 vcpu->arch.sie_block->icptcode);
726 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
727 kvm_guest_exit();
728 735
729 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); 736 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
730 return rc; 737 return rc;
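
The reordering narrows the PF_VCPU window to the sie64a() call itself: the fault handler tests PF_VCPU, so no uaccess (including the event and trace logging) may run between kvm_guest_enter() and kvm_guest_exit(). A condensed sketch of the resulting ordering, with names taken from the hunk; error handling and the register copy-back are omitted:

static int vcpu_run_once_sketch(struct kvm_vcpu *vcpu)
{
	int rc;

	/* trace/log first: these may touch user memory and fault */
	trace_kvm_s390_sie_enter(vcpu,
			atomic_read(&vcpu->arch.sie_block->cpuflags));

	preempt_disable();
	kvm_guest_enter();	/* sets PF_VCPU */
	preempt_enable();

	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);

	kvm_guest_exit();	/* clears PF_VCPU: uaccess is safe again */
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
	return rc;
}
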
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 0da3e6eb6be6..4cdc54e63ebc 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -16,6 +16,7 @@
16#include <linux/errno.h> 16#include <linux/errno.h>
17#include <linux/compat.h> 17#include <linux/compat.h>
18#include <asm/asm-offsets.h> 18#include <asm/asm-offsets.h>
19#include <asm/facility.h>
19#include <asm/current.h> 20#include <asm/current.h>
20#include <asm/debug.h> 21#include <asm/debug.h>
21#include <asm/ebcdic.h> 22#include <asm/ebcdic.h>
@@ -532,8 +533,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
532 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 533 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
533 534
534 /* Only provide non-quiescing support if the host supports it */ 535 /* Only provide non-quiescing support if the host supports it */
535 if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && 536 if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
536 S390_lowcore.stfl_fac_list & 0x00020000)
537 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 537 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
538 538
539 /* No support for conditional-SSKE */ 539 /* No support for conditional-SSKE */
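
Besides being shorter, test_facility(14) also fixes the sense of the old check, which injected the exception when the facility bit *was* set. The old magic constant lines up because s390 facility lists number bits from the most significant bit of each word: facility 14 in word 0 is 1 << (31 - 14) == 0x00020000. A user-space sketch of that numbering (my reading of the convention, not a kernel API):

#include <stdint.h>
#include <stdio.h>

static int facility_set(const uint32_t *fac_list, unsigned int nr)
{
	/* MSB-first: facility 0 is the top bit of word 0 */
	return (fac_list[nr / 32] >> (31 - (nr % 32))) & 1;
}

int main(void)
{
	uint32_t fac[2] = { 0x00020000, 0 };

	printf("facility 14: %d\n", facility_set(fac, 14)); /* 1 */
	printf("facility 15: %d\n", facility_set(fac, 15)); /* 0 */
	return 0;
}
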
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index ce36ea80e4f9..ad446b0c55b6 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -69,6 +69,7 @@ static void __init setup_zero_pages(void)
69 order = 2; 69 order = 2;
70 break; 70 break;
71 case 0x2827: /* zEC12 */ 71 case 0x2827: /* zEC12 */
72 case 0x2828: /* zEC12 */
72 default: 73 default:
73 order = 5; 74 order = 5;
74 break; 75 break;
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index ffeb17ce7f31..930783d2c99b 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -440,7 +440,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
440 switch (id.machine) { 440 switch (id.machine) {
441 case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break; 441 case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
442 case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break; 442 case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
443 case 0x2827: ops->cpu_type = "s390/zEC12"; break; 443 case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break;
444 default: return -ENODEV; 444 default: return -ENODEV;
445 } 445 }
446 } 446 }
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index c8def8bc9020..5fc237581caf 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -87,6 +87,8 @@ config STACKTRACE_SUPPORT
87 87
88source "init/Kconfig" 88source "init/Kconfig"
89 89
90source "kernel/Kconfig.freezer"
91
90config MMU 92config MMU
91 def_bool y 93 def_bool y
92 94
diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig
index 2051821724c6..0cf4097b71e8 100644
--- a/arch/sh/configs/sh03_defconfig
+++ b/arch/sh/configs/sh03_defconfig
@@ -22,7 +22,7 @@ CONFIG_PREEMPT=y
22CONFIG_CMDLINE_OVERWRITE=y 22CONFIG_CMDLINE_OVERWRITE=y
23CONFIG_CMDLINE="console=ttySC1,115200 mem=64M root=/dev/nfs" 23CONFIG_CMDLINE="console=ttySC1,115200 mem=64M root=/dev/nfs"
24CONFIG_PCI=y 24CONFIG_PCI=y
25CONFIG_HOTPLUG_PCI=m 25CONFIG_HOTPLUG_PCI=y
26CONFIG_BINFMT_MISC=y 26CONFIG_BINFMT_MISC=y
27CONFIG_NET=y 27CONFIG_NET=y
28CONFIG_PACKET=y 28CONFIG_PACKET=y
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index e61d43d9f689..362192ed12fe 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
36} 36}
37 37
38static inline void 38static inline void
39tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) 39tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
40{ 40{
41 tlb->mm = mm; 41 tlb->mm = mm;
42 tlb->fullmm = full_mm_flush; 42 tlb->start = start;
43 tlb->end = end;
44 tlb->fullmm = !(start | (end+1));
43 45
44 init_tlb_gather(tlb); 46 init_tlb_gather(tlb);
45} 47}
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 4febacd1a8a1..29b0301c18aa 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
45} 45}
46 46
47static inline void 47static inline void
48tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) 48tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
49{ 49{
50 tlb->mm = mm; 50 tlb->mm = mm;
51 tlb->fullmm = full_mm_flush; 51 tlb->start = start;
52 tlb->end = end;
53 tlb->fullmm = !(start | (end+1));
52 54
53 init_tlb_gather(tlb); 55 init_tlb_gather(tlb);
54} 56}
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index d606463aa6d6..b7388a425f09 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -225,7 +225,7 @@ static void low_free(unsigned long size, unsigned long addr)
225 unsigned long nr_pages; 225 unsigned long nr_pages;
226 226
227 nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; 227 nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
228 efi_call_phys2(sys_table->boottime->free_pages, addr, size); 228 efi_call_phys2(sys_table->boottime->free_pages, addr, nr_pages);
229} 229}
230 230
231static void find_bits(unsigned long mask, u8 *pos, u8 *size) 231static void find_bits(unsigned long mask, u8 *pos, u8 *size)
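
The fix above is a classic unit mismatch: the EFI free_pages boot service takes a page count, and low_free() had already computed nr_pages but passed the byte size. A tiny user-space sketch of the conversion:

#include <stdio.h>

#define EFI_PAGE_SIZE 4096UL

/* Round a byte count up to whole EFI pages, as the fixed call needs. */
static unsigned long bytes_to_pages(unsigned long size)
{
	return (size + EFI_PAGE_SIZE - 1) / EFI_PAGE_SIZE;
}

int main(void)
{
	printf("%lu\n", bytes_to_pages(1));    /* 1 */
	printf("%lu\n", bytes_to_pages(8192)); /* 2 */
	return 0;
}
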
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 7d6ba9db1be9..6c63c358a7e6 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
27obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o 27obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
28obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o 28obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
29obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o 29obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
30obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o
31 30
32# These modules require assembler to support AVX. 31# These modules require assembler to support AVX.
33ifeq ($(avx_supported),yes) 32ifeq ($(avx_supported),yes)
@@ -82,4 +81,3 @@ crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o
82crc32-pclmul-y := crc32-pclmul_asm.o crc32-pclmul_glue.o 81crc32-pclmul-y := crc32-pclmul_asm.o crc32-pclmul_glue.o
83sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o 82sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o
84sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o 83sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o
85crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o
diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S
deleted file mode 100644
index 35e97569d05f..000000000000
--- a/arch/x86/crypto/crct10dif-pcl-asm_64.S
+++ /dev/null
@@ -1,643 +0,0 @@
1########################################################################
2# Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
3#
4# Copyright (c) 2013, Intel Corporation
5#
6# Authors:
7# Erdinc Ozturk <erdinc.ozturk@intel.com>
8# Vinodh Gopal <vinodh.gopal@intel.com>
9# James Guilford <james.guilford@intel.com>
10# Tim Chen <tim.c.chen@linux.intel.com>
11#
12# This software is available to you under a choice of one of two
13# licenses. You may choose to be licensed under the terms of the GNU
14# General Public License (GPL) Version 2, available from the file
15# COPYING in the main directory of this source tree, or the
16# OpenIB.org BSD license below:
17#
18# Redistribution and use in source and binary forms, with or without
19# modification, are permitted provided that the following conditions are
20# met:
21#
22# * Redistributions of source code must retain the above copyright
23# notice, this list of conditions and the following disclaimer.
24#
25# * Redistributions in binary form must reproduce the above copyright
26# notice, this list of conditions and the following disclaimer in the
27# documentation and/or other materials provided with the
28# distribution.
29#
30# * Neither the name of the Intel Corporation nor the names of its
31# contributors may be used to endorse or promote products derived from
32# this software without specific prior written permission.
33#
34#
35# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY
36# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
37# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
38# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
39# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
40# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
41# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
42# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
43# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
44# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
45# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46########################################################################
47# Function API:
48# UINT16 crc_t10dif_pcl(
49# UINT16 init_crc, //initial CRC value, 16 bits
50# const unsigned char *buf, //buffer pointer to calculate CRC on
51# UINT64 len //buffer length in bytes (64-bit data)
52# );
53#
54# Reference paper titled "Fast CRC Computation for Generic
55# Polynomials Using PCLMULQDQ Instruction"
56# URL: http://www.intel.com/content/dam/www/public/us/en/documents
57# /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
58#
59#
60
61#include <linux/linkage.h>
62
63.text
64
65#define arg1 %rdi
66#define arg2 %rsi
67#define arg3 %rdx
68
69#define arg1_low32 %edi
70
71ENTRY(crc_t10dif_pcl)
72.align 16
73
74 # adjust the 16-bit initial_crc value, scale it to 32 bits
75 shl $16, arg1_low32
76
77 # Allocate Stack Space
78 mov %rsp, %rcx
79 sub $16*2, %rsp
80 # align stack to 16 byte boundary
81 and $~(0x10 - 1), %rsp
82
83 # check if smaller than 256
84 cmp $256, arg3
85
86 # for sizes less than 128, we can't fold 64B at a time...
87 jl _less_than_128
88
89
90 # load the initial crc value
91 movd arg1_low32, %xmm10 # initial crc
92
93 # crc value does not need to be byte-reflected, but it needs
94 # to be moved to the high part of the register.
95 # because data will be byte-reflected and will align with
96 # initial crc at correct place.
97 pslldq $12, %xmm10
98
99 movdqa SHUF_MASK(%rip), %xmm11
100 # receive the initial 64B data, xor the initial crc value
101 movdqu 16*0(arg2), %xmm0
102 movdqu 16*1(arg2), %xmm1
103 movdqu 16*2(arg2), %xmm2
104 movdqu 16*3(arg2), %xmm3
105 movdqu 16*4(arg2), %xmm4
106 movdqu 16*5(arg2), %xmm5
107 movdqu 16*6(arg2), %xmm6
108 movdqu 16*7(arg2), %xmm7
109
110 pshufb %xmm11, %xmm0
111 # XOR the initial_crc value
112 pxor %xmm10, %xmm0
113 pshufb %xmm11, %xmm1
114 pshufb %xmm11, %xmm2
115 pshufb %xmm11, %xmm3
116 pshufb %xmm11, %xmm4
117 pshufb %xmm11, %xmm5
118 pshufb %xmm11, %xmm6
119 pshufb %xmm11, %xmm7
120
121 movdqa rk3(%rip), %xmm10 #xmm10 has rk3 and rk4
122 #imm value of pclmulqdq instruction
123 #will determine which constant to use
124
125 #################################################################
126 # we subtract 256 instead of 128 to save one instruction from the loop
127 sub $256, arg3
128
129 # at this section of the code, there is 64*x+y (0<=y<64) bytes of
130 # buffer. The _fold_64_B_loop will fold 64B at a time
131 # until we have 64+y Bytes of buffer
132
133
134 # fold 64B at a time. This section of the code folds 4 xmm
135 # registers in parallel
136_fold_64_B_loop:
137
138 # update the buffer pointer
139 add $128, arg2 # buf += 64#
140
141 movdqu 16*0(arg2), %xmm9
142 movdqu 16*1(arg2), %xmm12
143 pshufb %xmm11, %xmm9
144 pshufb %xmm11, %xmm12
145 movdqa %xmm0, %xmm8
146 movdqa %xmm1, %xmm13
147 pclmulqdq $0x0 , %xmm10, %xmm0
148 pclmulqdq $0x11, %xmm10, %xmm8
149 pclmulqdq $0x0 , %xmm10, %xmm1
150 pclmulqdq $0x11, %xmm10, %xmm13
151 pxor %xmm9 , %xmm0
152 xorps %xmm8 , %xmm0
153 pxor %xmm12, %xmm1
154 xorps %xmm13, %xmm1
155
156 movdqu 16*2(arg2), %xmm9
157 movdqu 16*3(arg2), %xmm12
158 pshufb %xmm11, %xmm9
159 pshufb %xmm11, %xmm12
160 movdqa %xmm2, %xmm8
161 movdqa %xmm3, %xmm13
162 pclmulqdq $0x0, %xmm10, %xmm2
163 pclmulqdq $0x11, %xmm10, %xmm8
164 pclmulqdq $0x0, %xmm10, %xmm3
165 pclmulqdq $0x11, %xmm10, %xmm13
166 pxor %xmm9 , %xmm2
167 xorps %xmm8 , %xmm2
168 pxor %xmm12, %xmm3
169 xorps %xmm13, %xmm3
170
171 movdqu 16*4(arg2), %xmm9
172 movdqu 16*5(arg2), %xmm12
173 pshufb %xmm11, %xmm9
174 pshufb %xmm11, %xmm12
175 movdqa %xmm4, %xmm8
176 movdqa %xmm5, %xmm13
177 pclmulqdq $0x0, %xmm10, %xmm4
178 pclmulqdq $0x11, %xmm10, %xmm8
179 pclmulqdq $0x0, %xmm10, %xmm5
180 pclmulqdq $0x11, %xmm10, %xmm13
181 pxor %xmm9 , %xmm4
182 xorps %xmm8 , %xmm4
183 pxor %xmm12, %xmm5
184 xorps %xmm13, %xmm5
185
186 movdqu 16*6(arg2), %xmm9
187 movdqu 16*7(arg2), %xmm12
188 pshufb %xmm11, %xmm9
189 pshufb %xmm11, %xmm12
190 movdqa %xmm6 , %xmm8
191 movdqa %xmm7 , %xmm13
192 pclmulqdq $0x0 , %xmm10, %xmm6
193 pclmulqdq $0x11, %xmm10, %xmm8
194 pclmulqdq $0x0 , %xmm10, %xmm7
195 pclmulqdq $0x11, %xmm10, %xmm13
196 pxor %xmm9 , %xmm6
197 xorps %xmm8 , %xmm6
198 pxor %xmm12, %xmm7
199 xorps %xmm13, %xmm7
200
201 sub $128, arg3
202
203 # check if there is another 64B in the buffer to be able to fold
204 jge _fold_64_B_loop
205 ##################################################################
206
207
208 add $128, arg2
209 # at this point, the buffer pointer is pointing at the last y Bytes
210 # of the buffer the 64B of folded data is in 4 of the xmm
211 # registers: xmm0, xmm1, xmm2, xmm3
212
213
214 # fold the 8 xmm registers to 1 xmm register with different constants
215
216 movdqa rk9(%rip), %xmm10
217 movdqa %xmm0, %xmm8
218 pclmulqdq $0x11, %xmm10, %xmm0
219 pclmulqdq $0x0 , %xmm10, %xmm8
220 pxor %xmm8, %xmm7
221 xorps %xmm0, %xmm7
222
223 movdqa rk11(%rip), %xmm10
224 movdqa %xmm1, %xmm8
225 pclmulqdq $0x11, %xmm10, %xmm1
226 pclmulqdq $0x0 , %xmm10, %xmm8
227 pxor %xmm8, %xmm7
228 xorps %xmm1, %xmm7
229
230 movdqa rk13(%rip), %xmm10
231 movdqa %xmm2, %xmm8
232 pclmulqdq $0x11, %xmm10, %xmm2
233 pclmulqdq $0x0 , %xmm10, %xmm8
234 pxor %xmm8, %xmm7
235 pxor %xmm2, %xmm7
236
237 movdqa rk15(%rip), %xmm10
238 movdqa %xmm3, %xmm8
239 pclmulqdq $0x11, %xmm10, %xmm3
240 pclmulqdq $0x0 , %xmm10, %xmm8
241 pxor %xmm8, %xmm7
242 xorps %xmm3, %xmm7
243
244 movdqa rk17(%rip), %xmm10
245 movdqa %xmm4, %xmm8
246 pclmulqdq $0x11, %xmm10, %xmm4
247 pclmulqdq $0x0 , %xmm10, %xmm8
248 pxor %xmm8, %xmm7
249 pxor %xmm4, %xmm7
250
251 movdqa rk19(%rip), %xmm10
252 movdqa %xmm5, %xmm8
253 pclmulqdq $0x11, %xmm10, %xmm5
254 pclmulqdq $0x0 , %xmm10, %xmm8
255 pxor %xmm8, %xmm7
256 xorps %xmm5, %xmm7
257
258 movdqa rk1(%rip), %xmm10 #xmm10 has rk1 and rk2
259 #imm value of pclmulqdq instruction
260 #will determine which constant to use
261 movdqa %xmm6, %xmm8
262 pclmulqdq $0x11, %xmm10, %xmm6
263 pclmulqdq $0x0 , %xmm10, %xmm8
264 pxor %xmm8, %xmm7
265 pxor %xmm6, %xmm7
266
267
268 # instead of 64, we add 48 to the loop counter to save 1 instruction
269 # from the loop instead of a cmp instruction, we use the negative
270 # flag with the jl instruction
271 add $128-16, arg3
272 jl _final_reduction_for_128
273
274 # now we have 16+y bytes left to reduce. 16 Bytes is in register xmm7
275 # and the rest is in memory. We can fold 16 bytes at a time if y>=16
276 # continue folding 16B at a time
277
278_16B_reduction_loop:
279 movdqa %xmm7, %xmm8
280 pclmulqdq $0x11, %xmm10, %xmm7
281 pclmulqdq $0x0 , %xmm10, %xmm8
282 pxor %xmm8, %xmm7
283 movdqu (arg2), %xmm0
284 pshufb %xmm11, %xmm0
285 pxor %xmm0 , %xmm7
286 add $16, arg2
287 sub $16, arg3
288 # instead of a cmp instruction, we utilize the flags with the
289 # jge instruction equivalent of: cmp arg3, 16-16
290 # check if there is any more 16B in the buffer to be able to fold
291 jge _16B_reduction_loop
292
293 #now we have 16+z bytes left to reduce, where 0<= z < 16.
294 #first, we reduce the data in the xmm7 register
295
296
297_final_reduction_for_128:
298 # check if any more data to fold. If not, compute the CRC of
299 # the final 128 bits
300 add $16, arg3
301 je _128_done
302
303 # here we are getting data that is less than 16 bytes.
304 # since we know that there was data before the pointer, we can
305 # offset the input pointer before the actual point, to receive
306 # exactly 16 bytes. after that the registers need to be adjusted.
307_get_last_two_xmms:
308 movdqa %xmm7, %xmm2
309
310 movdqu -16(arg2, arg3), %xmm1
311 pshufb %xmm11, %xmm1
312
313 # get rid of the extra data that was loaded before
314 # load the shift constant
315 lea pshufb_shf_table+16(%rip), %rax
316 sub arg3, %rax
317 movdqu (%rax), %xmm0
318
319 # shift xmm2 to the left by arg3 bytes
320 pshufb %xmm0, %xmm2
321
322 # shift xmm7 to the right by 16-arg3 bytes
323 pxor mask1(%rip), %xmm0
324 pshufb %xmm0, %xmm7
325 pblendvb %xmm2, %xmm1 #xmm0 is implicit
326
327 # fold 16 Bytes
328 movdqa %xmm1, %xmm2
329 movdqa %xmm7, %xmm8
330 pclmulqdq $0x11, %xmm10, %xmm7
331 pclmulqdq $0x0 , %xmm10, %xmm8
332 pxor %xmm8, %xmm7
333 pxor %xmm2, %xmm7
334
335_128_done:
336 # compute crc of a 128-bit value
337 movdqa rk5(%rip), %xmm10 # rk5 and rk6 in xmm10
338 movdqa %xmm7, %xmm0
339
340 #64b fold
341 pclmulqdq $0x1, %xmm10, %xmm7
342 pslldq $8 , %xmm0
343 pxor %xmm0, %xmm7
344
345 #32b fold
346 movdqa %xmm7, %xmm0
347
348 pand mask2(%rip), %xmm0
349
350 psrldq $12, %xmm7
351 pclmulqdq $0x10, %xmm10, %xmm7
352 pxor %xmm0, %xmm7
353
354 #barrett reduction
355_barrett:
356 movdqa rk7(%rip), %xmm10 # rk7 and rk8 in xmm10
357 movdqa %xmm7, %xmm0
358 pclmulqdq $0x01, %xmm10, %xmm7
359 pslldq $4, %xmm7
360 pclmulqdq $0x11, %xmm10, %xmm7
361
362 pslldq $4, %xmm7
363 pxor %xmm0, %xmm7
364 pextrd $1, %xmm7, %eax
365
366_cleanup:
367 # scale the result back to 16 bits
368 shr $16, %eax
369 mov %rcx, %rsp
370 ret
371
372########################################################################
373
374.align 16
375_less_than_128:
376
377 # check if there is enough buffer to be able to fold 16B at a time
378 cmp $32, arg3
379 jl _less_than_32
380 movdqa SHUF_MASK(%rip), %xmm11
381
382 # now if there is, load the constants
383 movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10
384
385 movd arg1_low32, %xmm0 # get the initial crc value
386 pslldq $12, %xmm0 # align it to its correct place
387 movdqu (arg2), %xmm7 # load the plaintext
388 pshufb %xmm11, %xmm7 # byte-reflect the plaintext
389 pxor %xmm0, %xmm7
390
391
392 # update the buffer pointer
393 add $16, arg2
394
395 # update the counter. subtract 32 instead of 16 to save one
396 # instruction from the loop
397 sub $32, arg3
398
399 jmp _16B_reduction_loop
400
401
402.align 16
403_less_than_32:
404 # mov initial crc to the return value. this is necessary for
405 # zero-length buffers.
406 mov arg1_low32, %eax
407 test arg3, arg3
408 je _cleanup
409
410 movdqa SHUF_MASK(%rip), %xmm11
411
412 movd arg1_low32, %xmm0 # get the initial crc value
413 pslldq $12, %xmm0 # align it to its correct place
414
415 cmp $16, arg3
416 je _exact_16_left
417 jl _less_than_16_left
418
419 movdqu (arg2), %xmm7 # load the plaintext
420 pshufb %xmm11, %xmm7 # byte-reflect the plaintext
421 pxor %xmm0 , %xmm7 # xor the initial crc value
422 add $16, arg2
423 sub $16, arg3
424 movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10
425 jmp _get_last_two_xmms
426
427
428.align 16
429_less_than_16_left:
430 # use stack space to load data less than 16 bytes, zero-out
431 # the 16B in memory first.
432
433 pxor %xmm1, %xmm1
434 mov %rsp, %r11
435 movdqa %xmm1, (%r11)
436
437 cmp $4, arg3
438 jl _only_less_than_4
439
440 # backup the counter value
441 mov arg3, %r9
442 cmp $8, arg3
443 jl _less_than_8_left
444
445 # load 8 Bytes
446 mov (arg2), %rax
447 mov %rax, (%r11)
448 add $8, %r11
449 sub $8, arg3
450 add $8, arg2
451_less_than_8_left:
452
453 cmp $4, arg3
454 jl _less_than_4_left
455
456 # load 4 Bytes
457 mov (arg2), %eax
458 mov %eax, (%r11)
459 add $4, %r11
460 sub $4, arg3
461 add $4, arg2
462_less_than_4_left:
463
464 cmp $2, arg3
465 jl _less_than_2_left
466
467 # load 2 Bytes
468 mov (arg2), %ax
469 mov %ax, (%r11)
470 add $2, %r11
471 sub $2, arg3
472 add $2, arg2
473_less_than_2_left:
474 cmp $1, arg3
475 jl _zero_left
476
477 # load 1 Byte
478 mov (arg2), %al
479 mov %al, (%r11)
480_zero_left:
481 movdqa (%rsp), %xmm7
482 pshufb %xmm11, %xmm7
483 pxor %xmm0 , %xmm7 # xor the initial crc value
484
485 # shl r9, 4
486 lea pshufb_shf_table+16(%rip), %rax
487 sub %r9, %rax
488 movdqu (%rax), %xmm0
489 pxor mask1(%rip), %xmm0
490
491 pshufb %xmm0, %xmm7
492 jmp _128_done
493
494.align 16
495_exact_16_left:
496 movdqu (arg2), %xmm7
497 pshufb %xmm11, %xmm7
498 pxor %xmm0 , %xmm7 # xor the initial crc value
499
500 jmp _128_done
501
502_only_less_than_4:
503 cmp $3, arg3
504 jl _only_less_than_3
505
506 # load 3 Bytes
507 mov (arg2), %al
508 mov %al, (%r11)
509
510 mov 1(arg2), %al
511 mov %al, 1(%r11)
512
513 mov 2(arg2), %al
514 mov %al, 2(%r11)
515
516 movdqa (%rsp), %xmm7
517 pshufb %xmm11, %xmm7
518 pxor %xmm0 , %xmm7 # xor the initial crc value
519
520 psrldq $5, %xmm7
521
522 jmp _barrett
523_only_less_than_3:
524 cmp $2, arg3
525 jl _only_less_than_2
526
527 # load 2 Bytes
528 mov (arg2), %al
529 mov %al, (%r11)
530
531 mov 1(arg2), %al
532 mov %al, 1(%r11)
533
534 movdqa (%rsp), %xmm7
535 pshufb %xmm11, %xmm7
536 pxor %xmm0 , %xmm7 # xor the initial crc value
537
538 psrldq $6, %xmm7
539
540 jmp _barrett
541_only_less_than_2:
542
543 # load 1 Byte
544 mov (arg2), %al
545 mov %al, (%r11)
546
547 movdqa (%rsp), %xmm7
548 pshufb %xmm11, %xmm7
549 pxor %xmm0 , %xmm7 # xor the initial crc value
550
551 psrldq $7, %xmm7
552
553 jmp _barrett
554
555ENDPROC(crc_t10dif_pcl)
556
557.data
558
559# precomputed constants
560# these constants are precomputed from the poly:
561# 0x8bb70000 (0x8bb7 scaled to 32 bits)
562.align 16
563# Q = 0x18BB70000
564# rk1 = 2^(32*3) mod Q << 32
565# rk2 = 2^(32*5) mod Q << 32
566# rk3 = 2^(32*15) mod Q << 32
567# rk4 = 2^(32*17) mod Q << 32
568# rk5 = 2^(32*3) mod Q << 32
569# rk6 = 2^(32*2) mod Q << 32
570# rk7 = floor(2^64/Q)
571# rk8 = Q
572rk1:
573.quad 0x2d56000000000000
574rk2:
575.quad 0x06df000000000000
576rk3:
577.quad 0x9d9d000000000000
578rk4:
579.quad 0x7cf5000000000000
580rk5:
581.quad 0x2d56000000000000
582rk6:
583.quad 0x1368000000000000
584rk7:
585.quad 0x00000001f65a57f8
586rk8:
587.quad 0x000000018bb70000
588
589rk9:
590.quad 0xceae000000000000
591rk10:
592.quad 0xbfd6000000000000
593rk11:
594.quad 0x1e16000000000000
595rk12:
596.quad 0x713c000000000000
597rk13:
598.quad 0xf7f9000000000000
599rk14:
600.quad 0x80a6000000000000
601rk15:
602.quad 0x044c000000000000
603rk16:
604.quad 0xe658000000000000
605rk17:
606.quad 0xad18000000000000
607rk18:
608.quad 0xa497000000000000
609rk19:
610.quad 0x6ee3000000000000
611rk20:
612.quad 0xe7b5000000000000
613
614
615
616mask1:
617.octa 0x80808080808080808080808080808080
618mask2:
619.octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF
620
621SHUF_MASK:
622.octa 0x000102030405060708090A0B0C0D0E0F
623
624pshufb_shf_table:
625# use these values for shift constants for the pshufb instruction
626# different alignments result in values as shown:
627# DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1
628# DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2
629# DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3
630# DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4
631# DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5
632# DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6
633# DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7
634# DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8
635# DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9
636# DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10
637# DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11
638# DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12
639# DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13
640# DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14
641# DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15
642.octa 0x8f8e8d8c8b8a89888786858483828100
643.octa 0x000e0d0c0b0a09080706050403020100
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
deleted file mode 100644
index 7845d7fd54c0..000000000000
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ /dev/null
@@ -1,151 +0,0 @@
1/*
2 * Cryptographic API.
3 *
4 * T10 Data Integrity Field CRC16 Crypto Transform using PCLMULQDQ Instructions
5 *
6 * Copyright (C) 2013 Intel Corporation
7 * Author: Tim Chen <tim.c.chen@linux.intel.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
18 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
19 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 */
24
25#include <linux/types.h>
26#include <linux/module.h>
27#include <linux/crc-t10dif.h>
28#include <crypto/internal/hash.h>
29#include <linux/init.h>
30#include <linux/string.h>
31#include <linux/kernel.h>
32#include <asm/i387.h>
33#include <asm/cpufeature.h>
34#include <asm/cpu_device_id.h>
35
36asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf,
37 size_t len);
38
39struct chksum_desc_ctx {
40 __u16 crc;
41};
42
43/*
 44 * Steps through buffer one byte at a time, calculates reflected
45 * crc using table.
46 */
47
48static int chksum_init(struct shash_desc *desc)
49{
50 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
51
52 ctx->crc = 0;
53
54 return 0;
55}
56
57static int chksum_update(struct shash_desc *desc, const u8 *data,
58 unsigned int length)
59{
60 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
61
62 if (irq_fpu_usable()) {
63 kernel_fpu_begin();
64 ctx->crc = crc_t10dif_pcl(ctx->crc, data, length);
65 kernel_fpu_end();
66 } else
67 ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
68 return 0;
69}
70
71static int chksum_final(struct shash_desc *desc, u8 *out)
72{
73 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
74
75 *(__u16 *)out = ctx->crc;
76 return 0;
77}
78
79static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
80 u8 *out)
81{
82 if (irq_fpu_usable()) {
83 kernel_fpu_begin();
84 *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
85 kernel_fpu_end();
86 } else
87 *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
88 return 0;
89}
90
91static int chksum_finup(struct shash_desc *desc, const u8 *data,
92 unsigned int len, u8 *out)
93{
94 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
95
96 return __chksum_finup(&ctx->crc, data, len, out);
97}
98
99static int chksum_digest(struct shash_desc *desc, const u8 *data,
100 unsigned int length, u8 *out)
101{
102 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
103
104 return __chksum_finup(&ctx->crc, data, length, out);
105}
106
107static struct shash_alg alg = {
108 .digestsize = CRC_T10DIF_DIGEST_SIZE,
109 .init = chksum_init,
110 .update = chksum_update,
111 .final = chksum_final,
112 .finup = chksum_finup,
113 .digest = chksum_digest,
114 .descsize = sizeof(struct chksum_desc_ctx),
115 .base = {
116 .cra_name = "crct10dif",
117 .cra_driver_name = "crct10dif-pclmul",
118 .cra_priority = 200,
119 .cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
120 .cra_module = THIS_MODULE,
121 }
122};
123
124static const struct x86_cpu_id crct10dif_cpu_id[] = {
125 X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ),
126 {}
127};
128MODULE_DEVICE_TABLE(x86cpu, crct10dif_cpu_id);
129
130static int __init crct10dif_intel_mod_init(void)
131{
132 if (!x86_match_cpu(crct10dif_cpu_id))
133 return -ENODEV;
134
135 return crypto_register_shash(&alg);
136}
137
138static void __exit crct10dif_intel_mod_fini(void)
139{
140 crypto_unregister_shash(&alg);
141}
142
143module_init(crct10dif_intel_mod_init);
144module_exit(crct10dif_intel_mod_fini);
145
146MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
147MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ.");
148MODULE_LICENSE("GPL");
149
150MODULE_ALIAS("crct10dif");
151MODULE_ALIAS("crct10dif-pclmul");
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index 653668d140f9..4a8cb8d7cbd5 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -35,9 +35,9 @@ static void sanitize_boot_params(struct boot_params *boot_params)
35 */ 35 */
36 if (boot_params->sentinel) { 36 if (boot_params->sentinel) {
37 /* fields in boot_params are left uninitialized, clear them */ 37 /* fields in boot_params are left uninitialized, clear them */
38 memset(&boot_params->olpc_ofw_header, 0, 38 memset(&boot_params->ext_ramdisk_image, 0,
39 (char *)&boot_params->efi_info - 39 (char *)&boot_params->efi_info -
40 (char *)&boot_params->olpc_ofw_header); 40 (char *)&boot_params->ext_ramdisk_image);
41 memset(&boot_params->kbd_status, 0, 41 memset(&boot_params->kbd_status, 0,
42 (char *)&boot_params->hdr - 42 (char *)&boot_params->hdr -
43 (char *)&boot_params->kbd_status); 43 (char *)&boot_params->kbd_status);
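
Moving the memset start from olpc_ofw_header to ext_ramdisk_image stops the sanitizer from wiping fields a bootloader may legitimately have filled in; only the span of truly uninitialized fields up to efi_info is cleared. The pointer-difference idiom, in a runnable user-space sketch with hypothetical field names:

#include <stdio.h>
#include <string.h>

struct params {
	int keep_a;	/* set by the loader: must survive */
	int clear_b;	/* possibly uninitialized: wipe */
	int clear_c;	/* possibly uninitialized: wipe */
	int keep_d;	/* first field past the cleared span */
};

int main(void)
{
	struct params p = { 1, 2, 3, 4 };

	/* clear [clear_b, keep_d), the idiom the hunk uses for
	 * [ext_ramdisk_image, efi_info) */
	memset(&p.clear_b, 0, (char *)&p.keep_d - (char *)&p.clear_b);
	printf("%d %d %d %d\n", p.keep_a, p.clear_b, p.clear_c, p.keep_d);
	/* prints: 1 0 0 4 */
	return 0;
}
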
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index 50e5c58ced23..4c019179a57d 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -59,7 +59,7 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
59 59
60extern int __apply_microcode_amd(struct microcode_amd *mc_amd); 60extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
61extern int apply_microcode_amd(int cpu); 61extern int apply_microcode_amd(int cpu);
62extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size); 62extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
63 63
64#ifdef CONFIG_MICROCODE_AMD_EARLY 64#ifdef CONFIG_MICROCODE_AMD_EARLY
65#ifdef CONFIG_X86_32 65#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index f2b489cf1602..3bf2dd0cf61f 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -55,9 +55,53 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
55#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) 55#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
56#endif 56#endif
57 57
58#ifdef CONFIG_MEM_SOFT_DIRTY
59
60/*
61 * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and
62 * _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset
63 * into this range.
64 */
65#define PTE_FILE_MAX_BITS 28
66#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
67#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1)
68#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1)
69#define PTE_FILE_SHIFT4 (_PAGE_BIT_SOFT_DIRTY + 1)
70#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
71#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
72#define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1)
73
74#define pte_to_pgoff(pte) \
75 ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \
76 & ((1U << PTE_FILE_BITS1) - 1))) \
77 + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \
78 & ((1U << PTE_FILE_BITS2) - 1)) \
79 << (PTE_FILE_BITS1)) \
80 + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \
81 & ((1U << PTE_FILE_BITS3) - 1)) \
82 << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
83 + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \
84 << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))
85
86#define pgoff_to_pte(off) \
87 ((pte_t) { .pte_low = \
88 ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \
89 + ((((off) >> PTE_FILE_BITS1) \
90 & ((1U << PTE_FILE_BITS2) - 1)) \
91 << PTE_FILE_SHIFT2) \
92 + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
93 & ((1U << PTE_FILE_BITS3) - 1)) \
94 << PTE_FILE_SHIFT3) \
95 + ((((off) >> \
96 (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \
97 << PTE_FILE_SHIFT4) \
98 + _PAGE_FILE })
99
100#else /* CONFIG_MEM_SOFT_DIRTY */
101
58/* 102/*
59 * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, 103 * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
60 * split up the 29 bits of offset into this range: 104 * split up the 29 bits of offset into this range.
61 */ 105 */
62#define PTE_FILE_MAX_BITS 29 106#define PTE_FILE_MAX_BITS 29
63#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) 107#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
@@ -88,6 +132,8 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
88 << PTE_FILE_SHIFT3) \ 132 << PTE_FILE_SHIFT3) \
89 + _PAGE_FILE }) 133 + _PAGE_FILE })
90 134
135#endif /* CONFIG_MEM_SOFT_DIRTY */
136
91/* Encode and de-code a swap entry */ 137/* Encode and de-code a swap entry */
92#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE 138#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
93#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) 139#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
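
With _PAGE_BIT_PRESENT = 0, _PAGE_BIT_FILE = 6, _PAGE_BIT_PROTNONE = 8 and _PAGE_BIT_SOFT_DIRTY = 11, the new macros scatter the 28-bit file offset into runs of 5, 1, 2 and 20 bits around the four reserved pte bits. A round-trip check of that packing in plain C (bit positions per my reading of the x86 headers; the _PAGE_FILE flag itself is omitted for brevity):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define S1 1	/* above PRESENT (bit 0) */
#define S2 7	/* above FILE (bit 6) */
#define S3 9	/* above PROTNONE (bit 8) */
#define S4 12	/* above SOFT_DIRTY (bit 11) */
#define B1 (S2 - S1 - 1)	/* 5 bits */
#define B2 (S3 - S2 - 1)	/* 1 bit  */
#define B3 (S4 - S3 - 1)	/* 2 bits */

static uint32_t pack(uint32_t off)
{
	return ((off & ((1u << B1) - 1)) << S1)
	     | (((off >> B1) & ((1u << B2) - 1)) << S2)
	     | (((off >> (B1 + B2)) & ((1u << B3) - 1)) << S3)
	     | ((off >> (B1 + B2 + B3)) << S4);
}

static uint32_t unpack(uint32_t pte)
{
	return ((pte >> S1) & ((1u << B1) - 1))
	     | (((pte >> S2) & ((1u << B2) - 1)) << B1)
	     | (((pte >> S3) & ((1u << B3) - 1)) << (B1 + B2))
	     | ((pte >> S4) << (B1 + B2 + B3));
}

int main(void)
{
	uint32_t off = 0x0ABCDEF;	/* any 28-bit offset */

	assert(unpack(pack(off)) == off);
	printf("round trip ok: 0x%07x\n", unpack(pack(off)));
	return 0;
}
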
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 4cc9f2b7cdc3..81bb91b49a88 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -179,6 +179,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
179/* 179/*
180 * Bits 0, 6 and 7 are taken in the low part of the pte, 180 * Bits 0, 6 and 7 are taken in the low part of the pte,
181 * put the 32 bits of offset into the high part. 181 * put the 32 bits of offset into the high part.
182 *
 183 * For soft-dirty tracking, bit 11 is taken from
 184 * the low part of the pte as well.
182 */ 185 */
183#define pte_to_pgoff(pte) ((pte).pte_high) 186#define pte_to_pgoff(pte) ((pte).pte_high)
184#define pgoff_to_pte(off) \ 187#define pgoff_to_pte(off) \
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 7dc305a46058..1c00631164c2 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -314,6 +314,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
314 return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); 314 return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
315} 315}
316 316
317static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
318{
319 return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
320}
321
322static inline int pte_swp_soft_dirty(pte_t pte)
323{
324 return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
325}
326
327static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
328{
329 return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
330}
331
332static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
333{
334 return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
335}
336
337static inline pte_t pte_file_mksoft_dirty(pte_t pte)
338{
339 return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
340}
341
342static inline int pte_file_soft_dirty(pte_t pte)
343{
344 return pte_flags(pte) & _PAGE_SOFT_DIRTY;
345}
346
317/* 347/*
318 * Mask out unsupported bits in a present pgprot. Non-present pgprots 348 * Mask out unsupported bits in a present pgprot. Non-present pgprots
319 * can use those bits for other purposes, so leave them be. 349 * can use those bits for other purposes, so leave them be.
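
The six helpers added above are plain flag accessors over _PAGE_SWP_SOFT_DIRTY and _PAGE_SOFT_DIRTY. A toy user-space version of one set/test/clear triple, with the flag value borrowed from _PAGE_PSE (bit 7) as in the pgtable_types.h hunk below:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t val; } pte_t;

#define PAGE_SWP_SOFT_DIRTY (1ULL << 7)	/* borrowed _PAGE_PSE bit */

static pte_t swp_mksoft_dirty(pte_t p) { p.val |= PAGE_SWP_SOFT_DIRTY; return p; }
static int   swp_soft_dirty(pte_t p)   { return !!(p.val & PAGE_SWP_SOFT_DIRTY); }
static pte_t swp_clear_soft_dirty(pte_t p) { p.val &= ~PAGE_SWP_SOFT_DIRTY; return p; }

int main(void)
{
	pte_t p = { 0x100 };	/* some unrelated bit already set */

	p = swp_mksoft_dirty(p);
	printf("%d\n", swp_soft_dirty(p));				/* 1 */
	printf("%d\n", swp_soft_dirty(swp_clear_soft_dirty(p)));	/* 0 */
	return 0;
}
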
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index c98ac63aae48..f4843e031131 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -61,12 +61,27 @@
61 * they do not conflict with each other. 61 * they do not conflict with each other.
62 */ 62 */
63 63
64#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_HIDDEN
65
64#ifdef CONFIG_MEM_SOFT_DIRTY 66#ifdef CONFIG_MEM_SOFT_DIRTY
65#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) 67#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
66#else 68#else
67#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) 69#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0))
68#endif 70#endif
69 71
72/*
 73 * Tracking the soft dirty bit when a page goes to swap is tricky.
 74 * We need a bit which can be stored in the pte _and_ does not
 75 * conflict with the swap entry format. On x86 bits 6 and 7 are
 76 * *not* involved in swap entry computation, but bit 6 is used for
 77 * nonlinear file mapping, so we borrow bit 7 for soft dirty tracking.
78 */
79#ifdef CONFIG_MEM_SOFT_DIRTY
80#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE
81#else
82#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0))
83#endif
84
70#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) 85#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
71#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) 86#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
72#else 87#else
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 33692eaabab5..e3ddd7db723f 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -233,8 +233,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
233#define arch_read_relax(lock) cpu_relax() 233#define arch_read_relax(lock) cpu_relax()
234#define arch_write_relax(lock) cpu_relax() 234#define arch_write_relax(lock) cpu_relax()
235 235
236/* The {read|write|spin}_lock() on x86 are full memory barriers. */
237static inline void smp_mb__after_lock(void) { }
238#define ARCH_HAS_SMP_MB_AFTER_LOCK
239
240#endif /* _ASM_X86_SPINLOCK_H */ 236#endif /* _ASM_X86_SPINLOCK_H */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f654ecefea5b..08a089043ccf 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -512,7 +512,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
512 512
513static const int amd_erratum_383[]; 513static const int amd_erratum_383[];
514static const int amd_erratum_400[]; 514static const int amd_erratum_400[];
515static bool cpu_has_amd_erratum(const int *erratum); 515static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
516 516
517static void init_amd(struct cpuinfo_x86 *c) 517static void init_amd(struct cpuinfo_x86 *c)
518{ 518{
@@ -729,11 +729,11 @@ static void init_amd(struct cpuinfo_x86 *c)
729 value &= ~(1ULL << 24); 729 value &= ~(1ULL << 24);
730 wrmsrl_safe(MSR_AMD64_BU_CFG2, value); 730 wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
731 731
732 if (cpu_has_amd_erratum(amd_erratum_383)) 732 if (cpu_has_amd_erratum(c, amd_erratum_383))
733 set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); 733 set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
734 } 734 }
735 735
736 if (cpu_has_amd_erratum(amd_erratum_400)) 736 if (cpu_has_amd_erratum(c, amd_erratum_400))
737 set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); 737 set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
738 738
739 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); 739 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
@@ -878,23 +878,13 @@ static const int amd_erratum_400[] =
878static const int amd_erratum_383[] = 878static const int amd_erratum_383[] =
879 AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); 879 AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
880 880
881static bool cpu_has_amd_erratum(const int *erratum) 881
882static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
882{ 883{
883 struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
884 int osvw_id = *erratum++; 884 int osvw_id = *erratum++;
885 u32 range; 885 u32 range;
886 u32 ms; 886 u32 ms;
887 887
888 /*
889 * If called early enough that current_cpu_data hasn't been initialized
890 * yet, fall back to boot_cpu_data.
891 */
892 if (cpu->x86 == 0)
893 cpu = &boot_cpu_data;
894
895 if (cpu->x86_vendor != X86_VENDOR_AMD)
896 return false;
897
898 if (osvw_id >= 0 && osvw_id < 65536 && 888 if (osvw_id >= 0 && osvw_id < 65536 &&
899 cpu_has(cpu, X86_FEATURE_OSVW)) { 889 cpu_has(cpu, X86_FEATURE_OSVW)) {
900 u64 osvw_len; 890 u64 osvw_len;
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index e2703520d120..c370e1c4468b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -111,8 +111,8 @@ static struct severity {
111#ifdef CONFIG_MEMORY_FAILURE 111#ifdef CONFIG_MEMORY_FAILURE
112 MCESEV( 112 MCESEV(
113 KEEP, "Action required but unaffected thread is continuable", 113 KEEP, "Action required but unaffected thread is continuable",
114 SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR), 114 SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR, MCI_UC_SAR|MCI_ADDR),
115 MCGMASK(MCG_STATUS_RIPV, MCG_STATUS_RIPV) 115 MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, MCG_STATUS_RIPV)
116 ), 116 ),
117 MCESEV( 117 MCESEV(
118 AR, "Action required: data load error in a user process", 118 AR, "Action required: data load error in a user process",
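
Severity-table entries built with MASK(m, r) match when (status & m) == r, so dropping MCACOD from the mask lets this KEEP rule apply to any error code, while adding MCG_STATUS_EIPV to MCGMASK with a result of RIPV alone additionally requires EIPV to be clear. A toy user-space illustration of the mask/result matching:

#include <stdint.h>
#include <stdio.h>

static int mask_match(uint64_t status, uint64_t mask, uint64_t result)
{
	return (status & mask) == result;
}

int main(void)
{
	uint64_t MCACOD = 0xffff;	/* error-code field */
	uint64_t status = 0x0134;	/* some non-zero error code */

	printf("%d\n", mask_match(status, MCACOD, 0)); /* 0: old rule, too strict */
	printf("%d\n", mask_match(status, 0, 0));      /* 1: code now ignored */
	return 0;
}
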
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index fbc9210b45bc..a45d8d4ace10 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2270,6 +2270,7 @@ __init int intel_pmu_init(void)
2270 case 70: 2270 case 70:
2271 case 71: 2271 case 71:
2272 case 63: 2272 case 63:
2273 case 69:
2273 x86_pmu.late_ack = true; 2274 x86_pmu.late_ack = true;
2274 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 2275 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
2275 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 2276 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index cad791dbde95..1fb6c72717bd 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -314,8 +314,8 @@ static struct uncore_event_desc snbep_uncore_imc_events[] = {
314static struct uncore_event_desc snbep_uncore_qpi_events[] = { 314static struct uncore_event_desc snbep_uncore_qpi_events[] = {
315 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), 315 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
316 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), 316 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
317 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"), 317 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
318 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"), 318 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
319 { /* end: all zeroes */ }, 319 { /* end: all zeroes */ },
320}; 320};
321 321
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 94ab6b90dd3f..63bdb29b2549 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -196,15 +196,23 @@ static void __init ati_bugs_contd(int num, int slot, int func)
196static void __init intel_remapping_check(int num, int slot, int func) 196static void __init intel_remapping_check(int num, int slot, int func)
197{ 197{
198 u8 revision; 198 u8 revision;
199 u16 device;
199 200
201 device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
200 revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); 202 revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);
201 203
202 /* 204 /*
 203 * Revision 0x13 of this chipset supports irq remapping 205 * Revision 0x13 of every device id this quirk triggers on has
204 * but has an erratum that breaks its behavior, flag it as such 206 * a problem draining interrupts when irq remapping is enabled,
 207 * and should be flagged as broken. Additionally, revisions 0x12
 208 * and 0x22 of device id 0x3405 have this problem.
205 */ 209 */
206 if (revision == 0x13) 210 if (revision == 0x13)
207 set_irq_remapping_broken(); 211 set_irq_remapping_broken();
212 else if ((device == 0x3405) &&
213 ((revision == 0x12) ||
214 (revision == 0x22)))
215 set_irq_remapping_broken();
208 216
209} 217}
210 218
@@ -239,6 +247,8 @@ static struct chipset early_qrk[] __initdata = {
239 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, 247 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
240 { PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST, 248 { PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST,
241 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, 249 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
250 { PCI_VENDOR_ID_INTEL, 0x3405, PCI_CLASS_BRIDGE_HOST,
251 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
242 { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, 252 { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
243 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, 253 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
244 {} 254 {}
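
The hunk above widens the IRQ-remapping erratum quirk: revision 0x13 of any matching host bridge is flagged as broken, and device id 0x3405 is additionally flagged at revisions 0x12 and 0x22. A condensed sketch of the resulting check, using the same names as the hunk (an illustration only, not the literal patched function):

    u16 device  = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
    u8 revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);

    if (revision == 0x13 ||
        (device == 0x3405 && (revision == 0x12 || revision == 0x22)))
            set_irq_remapping_broken();
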
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 202d24f0f7e7..5d576ab34403 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -116,7 +116,7 @@ static void mxcsr_feature_mask_init(void)
116 116
117 if (cpu_has_fxsr) { 117 if (cpu_has_fxsr) {
118 memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct)); 118 memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
119 asm volatile("fxsave %0" : : "m" (fx_scratch)); 119 asm volatile("fxsave %0" : "+m" (fx_scratch));
120 mask = fx_scratch.mxcsr_mask; 120 mask = fx_scratch.mxcsr_mask;
121 if (mask == 0) 121 if (mask == 0)
122 mask = 0x0000ffbf; 122 mask = 0x0000ffbf;
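
The i387.c change above is subtle: fxsave stores into its memory operand, so an input-only "m" constraint lets the compiler assume fx_scratch is never written and potentially drop the preceding memset or reuse stale contents. The "+m" (read-write) constraint makes the store visible to the optimizer. A minimal sketch of the corrected idiom (kernel context assumed; not a standalone program):

    struct i387_fxsave_struct fx_scratch;
    u32 mask;

    memset(&fx_scratch, 0, sizeof(fx_scratch));
    asm volatile("fxsave %0" : "+m" (fx_scratch)); /* operand is both read and written */
    mask = fx_scratch.mxcsr_mask; /* now guaranteed to see what fxsave stored */
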
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 47ebb1dbfbcb..7123b5df479d 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -145,10 +145,9 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
145 return 0; 145 return 0;
146} 146}
147 147
148static unsigned int verify_patch_size(int cpu, u32 patch_size, 148static unsigned int verify_patch_size(u8 family, u32 patch_size,
149 unsigned int size) 149 unsigned int size)
150{ 150{
151 struct cpuinfo_x86 *c = &cpu_data(cpu);
152 u32 max_size; 151 u32 max_size;
153 152
154#define F1XH_MPB_MAX_SIZE 2048 153#define F1XH_MPB_MAX_SIZE 2048
@@ -156,7 +155,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
156#define F15H_MPB_MAX_SIZE 4096 155#define F15H_MPB_MAX_SIZE 4096
157#define F16H_MPB_MAX_SIZE 3458 156#define F16H_MPB_MAX_SIZE 3458
158 157
159 switch (c->x86) { 158 switch (family) {
160 case 0x14: 159 case 0x14:
161 max_size = F14H_MPB_MAX_SIZE; 160 max_size = F14H_MPB_MAX_SIZE;
162 break; 161 break;
@@ -220,12 +219,13 @@ int apply_microcode_amd(int cpu)
220 return 0; 219 return 0;
221 } 220 }
222 221
223 if (__apply_microcode_amd(mc_amd)) 222 if (__apply_microcode_amd(mc_amd)) {
224 pr_err("CPU%d: update failed for patch_level=0x%08x\n", 223 pr_err("CPU%d: update failed for patch_level=0x%08x\n",
225 cpu, mc_amd->hdr.patch_id); 224 cpu, mc_amd->hdr.patch_id);
226 else 225 return -1;
227 pr_info("CPU%d: new patch_level=0x%08x\n", cpu, 226 }
228 mc_amd->hdr.patch_id); 227 pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
228 mc_amd->hdr.patch_id);
229 229
230 uci->cpu_sig.rev = mc_amd->hdr.patch_id; 230 uci->cpu_sig.rev = mc_amd->hdr.patch_id;
231 c->microcode = mc_amd->hdr.patch_id; 231 c->microcode = mc_amd->hdr.patch_id;
@@ -276,9 +276,8 @@ static void cleanup(void)
276 * driver cannot continue functioning normally. In such cases, we tear 276 * driver cannot continue functioning normally. In such cases, we tear
277 * down everything we've used up so far and exit. 277 * down everything we've used up so far and exit.
278 */ 278 */
279static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) 279static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
280{ 280{
281 struct cpuinfo_x86 *c = &cpu_data(cpu);
282 struct microcode_header_amd *mc_hdr; 281 struct microcode_header_amd *mc_hdr;
283 struct ucode_patch *patch; 282 struct ucode_patch *patch;
284 unsigned int patch_size, crnt_size, ret; 283 unsigned int patch_size, crnt_size, ret;
@@ -298,7 +297,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
298 297
299 /* check if patch is for the current family */ 298 /* check if patch is for the current family */
300 proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff); 299 proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
301 if (proc_fam != c->x86) 300 if (proc_fam != family)
302 return crnt_size; 301 return crnt_size;
303 302
304 if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { 303 if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
@@ -307,7 +306,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
307 return crnt_size; 306 return crnt_size;
308 } 307 }
309 308
310 ret = verify_patch_size(cpu, patch_size, leftover); 309 ret = verify_patch_size(family, patch_size, leftover);
311 if (!ret) { 310 if (!ret) {
312 pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id); 311 pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
313 return crnt_size; 312 return crnt_size;
@@ -338,7 +337,8 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
338 return crnt_size; 337 return crnt_size;
339} 338}
340 339
341static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t size) 340static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
341 size_t size)
342{ 342{
343 enum ucode_state ret = UCODE_ERROR; 343 enum ucode_state ret = UCODE_ERROR;
344 unsigned int leftover; 344 unsigned int leftover;
@@ -361,7 +361,7 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz
361 } 361 }
362 362
363 while (leftover) { 363 while (leftover) {
364 crnt_size = verify_and_add_patch(cpu, fw, leftover); 364 crnt_size = verify_and_add_patch(family, fw, leftover);
365 if (crnt_size < 0) 365 if (crnt_size < 0)
366 return ret; 366 return ret;
367 367
@@ -372,22 +372,22 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz
372 return UCODE_OK; 372 return UCODE_OK;
373} 373}
374 374
375enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size) 375enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
376{ 376{
377 enum ucode_state ret; 377 enum ucode_state ret;
378 378
379 /* free old equiv table */ 379 /* free old equiv table */
380 free_equiv_cpu_table(); 380 free_equiv_cpu_table();
381 381
382 ret = __load_microcode_amd(cpu, data, size); 382 ret = __load_microcode_amd(family, data, size);
383 383
384 if (ret != UCODE_OK) 384 if (ret != UCODE_OK)
385 cleanup(); 385 cleanup();
386 386
387#if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32) 387#if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32)
388 /* save BSP's matching patch for early load */ 388 /* save BSP's matching patch for early load */
389 if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) { 389 if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) {
390 struct ucode_patch *p = find_patch(cpu); 390 struct ucode_patch *p = find_patch(smp_processor_id());
391 if (p) { 391 if (p) {
392 memset(amd_bsp_mpb, 0, MPB_MAX_SIZE); 392 memset(amd_bsp_mpb, 0, MPB_MAX_SIZE);
393 memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data), 393 memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data),
@@ -440,7 +440,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
440 goto fw_release; 440 goto fw_release;
441 } 441 }
442 442
443 ret = load_microcode_amd(cpu, fw->data, fw->size); 443 ret = load_microcode_amd(c->x86, fw->data, fw->size);
444 444
445 fw_release: 445 fw_release:
446 release_firmware(fw); 446 release_firmware(fw);
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c
index 1d14ffee5749..6073104ccaa3 100644
--- a/arch/x86/kernel/microcode_amd_early.c
+++ b/arch/x86/kernel/microcode_amd_early.c
@@ -238,25 +238,17 @@ static void __init collect_cpu_sig_on_bsp(void *arg)
238 uci->cpu_sig.sig = cpuid_eax(0x00000001); 238 uci->cpu_sig.sig = cpuid_eax(0x00000001);
239} 239}
240#else 240#else
241static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c, 241void load_ucode_amd_ap(void)
242 struct ucode_cpu_info *uci)
243{ 242{
243 unsigned int cpu = smp_processor_id();
244 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
244 u32 rev, eax; 245 u32 rev, eax;
245 246
246 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); 247 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
247 eax = cpuid_eax(0x00000001); 248 eax = cpuid_eax(0x00000001);
248 249
249 uci->cpu_sig.sig = eax;
250 uci->cpu_sig.rev = rev; 250 uci->cpu_sig.rev = rev;
251 c->microcode = rev; 251 uci->cpu_sig.sig = eax;
252 c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
253}
254
255void load_ucode_amd_ap(void)
256{
257 unsigned int cpu = smp_processor_id();
258
259 collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu);
260 252
261 if (cpu && !ucode_loaded) { 253 if (cpu && !ucode_loaded) {
262 void *ucode; 254 void *ucode;
@@ -265,8 +257,10 @@ void load_ucode_amd_ap(void)
265 return; 257 return;
266 258
267 ucode = (void *)(initrd_start + ucode_offset); 259 ucode = (void *)(initrd_start + ucode_offset);
268 if (load_microcode_amd(0, ucode, ucode_size) != UCODE_OK) 260 eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
261 if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK)
269 return; 262 return;
263
270 ucode_loaded = true; 264 ucode_loaded = true;
271 } 265 }
272 266
@@ -278,6 +272,8 @@ int __init save_microcode_in_initrd_amd(void)
278{ 272{
279 enum ucode_state ret; 273 enum ucode_state ret;
280 void *ucode; 274 void *ucode;
275 u32 eax;
276
281#ifdef CONFIG_X86_32 277#ifdef CONFIG_X86_32
282 unsigned int bsp = boot_cpu_data.cpu_index; 278 unsigned int bsp = boot_cpu_data.cpu_index;
283 struct ucode_cpu_info *uci = ucode_cpu_info + bsp; 279 struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
@@ -293,7 +289,10 @@ int __init save_microcode_in_initrd_amd(void)
293 return 0; 289 return 0;
294 290
295 ucode = (void *)(initrd_start + ucode_offset); 291 ucode = (void *)(initrd_start + ucode_offset);
296 ret = load_microcode_amd(0, ucode, ucode_size); 292 eax = cpuid_eax(0x00000001);
293 eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
294
295 ret = load_microcode_amd(eax, ucode, ucode_size);
297 if (ret != UCODE_OK) 296 if (ret != UCODE_OK)
298 return -EINVAL; 297 return -EINVAL;
299 298
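
The microcode hunks above stop passing a cpu number around and instead thread the CPU family through, computed straight from CPUID leaf 1 EAX as base family plus extended family. A self-contained sketch of that computation (the shift/mask form matches the hunks):

    /* x86 family from CPUID(1).EAX: base family in bits 11:8,
     * extended family in bits 27:20 */
    static unsigned char x86_family(unsigned int cpuid_1_eax)
    {
            return ((cpuid_1_eax >> 8) & 0xf) + ((cpuid_1_eax >> 20) & 0xff);
    }
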
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index dbded5aedb81..30277e27431a 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
101 *begin = new_begin; 101 *begin = new_begin;
102 } 102 }
103 } else { 103 } else {
104 *begin = TASK_UNMAPPED_BASE; 104 *begin = current->mm->mmap_legacy_base;
105 *end = TASK_SIZE; 105 *end = TASK_SIZE;
106 } 106 }
107} 107}
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 62c29a5bfe26..25e7e1372bb2 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -112,11 +112,13 @@ static unsigned long mmap_legacy_base(void)
112 */ 112 */
113void arch_pick_mmap_layout(struct mm_struct *mm) 113void arch_pick_mmap_layout(struct mm_struct *mm)
114{ 114{
115 mm->mmap_legacy_base = mmap_legacy_base();
116 mm->mmap_base = mmap_base();
117
115 if (mmap_is_legacy()) { 118 if (mmap_is_legacy()) {
116 mm->mmap_base = mmap_legacy_base(); 119 mm->mmap_base = mm->mmap_legacy_base;
117 mm->get_unmapped_area = arch_get_unmapped_area; 120 mm->get_unmapped_area = arch_get_unmapped_area;
118 } else { 121 } else {
119 mm->mmap_base = mmap_base();
120 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 122 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
121 } 123 }
122} 124}
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index 643b8b5eee86..8244f5ec2f4c 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -12,6 +12,7 @@
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/irq.h> 13#include <linux/irq.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/reboot.h>
15#include <linux/serial_reg.h> 16#include <linux/serial_reg.h>
16#include <linux/serial_8250.h> 17#include <linux/serial_8250.h>
17#include <linux/reboot.h> 18#include <linux/reboot.h>
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 056d11faef21..8f3eea6b80c5 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -313,6 +313,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
313 e820_add_region(start, end - start, type); 313 e820_add_region(start, end - start, type);
314} 314}
315 315
316void xen_ignore_unusable(struct e820entry *list, size_t map_size)
317{
318 struct e820entry *entry;
319 unsigned int i;
320
321 for (i = 0, entry = list; i < map_size; i++, entry++) {
322 if (entry->type == E820_UNUSABLE)
323 entry->type = E820_RAM;
324 }
325}
326
316/** 327/**
317 * machine_specific_memory_setup - Hook for machine specific memory setup. 328 * machine_specific_memory_setup - Hook for machine specific memory setup.
318 **/ 329 **/
@@ -353,6 +364,17 @@ char * __init xen_memory_setup(void)
353 } 364 }
354 BUG_ON(rc); 365 BUG_ON(rc);
355 366
367 /*
368 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
369 * regions, so if we're using the machine memory map leave the
370 * region as RAM as it is in the pseudo-physical map.
371 *
372 * UNUSABLE regions in domUs are not handled and will need
373 * a patch in the future.
374 */
375 if (xen_initial_domain())
376 xen_ignore_unusable(map, memmap.nr_entries);
377
356 /* Make sure the Xen-supplied memory map is well-ordered. */ 378 /* Make sure the Xen-supplied memory map is well-ordered. */
357 sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries); 379 sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
358 380
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index ca92754eb846..b81c88e51daa 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -694,8 +694,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
694static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) 694static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
695{ 695{
696 int rc; 696 int rc;
697 rc = native_cpu_up(cpu, tidle); 697 /*
698 WARN_ON (xen_smp_intr_init(cpu)); 698 * xen_smp_intr_init() needs to run before native_cpu_up()
699 * so that IPI vectors are set up on the booting CPU before
700 * it is marked online in native_cpu_up().
701 */
702 rc = xen_smp_intr_init(cpu);
703 WARN_ON(rc);
704 if (!rc)
705 rc = native_cpu_up(cpu, tidle);
699 return rc; 706 return rc;
700} 707}
701 708
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 69ce573f1224..aca01164f002 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -376,25 +376,6 @@ config CRYPTO_CRC32_PCLMUL
376 which will enable any routine to use the CRC-32-IEEE 802.3 checksum 376 which will enable any routine to use the CRC-32-IEEE 802.3 checksum
377 and gain better performance as compared with the table implementation. 377 and gain better performance as compared with the table implementation.
378 378
379config CRYPTO_CRCT10DIF
380 tristate "CRCT10DIF algorithm"
381 select CRYPTO_HASH
382 help
383 CRC T10 Data Integrity Field computation is being cast as
384 a crypto transform. This allows for faster crc t10 diff
385 transforms to be used if they are available.
386
387config CRYPTO_CRCT10DIF_PCLMUL
388 tristate "CRCT10DIF PCLMULQDQ hardware acceleration"
389 depends on X86 && 64BIT && CRC_T10DIF
390 select CRYPTO_HASH
391 help
392 For x86_64 processors with SSE4.2 and PCLMULQDQ supported,
393 CRC T10 DIF PCLMULQDQ computation can be hardware
394 accelerated PCLMULQDQ instruction. This option will create
395 'crct10dif-plcmul' module, which is faster when computing the
396 crct10dif checksum as compared with the generic table implementation.
397
398config CRYPTO_GHASH 379config CRYPTO_GHASH
399 tristate "GHASH digest algorithm" 380 tristate "GHASH digest algorithm"
400 select CRYPTO_GF128MUL 381 select CRYPTO_GF128MUL
diff --git a/crypto/Makefile b/crypto/Makefile
index 2d5ed08a239f..2ba0df2f908f 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -83,7 +83,6 @@ obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
83obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o 83obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
84obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o 84obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
85obj-$(CONFIG_CRYPTO_CRC32) += crc32.o 85obj-$(CONFIG_CRYPTO_CRC32) += crc32.o
86obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif.o
87obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o 86obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
88obj-$(CONFIG_CRYPTO_LZO) += lzo.o 87obj-$(CONFIG_CRYPTO_LZO) += lzo.o
89obj-$(CONFIG_CRYPTO_LZ4) += lz4.o 88obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
diff --git a/crypto/crct10dif.c b/crypto/crct10dif.c
deleted file mode 100644
index 92aca96d6b98..000000000000
--- a/crypto/crct10dif.c
+++ /dev/null
@@ -1,178 +0,0 @@
1/*
2 * Cryptographic API.
3 *
4 * T10 Data Integrity Field CRC16 Crypto Transform
5 *
6 * Copyright (c) 2007 Oracle Corporation. All rights reserved.
7 * Written by Martin K. Petersen <martin.petersen@oracle.com>
8 * Copyright (C) 2013 Intel Corporation
9 * Author: Tim Chen <tim.c.chen@linux.intel.com>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the Free
13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
20 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
21 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
22 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 *
25 */
26
27#include <linux/types.h>
28#include <linux/module.h>
29#include <linux/crc-t10dif.h>
30#include <crypto/internal/hash.h>
31#include <linux/init.h>
32#include <linux/string.h>
33#include <linux/kernel.h>
34
35struct chksum_desc_ctx {
36 __u16 crc;
37};
38
39/* Table generated using the following polynomial:
40 * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
41 * gt: 0x8bb7
42 */
43static const __u16 t10_dif_crc_table[256] = {
44 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
45 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
46 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
47 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
48 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
49 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
50 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
51 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
52 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
53 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
54 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
55 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
56 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
57 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
58 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
59 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
60 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
61 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
62 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
63 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
64 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
65 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
66 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
67 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
68 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
69 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
70 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
71 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
72 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
73 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
74 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
75 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
76};
77
78__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len)
79{
80 unsigned int i;
81
82 for (i = 0 ; i < len ; i++)
83 crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff];
84
85 return crc;
86}
87EXPORT_SYMBOL(crc_t10dif_generic);
88
89/*
90 * Steps through buffer one byte at a time, calculates reflected
91 * crc using table.
92 */
93
94static int chksum_init(struct shash_desc *desc)
95{
96 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
97
98 ctx->crc = 0;
99
100 return 0;
101}
102
103static int chksum_update(struct shash_desc *desc, const u8 *data,
104 unsigned int length)
105{
106 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
107
108 ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
109 return 0;
110}
111
112static int chksum_final(struct shash_desc *desc, u8 *out)
113{
114 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
115
116 *(__u16 *)out = ctx->crc;
117 return 0;
118}
119
120static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
121 u8 *out)
122{
123 *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
124 return 0;
125}
126
127static int chksum_finup(struct shash_desc *desc, const u8 *data,
128 unsigned int len, u8 *out)
129{
130 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
131
132 return __chksum_finup(&ctx->crc, data, len, out);
133}
134
135static int chksum_digest(struct shash_desc *desc, const u8 *data,
136 unsigned int length, u8 *out)
137{
138 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
139
140 return __chksum_finup(&ctx->crc, data, length, out);
141}
142
143static struct shash_alg alg = {
144 .digestsize = CRC_T10DIF_DIGEST_SIZE,
145 .init = chksum_init,
146 .update = chksum_update,
147 .final = chksum_final,
148 .finup = chksum_finup,
149 .digest = chksum_digest,
150 .descsize = sizeof(struct chksum_desc_ctx),
151 .base = {
152 .cra_name = "crct10dif",
153 .cra_driver_name = "crct10dif-generic",
154 .cra_priority = 100,
155 .cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
156 .cra_module = THIS_MODULE,
157 }
158};
159
160static int __init crct10dif_mod_init(void)
161{
162 int ret;
163
164 ret = crypto_register_shash(&alg);
165 return ret;
166}
167
168static void __exit crct10dif_mod_fini(void)
169{
170 crypto_unregister_shash(&alg);
171}
172
173module_init(crct10dif_mod_init);
174module_exit(crct10dif_mod_fini);
175
176MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
177MODULE_DESCRIPTION("T10 DIF CRC calculation.");
178MODULE_LICENSE("GPL");
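
The deleted transform above is table-driven: crc_t10dif_generic() consumes one byte per step, indexing a 256-entry table for the T10-DIF polynomial 0x8bb7. For reference, that table can be regenerated with a short MSB-first CRC-16 loop; this generator is a sketch, not part of the removed file:

    static void gen_t10_dif_table(unsigned short table[256])
    {
            unsigned int i, j, crc;

            for (i = 0; i < 256; i++) {
                    crc = i << 8;
                    for (j = 0; j < 8; j++)
                            crc = ((crc << 1) ^ ((crc & 0x8000) ? 0x8bb7 : 0)) & 0xffff;
                    table[i] = crc;
            }
    }

As a cross-check against the test vectors removed from testmgr.h below, crc_t10dif_generic(0, "abc", 3) yields 0x443b, stored little-endian as "\x3b\x44".
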
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 25a5934f0e50..66d254ce0d11 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1174,10 +1174,6 @@ static int do_test(int m)
1174 ret += tcrypt_test("ghash"); 1174 ret += tcrypt_test("ghash");
1175 break; 1175 break;
1176 1176
1177 case 47:
1178 ret += tcrypt_test("crct10dif");
1179 break;
1180
1181 case 100: 1177 case 100:
1182 ret += tcrypt_test("hmac(md5)"); 1178 ret += tcrypt_test("hmac(md5)");
1183 break; 1179 break;
@@ -1502,10 +1498,6 @@ static int do_test(int m)
1502 test_hash_speed("crc32c", sec, generic_hash_speed_template); 1498 test_hash_speed("crc32c", sec, generic_hash_speed_template);
1503 if (mode > 300 && mode < 400) break; 1499 if (mode > 300 && mode < 400) break;
1504 1500
1505 case 320:
1506 test_hash_speed("crct10dif", sec, generic_hash_speed_template);
1507 if (mode > 300 && mode < 400) break;
1508
1509 case 399: 1501 case 399:
1510 break; 1502 break;
1511 1503
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 2f00607039e2..ecddf921a9db 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -2046,16 +2046,6 @@ static const struct alg_test_desc alg_test_descs[] = {
2046 } 2046 }
2047 } 2047 }
2048 }, { 2048 }, {
2049 .alg = "crct10dif",
2050 .test = alg_test_hash,
2051 .fips_allowed = 1,
2052 .suite = {
2053 .hash = {
2054 .vecs = crct10dif_tv_template,
2055 .count = CRCT10DIF_TEST_VECTORS
2056 }
2057 }
2058 }, {
2059 .alg = "cryptd(__driver-cbc-aes-aesni)", 2049 .alg = "cryptd(__driver-cbc-aes-aesni)",
2060 .test = alg_test_null, 2050 .test = alg_test_null,
2061 .fips_allowed = 1, 2051 .fips_allowed = 1,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 7d44aa3d6b44..1e701bc075b9 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -450,39 +450,6 @@ static struct hash_testvec rmd320_tv_template[] = {
450 } 450 }
451}; 451};
452 452
453#define CRCT10DIF_TEST_VECTORS 3
454static struct hash_testvec crct10dif_tv_template[] = {
455 {
456 .plaintext = "abc",
457 .psize = 3,
458#ifdef __LITTLE_ENDIAN
459 .digest = "\x3b\x44",
460#else
461 .digest = "\x44\x3b",
462#endif
463 }, {
464 .plaintext = "1234567890123456789012345678901234567890"
465 "123456789012345678901234567890123456789",
466 .psize = 79,
467#ifdef __LITTLE_ENDIAN
468 .digest = "\x70\x4b",
469#else
470 .digest = "\x4b\x70",
471#endif
472 }, {
473 .plaintext =
474 "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd",
475 .psize = 56,
476#ifdef __LITTLE_ENDIAN
477 .digest = "\xe3\x9c",
478#else
479 .digest = "\x9c\xe3",
480#endif
481 .np = 2,
482 .tap = { 28, 28 }
483 }
484};
485
486/* 453/*
487 * SHA1 test vectors from FIPS PUB 180-1 454 * SHA1 test vectors from FIPS PUB 180-1
488 * Long vector from CAVS 5.0 455 * Long vector from CAVS 5.0
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index d21167bfc865..dc34a5b8bcee 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -359,6 +359,9 @@ int braille_register_console(struct console *console, int index,
359 char *console_options, char *braille_options) 359 char *console_options, char *braille_options)
360{ 360{
361 int ret; 361 int ret;
362
363 if (!(console->flags & CON_BRL))
364 return 0;
362 if (!console_options) 365 if (!console_options)
363 /* Only support VisioBraille for now */ 366 /* Only support VisioBraille for now */
364 console_options = "57600o8"; 367 console_options = "57600o8";
@@ -374,15 +377,17 @@ int braille_register_console(struct console *console, int index,
374 braille_co = console; 377 braille_co = console;
375 register_keyboard_notifier(&keyboard_notifier_block); 378 register_keyboard_notifier(&keyboard_notifier_block);
376 register_vt_notifier(&vt_notifier_block); 379 register_vt_notifier(&vt_notifier_block);
377 return 0; 380 return 1;
378} 381}
379 382
380int braille_unregister_console(struct console *console) 383int braille_unregister_console(struct console *console)
381{ 384{
382 if (braille_co != console) 385 if (braille_co != console)
383 return -EINVAL; 386 return -EINVAL;
387 if (!(console->flags & CON_BRL))
388 return 0;
384 unregister_keyboard_notifier(&keyboard_notifier_block); 389 unregister_keyboard_notifier(&keyboard_notifier_block);
385 unregister_vt_notifier(&vt_notifier_block); 390 unregister_vt_notifier(&vt_notifier_block);
386 braille_co = NULL; 391 braille_co = NULL;
387 return 0; 392 return 1;
388} 393}
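
The braille console hunk changes the return convention of both entry points: 0 now means "not a braille console, nothing done" (the new CON_BRL guard), while 1 signals that the braille driver took ownership. A hypothetical caller sketch under that reading (the actual call site is outside this diff):

    if (braille_register_console(console, index, options, brl_options))
            return; /* handled by the braille driver; skip generic setup */
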
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index fd6c51cc3acb..5a74a9c1e42c 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -451,7 +451,6 @@ static void acpi_processor_remove(struct acpi_device *device)
451 /* Clean up. */ 451 /* Clean up. */
452 per_cpu(processor_device_array, pr->id) = NULL; 452 per_cpu(processor_device_array, pr->id) = NULL;
453 per_cpu(processors, pr->id) = NULL; 453 per_cpu(processors, pr->id) = NULL;
454 try_offline_node(cpu_to_node(pr->id));
455 454
456 /* Remove the CPU. */ 455 /* Remove the CPU. */
457 get_online_cpus(); 456 get_online_cpus();
@@ -459,6 +458,8 @@ static void acpi_processor_remove(struct acpi_device *device)
459 acpi_unmap_lsapic(pr->id); 458 acpi_unmap_lsapic(pr->id);
460 put_online_cpus(); 459 put_online_cpus();
461 460
461 try_offline_node(cpu_to_node(pr->id));
462
462 out: 463 out:
463 free_cpumask_var(pr->throttling.shared_cpu_map); 464 free_cpumask_var(pr->throttling.shared_cpu_map);
464 kfree(pr); 465 kfree(pr);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 082b4dd252a8..d405fbad406a 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -117,6 +117,7 @@ struct acpi_battery {
117 struct acpi_device *device; 117 struct acpi_device *device;
118 struct notifier_block pm_nb; 118 struct notifier_block pm_nb;
119 unsigned long update_time; 119 unsigned long update_time;
120 int revision;
120 int rate_now; 121 int rate_now;
121 int capacity_now; 122 int capacity_now;
122 int voltage_now; 123 int voltage_now;
@@ -359,6 +360,7 @@ static struct acpi_offsets info_offsets[] = {
359}; 360};
360 361
361static struct acpi_offsets extended_info_offsets[] = { 362static struct acpi_offsets extended_info_offsets[] = {
363 {offsetof(struct acpi_battery, revision), 0},
362 {offsetof(struct acpi_battery, power_unit), 0}, 364 {offsetof(struct acpi_battery, power_unit), 0},
363 {offsetof(struct acpi_battery, design_capacity), 0}, 365 {offsetof(struct acpi_battery, design_capacity), 0},
364 {offsetof(struct acpi_battery, full_charge_capacity), 0}, 366 {offsetof(struct acpi_battery, full_charge_capacity), 0},
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index f68095756fb7..408f6b2a5fa8 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -31,6 +31,7 @@ static LIST_HEAD(bus_type_list);
31static DECLARE_RWSEM(bus_type_sem); 31static DECLARE_RWSEM(bus_type_sem);
32 32
33#define PHYSICAL_NODE_STRING "physical_node" 33#define PHYSICAL_NODE_STRING "physical_node"
34#define PHYSICAL_NODE_NAME_SIZE (sizeof(PHYSICAL_NODE_STRING) + 10)
34 35
35int register_acpi_bus_type(struct acpi_bus_type *type) 36int register_acpi_bus_type(struct acpi_bus_type *type)
36{ 37{
@@ -78,41 +79,108 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
78 return ret; 79 return ret;
79} 80}
80 81
81static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used, 82static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
82 void *addr_p, void **ret_p) 83 void *not_used, void **ret_p)
83{ 84{
84 unsigned long long addr, sta; 85 struct acpi_device *adev = NULL;
85 acpi_status status;
86 86
87 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr); 87 acpi_bus_get_device(handle, &adev);
88 if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) { 88 if (adev) {
89 *ret_p = handle; 89 *ret_p = handle;
90 status = acpi_bus_get_status_handle(handle, &sta); 90 return AE_CTRL_TERMINATE;
91 if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_ENABLED))
92 return AE_CTRL_TERMINATE;
93 } 91 }
94 return AE_OK; 92 return AE_OK;
95} 93}
96 94
97acpi_handle acpi_get_child(acpi_handle parent, u64 address) 95static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge)
98{ 96{
99 void *ret = NULL; 97 unsigned long long sta;
98 acpi_status status;
99
100 status = acpi_bus_get_status_handle(handle, &sta);
101 if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
102 return false;
103
104 if (is_bridge) {
105 void *test = NULL;
106
107 /* Check if this object has at least one child device. */
108 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
109 acpi_dev_present, NULL, NULL, &test);
110 return !!test;
111 }
112 return true;
113}
114
115struct find_child_context {
116 u64 addr;
117 bool is_bridge;
118 acpi_handle ret;
119 bool ret_checked;
120};
121
122static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
123 void *data, void **not_used)
124{
125 struct find_child_context *context = data;
126 unsigned long long addr;
127 acpi_status status;
100 128
101 if (!parent) 129 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
102 return NULL; 130 if (ACPI_FAILURE(status) || addr != context->addr)
131 return AE_OK;
103 132
104 acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL, 133 if (!context->ret) {
105 do_acpi_find_child, &address, &ret); 134 /* This is the first matching object. Save its handle. */
106 return (acpi_handle)ret; 135 context->ret = handle;
136 return AE_OK;
137 }
138 /*
139 * There is more than one matching object with the same _ADR value.
140 * That really is unexpected, so we are kind of beyond the scope of the
141 * spec here. We have to choose which one to return, though.
142 *
143 * First, check if the previously found object is good enough and return
144 * its handle if so. Second, check the same for the object that we've
145 * just found.
146 */
147 if (!context->ret_checked) {
148 if (acpi_extra_checks_passed(context->ret, context->is_bridge))
149 return AE_CTRL_TERMINATE;
150 else
151 context->ret_checked = true;
152 }
153 if (acpi_extra_checks_passed(handle, context->is_bridge)) {
154 context->ret = handle;
155 return AE_CTRL_TERMINATE;
156 }
157 return AE_OK;
107} 158}
108EXPORT_SYMBOL(acpi_get_child); 159
160acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge)
161{
162 if (parent) {
163 struct find_child_context context = {
164 .addr = addr,
165 .is_bridge = is_bridge,
166 };
167
168 acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child,
169 NULL, &context, NULL);
170 return context.ret;
171 }
172 return NULL;
173}
174EXPORT_SYMBOL_GPL(acpi_find_child);
109 175
110int acpi_bind_one(struct device *dev, acpi_handle handle) 176int acpi_bind_one(struct device *dev, acpi_handle handle)
111{ 177{
112 struct acpi_device *acpi_dev; 178 struct acpi_device *acpi_dev;
113 acpi_status status; 179 acpi_status status;
114 struct acpi_device_physical_node *physical_node, *pn; 180 struct acpi_device_physical_node *physical_node, *pn;
115 char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2]; 181 char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
182 struct list_head *physnode_list;
183 unsigned int node_id;
116 int retval = -EINVAL; 184 int retval = -EINVAL;
117 185
118 if (ACPI_HANDLE(dev)) { 186 if (ACPI_HANDLE(dev)) {
@@ -139,25 +207,27 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
139 207
140 mutex_lock(&acpi_dev->physical_node_lock); 208 mutex_lock(&acpi_dev->physical_node_lock);
141 209
142 /* Sanity check. */ 210 /*
143 list_for_each_entry(pn, &acpi_dev->physical_node_list, node) 211 * Keep the list sorted by node_id so that the IDs of removed nodes can
212 * be recycled easily.
213 */
214 physnode_list = &acpi_dev->physical_node_list;
215 node_id = 0;
216 list_for_each_entry(pn, &acpi_dev->physical_node_list, node) {
217 /* Sanity check. */
144 if (pn->dev == dev) { 218 if (pn->dev == dev) {
145 dev_warn(dev, "Already associated with ACPI node\n"); 219 dev_warn(dev, "Already associated with ACPI node\n");
146 goto err_free; 220 goto err_free;
147 } 221 }
148 222 if (pn->node_id == node_id) {
149 /* allocate physical node id according to physical_node_id_bitmap */ 223 physnode_list = &pn->node;
150 physical_node->node_id = 224 node_id++;
151 find_first_zero_bit(acpi_dev->physical_node_id_bitmap, 225 }
152 ACPI_MAX_PHYSICAL_NODE);
153 if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) {
154 retval = -ENOSPC;
155 goto err_free;
156 } 226 }
157 227
158 set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap); 228 physical_node->node_id = node_id;
159 physical_node->dev = dev; 229 physical_node->dev = dev;
160 list_add_tail(&physical_node->node, &acpi_dev->physical_node_list); 230 list_add(&physical_node->node, physnode_list);
161 acpi_dev->physical_node_count++; 231 acpi_dev->physical_node_count++;
162 232
163 mutex_unlock(&acpi_dev->physical_node_lock); 233 mutex_unlock(&acpi_dev->physical_node_lock);
@@ -208,7 +278,7 @@ int acpi_unbind_one(struct device *dev)
208 278
209 mutex_lock(&acpi_dev->physical_node_lock); 279 mutex_lock(&acpi_dev->physical_node_lock);
210 list_for_each_safe(node, next, &acpi_dev->physical_node_list) { 280 list_for_each_safe(node, next, &acpi_dev->physical_node_list) {
211 char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2]; 281 char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
212 282
213 entry = list_entry(node, struct acpi_device_physical_node, 283 entry = list_entry(node, struct acpi_device_physical_node,
214 node); 284 node);
@@ -216,7 +286,6 @@ int acpi_unbind_one(struct device *dev)
216 continue; 286 continue;
217 287
218 list_del(node); 288 list_del(node);
219 clear_bit(entry->node_id, acpi_dev->physical_node_id_bitmap);
220 289
221 acpi_dev->physical_node_count--; 290 acpi_dev->physical_node_count--;
222 291
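
The glue.c rework replaces the old first-match child lookup with acpi_find_child(), which copes with duplicate _ADR objects and, for bridges, prefers objects that actually have child devices. A hypothetical PCI-flavoured usage sketch (the addr layout follows the ACPI _ADR convention for PCI, device in the high word and function in the low word; pdev and parent_handle are illustrative names):

    u64 addr = (PCI_SLOT(pdev->devfn) << 16) | PCI_FUNC(pdev->devfn);
    acpi_handle handle = acpi_find_child(parent_handle, addr, pci_is_bridge);
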
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 227aca77ee1e..5da44e81dd4d 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -169,10 +169,8 @@ int acpi_create_platform_device(struct acpi_device *adev,
169 -------------------------------------------------------------------------- */ 169 -------------------------------------------------------------------------- */
170#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) 170#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
171bool acpi_video_backlight_quirks(void); 171bool acpi_video_backlight_quirks(void);
172bool acpi_video_verify_backlight_support(void);
173#else 172#else
174static inline bool acpi_video_backlight_quirks(void) { return false; } 173static inline bool acpi_video_backlight_quirks(void) { return false; }
175static inline bool acpi_video_verify_backlight_support(void) { return false; }
176#endif 174#endif
177 175
178#endif /* _ACPI_INTERNAL_H_ */ 176#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index aa1227a7e3f2..04a13784dd20 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -311,6 +311,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
311 dev->pnp.bus_id, 311 dev->pnp.bus_id,
312 (u32) dev->wakeup.sleep_state); 312 (u32) dev->wakeup.sleep_state);
313 313
314 mutex_lock(&dev->physical_node_lock);
315
314 if (!dev->physical_node_count) { 316 if (!dev->physical_node_count) {
315 seq_printf(seq, "%c%-8s\n", 317 seq_printf(seq, "%c%-8s\n",
316 dev->wakeup.flags.run_wake ? '*' : ' ', 318 dev->wakeup.flags.run_wake ? '*' : ' ',
@@ -338,6 +340,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
338 put_device(ldev); 340 put_device(ldev);
339 } 341 }
340 } 342 }
343
344 mutex_unlock(&dev->physical_node_lock);
341 } 345 }
342 mutex_unlock(&acpi_device_lock); 346 mutex_unlock(&acpi_device_lock);
343 return 0; 347 return 0;
@@ -347,12 +351,16 @@ static void physical_device_enable_wakeup(struct acpi_device *adev)
347{ 351{
348 struct acpi_device_physical_node *entry; 352 struct acpi_device_physical_node *entry;
349 353
354 mutex_lock(&adev->physical_node_lock);
355
350 list_for_each_entry(entry, 356 list_for_each_entry(entry,
351 &adev->physical_node_list, node) 357 &adev->physical_node_list, node)
352 if (entry->dev && device_can_wakeup(entry->dev)) { 358 if (entry->dev && device_can_wakeup(entry->dev)) {
353 bool enable = !device_may_wakeup(entry->dev); 359 bool enable = !device_may_wakeup(entry->dev);
354 device_set_wakeup_enable(entry->dev, enable); 360 device_set_wakeup_enable(entry->dev, enable);
355 } 361 }
362
363 mutex_unlock(&adev->physical_node_lock);
356} 364}
357 365
358static ssize_t 366static ssize_t
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 6dd237e79b4f..3270d3c8ba4e 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -689,7 +689,7 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
689 * Some systems always report current brightness level as maximum 689 * Some systems always report current brightness level as maximum
690 * through _BQC, we need to test another value for them. 690 * through _BQC, we need to test another value for them.
691 */ 691 */
692 test_level = current_level == max_level ? br->levels[2] : max_level; 692 test_level = current_level == max_level ? br->levels[3] : max_level;
693 693
694 result = acpi_video_device_lcd_set_level(device, test_level); 694 result = acpi_video_device_lcd_set_level(device, test_level);
695 if (result) 695 if (result)
@@ -908,10 +908,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
908 device->cap._DDC = 1; 908 device->cap._DDC = 1;
909 } 909 }
910 910
911 if (acpi_video_init_brightness(device)) 911 if (acpi_video_backlight_support()) {
912 return;
913
914 if (acpi_video_verify_backlight_support()) {
915 struct backlight_properties props; 912 struct backlight_properties props;
916 struct pci_dev *pdev; 913 struct pci_dev *pdev;
917 acpi_handle acpi_parent; 914 acpi_handle acpi_parent;
@@ -920,6 +917,9 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
920 static int count = 0; 917 static int count = 0;
921 char *name; 918 char *name;
922 919
920 result = acpi_video_init_brightness(device);
921 if (result)
922 return;
923 name = kasprintf(GFP_KERNEL, "acpi_video%d", count); 923 name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
924 if (!name) 924 if (!name)
925 return; 925 return;
@@ -979,11 +979,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
979 if (result) 979 if (result)
980 printk(KERN_ERR PREFIX "Create sysfs link\n"); 980 printk(KERN_ERR PREFIX "Create sysfs link\n");
981 981
982 } else {
983 /* Remove the brightness object. */
984 kfree(device->brightness->levels);
985 kfree(device->brightness);
986 device->brightness = NULL;
987 } 982 }
988} 983}
989 984
@@ -1366,8 +1361,8 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
1366 unsigned long long level_current, level_next; 1361 unsigned long long level_current, level_next;
1367 int result = -EINVAL; 1362 int result = -EINVAL;
1368 1363
1369 /* no warning message if acpi_backlight=vendor or a quirk is used */ 1364 /* no warning message if acpi_backlight=vendor is used */
1370 if (!acpi_video_verify_backlight_support()) 1365 if (!acpi_video_backlight_support())
1371 return 0; 1366 return 0;
1372 1367
1373 if (!device->brightness) 1368 if (!device->brightness)
@@ -1875,46 +1870,6 @@ static int acpi_video_bus_remove(struct acpi_device *device)
1875 return 0; 1870 return 0;
1876} 1871}
1877 1872
1878static acpi_status video_unregister_backlight(acpi_handle handle, u32 lvl,
1879 void *context, void **rv)
1880{
1881 struct acpi_device *acpi_dev;
1882 struct acpi_video_bus *video;
1883 struct acpi_video_device *dev, *next;
1884
1885 if (acpi_bus_get_device(handle, &acpi_dev))
1886 return AE_OK;
1887
1888 if (acpi_match_device_ids(acpi_dev, video_device_ids))
1889 return AE_OK;
1890
1891 video = acpi_driver_data(acpi_dev);
1892 if (!video)
1893 return AE_OK;
1894
1895 acpi_video_bus_stop_devices(video);
1896 mutex_lock(&video->device_list_lock);
1897 list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
1898 if (dev->backlight) {
1899 backlight_device_unregister(dev->backlight);
1900 dev->backlight = NULL;
1901 kfree(dev->brightness->levels);
1902 kfree(dev->brightness);
1903 }
1904 if (dev->cooling_dev) {
1905 sysfs_remove_link(&dev->dev->dev.kobj,
1906 "thermal_cooling");
1907 sysfs_remove_link(&dev->cooling_dev->device.kobj,
1908 "device");
1909 thermal_cooling_device_unregister(dev->cooling_dev);
1910 dev->cooling_dev = NULL;
1911 }
1912 }
1913 mutex_unlock(&video->device_list_lock);
1914 acpi_video_bus_start_devices(video);
1915 return AE_OK;
1916}
1917
1918static int __init is_i740(struct pci_dev *dev) 1873static int __init is_i740(struct pci_dev *dev)
1919{ 1874{
1920 if (dev->device == 0x00D1) 1875 if (dev->device == 0x00D1)
@@ -1946,25 +1901,14 @@ static int __init intel_opregion_present(void)
1946 return opregion; 1901 return opregion;
1947} 1902}
1948 1903
1949int __acpi_video_register(bool backlight_quirks) 1904int acpi_video_register(void)
1950{ 1905{
1951 bool no_backlight; 1906 int result = 0;
1952 int result;
1953
1954 no_backlight = backlight_quirks ? acpi_video_backlight_quirks() : false;
1955
1956 if (register_count) { 1907 if (register_count) {
1957 /* 1908 /*
1958 * If acpi_video_register() has been called already, don't try 1909 * if acpi_video_register() has already been called,
1959 * to register acpi_video_bus, but unregister backlight devices 1910 * don't register the acpi_video_bus again and return no error.
1960 * if no backlight support is requested.
1961 */ 1911 */
1962 if (no_backlight)
1963 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
1964 ACPI_UINT32_MAX,
1965 video_unregister_backlight,
1966 NULL, NULL, NULL);
1967
1968 return 0; 1912 return 0;
1969 } 1913 }
1970 1914
@@ -1980,7 +1924,7 @@ int __acpi_video_register(bool backlight_quirks)
1980 1924
1981 return 0; 1925 return 0;
1982} 1926}
1983EXPORT_SYMBOL(__acpi_video_register); 1927EXPORT_SYMBOL(acpi_video_register);
1984 1928
1985void acpi_video_unregister(void) 1929void acpi_video_unregister(void)
1986{ 1930{
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 826e52def080..c3397748ba46 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -238,12 +238,7 @@ static void acpi_video_caps_check(void)
238 238
239bool acpi_video_backlight_quirks(void) 239bool acpi_video_backlight_quirks(void)
240{ 240{
241 if (acpi_gbl_osi_data >= ACPI_OSI_WIN_8) { 241 return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
242 acpi_video_caps_check();
243 acpi_video_support |= ACPI_VIDEO_SKIP_BACKLIGHT;
244 return true;
245 }
246 return false;
247} 242}
248EXPORT_SYMBOL(acpi_video_backlight_quirks); 243EXPORT_SYMBOL(acpi_video_backlight_quirks);
249 244
@@ -291,14 +286,6 @@ int acpi_video_backlight_support(void)
291} 286}
292EXPORT_SYMBOL(acpi_video_backlight_support); 287EXPORT_SYMBOL(acpi_video_backlight_support);
293 288
294/* For the ACPI video driver use only. */
295bool acpi_video_verify_backlight_support(void)
296{
297 return (acpi_video_support & ACPI_VIDEO_SKIP_BACKLIGHT) ?
298 false : acpi_video_backlight_support();
299}
300EXPORT_SYMBOL(acpi_video_verify_backlight_support);
301
302/* 289/*
303 * Use acpi_backlight=vendor/video to force that backlight switching 290 * Use acpi_backlight=vendor/video to force that backlight switching
304 * is processed by vendor specific acpi drivers or video.ko driver. 291 * is processed by vendor specific acpi drivers or video.ko driver.
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 80dc988f01e4..4e737728aee2 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -97,6 +97,15 @@ config SATA_AHCI_PLATFORM
97 97
98 If unsure, say N. 98 If unsure, say N.
99 99
100config AHCI_IMX
101 tristate "Freescale i.MX AHCI SATA support"
102 depends on SATA_AHCI_PLATFORM && MFD_SYSCON
103 help
104 This option enables support for the Freescale i.MX SoC's
105 onboard AHCI SATA.
106
107 If unsure, say N.
108
100config SATA_FSL 109config SATA_FSL
101 tristate "Freescale 3.0Gbps SATA support" 110 tristate "Freescale 3.0Gbps SATA support"
102 depends on FSL_SOC 111 depends on FSL_SOC
@@ -107,7 +116,7 @@ config SATA_FSL
107 If unsure, say N. 116 If unsure, say N.
108 117
109config SATA_INIC162X 118config SATA_INIC162X
110 tristate "Initio 162x SATA support" 119 tristate "Initio 162x SATA support (Very Experimental)"
111 depends on PCI 120 depends on PCI
112 help 121 help
113 This option enables support for Initio 162x Serial ATA. 122 This option enables support for Initio 162x Serial ATA.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index c04d0fd038a3..46518c622460 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
10obj-$(CONFIG_SATA_SIL24) += sata_sil24.o 10obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
11obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o 11obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
12obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o 12obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o
13obj-$(CONFIG_AHCI_IMX) += ahci_imx.o
13 14
14# SFF w/ custom DMA 15# SFF w/ custom DMA
15obj-$(CONFIG_PDC_ADMA) += pdc_adma.o 16obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5064f3ea20f1..db4380d70031 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1146,11 +1146,18 @@ int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
1146 return rc; 1146 return rc;
1147 1147
1148 for (i = 0; i < host->n_ports; i++) { 1148 for (i = 0; i < host->n_ports; i++) {
1149 const char* desc;
1149 struct ahci_port_priv *pp = host->ports[i]->private_data; 1150 struct ahci_port_priv *pp = host->ports[i]->private_data;
1150 1151
1152 /* pp is NULL for dummy ports */
1153 if (pp)
1154 desc = pp->irq_desc;
1155 else
1156 desc = dev_driver_string(host->dev);
1157
1151 rc = devm_request_threaded_irq(host->dev, 1158 rc = devm_request_threaded_irq(host->dev,
1152 irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED, 1159 irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED,
1153 pp->irq_desc, host->ports[i]); 1160 desc, host->ports[i]);
1154 if (rc) 1161 if (rc)
1155 goto out_free_irqs; 1162 goto out_free_irqs;
1156 } 1163 }
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
new file mode 100644
index 000000000000..58debb0acc3a
--- /dev/null
+++ b/drivers/ata/ahci_imx.c
@@ -0,0 +1,236 @@
1/*
2 * Freescale IMX AHCI SATA platform driver
3 * Copyright 2013 Freescale Semiconductor, Inc.
4 *
5 * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/platform_device.h>
23#include <linux/regmap.h>
24#include <linux/ahci_platform.h>
25#include <linux/of_device.h>
26#include <linux/mfd/syscon.h>
27#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
28#include "ahci.h"
29
30enum {
31 HOST_TIMER1MS = 0xe0, /* Timer 1-ms */
32};
33
34struct imx_ahci_priv {
35 struct platform_device *ahci_pdev;
36 struct clk *sata_ref_clk;
37 struct clk *ahb_clk;
38 struct regmap *gpr;
39};
40
41static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
42{
43 int ret = 0;
44 unsigned int reg_val;
45 struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
46
47 imxpriv->gpr =
48 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
49 if (IS_ERR(imxpriv->gpr)) {
50 dev_err(dev, "failed to find fsl,imx6q-iomux-gpr regmap\n");
51 return PTR_ERR(imxpriv->gpr);
52 }
53
54 ret = clk_prepare_enable(imxpriv->sata_ref_clk);
55 if (ret < 0) {
56 dev_err(dev, "prepare-enable sata_ref clock err:%d\n", ret);
57 return ret;
58 }
59
60 /*
61 * Set PHY parameters; GPR13 is configured in two steps:
62 * one write for the rest of the parameters (the mask of the
63 * first write is 0x07fffffd), and a second write to set
64 * the mpll_clk_en bit.
65 */
66 regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
67 | IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK
68 | IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK
69 | IMX6Q_GPR13_SATA_SPD_MODE_MASK
70 | IMX6Q_GPR13_SATA_MPLL_SS_EN
71 | IMX6Q_GPR13_SATA_TX_ATTEN_MASK
72 | IMX6Q_GPR13_SATA_TX_BOOST_MASK
73 | IMX6Q_GPR13_SATA_TX_LVL_MASK
74 | IMX6Q_GPR13_SATA_TX_EDGE_RATE
75 , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
76 | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M
77 | IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F
78 | IMX6Q_GPR13_SATA_SPD_MODE_3P0G
79 | IMX6Q_GPR13_SATA_MPLL_SS_EN
80 | IMX6Q_GPR13_SATA_TX_ATTEN_9_16
81 | IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB
82 | IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
83 regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_MPLL_CLK_EN,
84 IMX6Q_GPR13_SATA_MPLL_CLK_EN);
85 usleep_range(100, 200);
86
87 /*
88 * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
89 * and IP vendor specific register HOST_TIMER1MS.
90 * Configure CAP_SSS (support staggered spin-up).
91 * Implement port 0.
92 * Get the ahb clock rate, and configure the TIMER1MS register.
93 */
94 reg_val = readl(mmio + HOST_CAP);
95 if (!(reg_val & HOST_CAP_SSS)) {
96 reg_val |= HOST_CAP_SSS;
97 writel(reg_val, mmio + HOST_CAP);
98 }
99 reg_val = readl(mmio + HOST_PORTS_IMPL);
100 if (!(reg_val & 0x1)) {
101 reg_val |= 0x1;
102 writel(reg_val, mmio + HOST_PORTS_IMPL);
103 }
104
105 reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
106 writel(reg_val, mmio + HOST_TIMER1MS);
107
108 return 0;
109}
110
111static void imx6q_sata_exit(struct device *dev)
112{
113 struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
114
115 regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_MPLL_CLK_EN,
116 !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
117 clk_disable_unprepare(imxpriv->sata_ref_clk);
118}
119
120static struct ahci_platform_data imx6q_sata_pdata = {
121 .init = imx6q_sata_init,
122 .exit = imx6q_sata_exit,
123};
124
125static const struct of_device_id imx_ahci_of_match[] = {
126 { .compatible = "fsl,imx6q-ahci", .data = &imx6q_sata_pdata},
127 {},
128};
129MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
130
131static int imx_ahci_probe(struct platform_device *pdev)
132{
133 struct device *dev = &pdev->dev;
134 struct resource *mem, *irq, res[2];
135 const struct of_device_id *of_id;
136 const struct ahci_platform_data *pdata = NULL;
137 struct imx_ahci_priv *imxpriv;
138 struct device *ahci_dev;
139 struct platform_device *ahci_pdev;
140 int ret;
141
142 imxpriv = devm_kzalloc(dev, sizeof(*imxpriv), GFP_KERNEL);
143 if (!imxpriv) {
144 dev_err(dev, "can't alloc imx_ahci_priv\n");
145 return -ENOMEM;
146 }
147
148 ahci_pdev = platform_device_alloc("ahci", -1);
149 if (!ahci_pdev)
150 return -ENODEV;
151
152 ahci_dev = &ahci_pdev->dev;
153 ahci_dev->parent = dev;
154
155 imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
156 if (IS_ERR(imxpriv->ahb_clk)) {
157 dev_err(dev, "can't get ahb clock.\n");
158 ret = PTR_ERR(imxpriv->ahb_clk);
159 goto err_out;
160 }
161
162 imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
163 if (IS_ERR(imxpriv->sata_ref_clk)) {
164 dev_err(dev, "can't get sata_ref clock.\n");
165 ret = PTR_ERR(imxpriv->sata_ref_clk);
166 goto err_out;
167 }
168
169 imxpriv->ahci_pdev = ahci_pdev;
170 platform_set_drvdata(pdev, imxpriv);
171
172 of_id = of_match_device(imx_ahci_of_match, dev);
173 if (of_id) {
174 pdata = of_id->data;
175 } else {
176 ret = -EINVAL;
177 goto err_out;
178 }
179
180 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
181 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
182 if (!mem || !irq) {
183 dev_err(dev, "no mmio/irq resource\n");
184 ret = -ENOMEM;
185 goto err_out;
186 }
187
188 res[0] = *mem;
189 res[1] = *irq;
190
191 ahci_dev->coherent_dma_mask = DMA_BIT_MASK(32);
192 ahci_dev->dma_mask = &ahci_dev->coherent_dma_mask;
193 ahci_dev->of_node = dev->of_node;
194
195 ret = platform_device_add_resources(ahci_pdev, res, 2);
196 if (ret)
197 goto err_out;
198
199 ret = platform_device_add_data(ahci_pdev, pdata, sizeof(*pdata));
200 if (ret)
201 goto err_out;
202
203 ret = platform_device_add(ahci_pdev);
204 if (ret) {
205err_out:
206 platform_device_put(ahci_pdev);
207 return ret;
208 }
209
210 return 0;
211}
212
213static int imx_ahci_remove(struct platform_device *pdev)
214{
215 struct imx_ahci_priv *imxpriv = platform_get_drvdata(pdev);
216 struct platform_device *ahci_pdev = imxpriv->ahci_pdev;
217
218 platform_device_unregister(ahci_pdev);
219 return 0;
220}
221
222static struct platform_driver imx_ahci_driver = {
223 .probe = imx_ahci_probe,
224 .remove = imx_ahci_remove,
225 .driver = {
226 .name = "ahci-imx",
227 .owner = THIS_MODULE,
228 .of_match_table = imx_ahci_of_match,
229 },
230};
231module_platform_driver(imx_ahci_driver);
232
233MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
234MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
235MODULE_LICENSE("GPL");
236MODULE_ALIAS("ahci:imx");
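
The two-step GPR13 programming in imx6q_sata_init() above leans on regmap_update_bits(), a read-modify-write helper with the semantics new = (old & ~mask) | (val & mask). A condensed sketch of the pattern, where PHY_PARAM_MASK/PHY_PARAM_VAL stand in for the long IMX6Q_GPR13_SATA_* expressions above:

    regmap_update_bits(gpr, 0x34, PHY_PARAM_MASK, PHY_PARAM_VAL);  /* step 1: PHY parameters */
    regmap_update_bits(gpr, 0x34, IMX6Q_GPR13_SATA_MPLL_CLK_EN,
                       IMX6Q_GPR13_SATA_MPLL_CLK_EN);              /* step 2: enable MPLL clock */
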
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index b52a10c8eeb9..513ad7ed0c99 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -330,7 +330,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	/* SATA Controller IDE (Wellsburg) */
 	{ 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
 	/* SATA Controller IDE (Wellsburg) */
-	{ 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	{ 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
 	/* SATA Controller IDE (Wellsburg) */
 	{ 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
 	/* SATA Controller IDE (Wellsburg) */
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 1c41722bb7e2..20fd337a5731 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
 
 	/* Disable sending Early R_OK.
 	 * With "cached read" HDD testing and multiple ports busy on a SATA
-	 * host controller, 3726 PMP will very rarely drop a deferred
+	 * host controller, 3x26 PMP will very rarely drop a deferred
 	 * R_OK that was intended for the host. Symptom will be all
 	 * 5 drives under test will timeout, get reset, and recover.
 	 */
-	if (vendor == 0x1095 && devid == 0x3726) {
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
 		u32 reg;
 
 		err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
 		if (err_mask) {
 			rc = -EIO;
-			reason = "failed to read Sil3726 Private Register";
+			reason = "failed to read Sil3x26 Private Register";
 			goto fail;
 		}
 		reg &= ~0x1;
 		err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
 		if (err_mask) {
 			rc = -EIO;
-			reason = "failed to write Sil3726 Private Register";
+			reason = "failed to write Sil3x26 Private Register";
 			goto fail;
 		}
 	}
@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
 	u16 devid = sata_pmp_gscr_devid(gscr);
 	struct ata_link *link;
 
-	if (vendor == 0x1095 && devid == 0x3726) {
-		/* sil3726 quirks */
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+		/* sil3x26 quirks */
 		ata_for_each_link(link, ap, EDGE) {
 			/* link reports offline after LPM */
 			link->flags |= ATA_LFLAG_NO_LPM;
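Both hunks extend the same vendor/device-id test, so the predicate could plausibly be factored into one helper; a hypothetical standalone sketch (the patch itself open-codes the test twice):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the Sil3x26 quirk test above. */
static bool is_sil3x26(uint16_t vendor, uint16_t devid)
{
	return vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826);
}

int main(void)
{
	printf("3726: %d, 3826: %d, 3512: %d\n",
	       is_sil3x26(0x1095, 0x3726),
	       is_sil3x26(0x1095, 0x3826),
	       is_sil3x26(0x1095, 0x3512));
	return 0;
}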
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 83c08907e042..b1e880a3c3da 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -206,8 +206,10 @@ static ssize_t ata_scsi_park_store(struct device *device,
 	unsigned long flags;
 	int rc;
 
-	rc = strict_strtol(buf, 10, &input);
-	if (rc || input < -2)
+	rc = kstrtol(buf, 10, &input);
+	if (rc)
+		return rc;
+	if (input < -2)
 		return -EINVAL;
 	if (input > ATA_TMOUT_MAX_PARK) {
 		rc = -EOVERFLOW;
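The change propagates the parser's own error code (kstrtol() returns -EINVAL or -ERANGE) instead of folding every failure into -EINVAL, and only the application-level range check still maps to -EINVAL. A userspace analogue of the stricter parse (hypothetical helper; it ignores kstrtol()'s acceptance of a single trailing newline):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace analogue of kstrtol(): reject empty input, trailing
 * garbage and out-of-range values instead of silently accepting them. */
static int parse_long(const char *s, long *out)
{
	char *end;

	errno = 0;
	*out = strtol(s, &end, 10);
	if (errno == ERANGE)
		return -ERANGE;
	if (end == s || *end != '\0')
		return -EINVAL;
	return 0;
}

int main(void)
{
	long v;

	printf("%d\n", parse_long("-2", &v));	/* 0: parsed ok */
	printf("%d\n", parse_long("12x", &v));	/* -EINVAL: trailing garbage */
	return 0;
}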
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 4ec7c04b3f82..26386f0b89a8 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -237,6 +237,7 @@ static const struct of_device_id imx_pata_dt_ids[] = {
 	/* sentinel */
 	}
 };
+MODULE_DEVICE_TABLE(of, imx_pata_dt_ids);
 
 static struct platform_driver pata_imx_driver = {
 	.probe = pata_imx_probe,
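The one-line addition is what makes autoloading work: without MODULE_DEVICE_TABLE(of, ...) the module carries no "of:" aliases, so udev never loads it when a matching device-tree node shows up. A generic module fragment showing the pattern, with a hypothetical compatible string:

#include <linux/module.h>
#include <linux/mod_devicetable.h>

static const struct of_device_id demo_dt_ids[] = {
	{ .compatible = "acme,demo-ide" },	/* hypothetical */
	{ /* sentinel */ }
};
/* Emits "of:N*T*Cacme,demo-ide"-style aliases into the module's modinfo. */
MODULE_DEVICE_TABLE(of, demo_dt_ids);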
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 19720a0a4a65..851bd3f43ac6 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -293,6 +293,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
 {
 	struct sata_fsl_host_priv *host_priv = host->private_data;
 	void __iomem *hcr_base = host_priv->hcr_base;
+	unsigned long flags;
 
 	if (count > ICC_MAX_INT_COUNT_THRESHOLD)
 		count = ICC_MAX_INT_COUNT_THRESHOLD;
@@ -305,12 +306,12 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
 	    (count > ICC_MIN_INT_COUNT_THRESHOLD))
 		ticks = ICC_SAFE_INT_TICKS;
 
-	spin_lock(&host->lock);
+	spin_lock_irqsave(&host->lock, flags);
 	iowrite32((count << 24 | ticks), hcr_base + ICC);
 
 	intr_coalescing_count = count;
 	intr_coalescing_ticks = ticks;
-	spin_unlock(&host->lock);
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
 		intr_coalescing_count, intr_coalescing_ticks);
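The switch to the _irqsave variants is the classic fix for a lock that is also taken from interrupt context: with a plain spin_lock(), an IRQ arriving on the same CPU while the lock is held would self-deadlock. A minimal fragment of the safe pattern (hypothetical function, assuming <linux/libata.h> for struct ata_host):

/* Hypothetical example: host->lock is shared with the IRQ handler,
 * so process context must disable local interrupts while holding it. */
static void demo_update_shared_state(struct ata_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);	/* IRQs off on this CPU */
	/* ... touch state that the interrupt handler also writes ... */
	spin_unlock_irqrestore(&host->lock, flags);
}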
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index d047d92a456f..e9a4f46d962e 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -86,11 +86,11 @@ struct ecx_plat_data {
 
 #define SGPIO_SIGNALS 3
 #define ECX_ACTIVITY_BITS 0x300000
-#define ECX_ACTIVITY_SHIFT 2
+#define ECX_ACTIVITY_SHIFT 0
 #define ECX_LOCATE_BITS 0x80000
 #define ECX_LOCATE_SHIFT 1
 #define ECX_FAULT_BITS 0x400000
-#define ECX_FAULT_SHIFT 0
+#define ECX_FAULT_SHIFT 2
 static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
 				  u32 shift)
 {
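The swap is about signal ordering: each port drives three SGPIO signals, and assuming the usual bit = port * SGPIO_SIGNALS + shift layout (the body of sgpio_bit_shift() is not shown in this hunk), the corrected shifts place activity, locate and fault at per-port offsets 0, 1 and 2 rather than 2, 1, 0. A standalone check of that arithmetic:

#include <stdio.h>

#define SGPIO_SIGNALS 3

int main(void)
{
	for (unsigned int port = 0; port < 2; port++)
		printf("port %u: activity bit %u, locate bit %u, fault bit %u\n",
		       port,
		       port * SGPIO_SIGNALS + 0,	/* ECX_ACTIVITY_SHIFT */
		       port * SGPIO_SIGNALS + 1,	/* ECX_LOCATE_SHIFT */
		       port * SGPIO_SIGNALS + 2);	/* ECX_FAULT_SHIFT */
	return 0;
}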
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index e45131748248..5c54d957370a 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -6,6 +6,18 @@
  *
  * This file is released under GPL v2.
  *
+ * **** WARNING ****
+ *
+ * This driver never worked properly and unfortunately data corruption is
+ * relatively common.  There isn't anyone working on the driver and there's
+ * no support from the vendor.  Do not use this driver in any production
+ * environment.
+ *
+ * http://thread.gmane.org/gmane.linux.debian.devel.bugs.rc/378525/focus=54491
+ * https://bugzilla.kernel.org/show_bug.cgi?id=60565
+ *
+ * *****************
+ *
  * This controller is eccentric and easily locks up if something isn't
  * right.  Documentation is available at initio's website but it only
  * documents registers (not programming model).
@@ -807,6 +819,8 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	ata_print_version_once(&pdev->dev, DRV_VERSION);
 
+	dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact linux-ide@vger.kernel.org if in production use\n");
+
 	/* alloc host */
 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index e69102696533..3455f833e473 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -719,7 +719,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
 		}
 	}
 
-	return regcache_sync_block_raw_flush(map, &data, base, regtmp);
+	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
+					     map->reg_stride);
 }
 
 int regcache_sync_block(struct regmap *map, void *block,
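The off-by-one fix reads correctly if regcache_sync_block_raw_flush() treats its last argument as an exclusive end: regtmp is the address of the final register in the dirty run, so the flush has to extend one reg_stride past it or that last register is silently dropped. A standalone sketch of the arithmetic, assuming the exclusive-end convention:

#include <stdio.h>

int main(void)
{
	unsigned int base = 0x08, regtmp = 0x10, stride = 4;

	/* Exclusive end: passing regtmp alone would stop at 0x0c. */
	for (unsigned int reg = base; reg < regtmp + stride; reg += stride)
		printf("flush reg 0x%02x\n", reg);	/* 0x08 0x0c 0x10 */
	return 0;
}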
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index b81ddfea1da0..e07a5fd58ad7 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -532,11 +532,11 @@ config BLK_DEV_RBD
 	  If unsure, say N.
 
 config BLK_DEV_RSXX
-	tristate "IBM FlashSystem 70/80 PCIe SSD Device Driver"
+	tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
 	depends on PCI
 	help
 	  Device driver for IBM's high speed PCIe SSD
-	  storage devices: FlashSystem-70 and FlashSystem-80.
+	  storage device: Flash Adapter 900GB Full Height.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called rsxx.
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 99cb944a002d..4d45dba7fb8f 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -906,16 +906,10 @@ bio_pageinc(struct bio *bio)
 	int i;
 
 	bio_for_each_segment(bv, bio, i) {
-		page = bv->bv_page;
 		/* Non-zero page count for non-head members of
-		 * compound pages is no longer allowed by the kernel,
-		 * but this has never been seen here.
+		 * compound pages is no longer allowed by the kernel.
 		 */
-		if (unlikely(PageCompound(page)))
-			if (compound_trans_head(page) != page) {
-				pr_crit("page tail used for block I/O\n");
-				BUG();
-			}
+		page = compound_trans_head(bv->bv_page);
 		atomic_inc(&page->_count);
 	}
 }
@@ -924,10 +918,13 @@ static void
 bio_pagedec(struct bio *bio)
 {
 	struct bio_vec *bv;
+	struct page *page;
 	int i;
 
-	bio_for_each_segment(bv, bio, i)
-		atomic_dec(&bv->bv_page->_count);
+	bio_for_each_segment(bv, bio, i) {
+		page = compound_trans_head(bv->bv_page);
+		atomic_dec(&page->_count);
+	}
 }
 
 static void
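Both hunks enforce the same rule: page reference counts for a compound page live on its head page, so any get/put against a tail page must first be redirected through compound_trans_head(). A toy standalone model of the idea (struct page here is a stand-in, not the kernel's):

#include <stdio.h>

struct page {
	struct page *head;	/* points to self for head/standalone pages */
	int count;
};

static struct page *head_of(struct page *p)	/* hypothetical */
{
	return p->head;
}

int main(void)
{
	struct page head = { &head, 1 };
	struct page tail = { &head, 0 };

	head_of(&tail)->count++;	/* the reference lands on the head */
	printf("head.count=%d tail.count=%d\n", head.count, tail.count);
	return 0;
}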
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 6608076dc39e..28c73ca320a8 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -659,6 +659,27 @@ void drbd_al_shrink(struct drbd_conf *mdev)
 	wake_up(&mdev->al_wait);
 }
 
+int drbd_initialize_al(struct drbd_conf *mdev, void *buffer)
+{
+	struct al_transaction_on_disk *al = buffer;
+	struct drbd_md *md = &mdev->ldev->md;
+	sector_t al_base = md->md_offset + md->al_offset;
+	int al_size_4k = md->al_stripes * md->al_stripe_size_4k;
+	int i;
+
+	memset(al, 0, 4096);
+	al->magic = cpu_to_be32(DRBD_AL_MAGIC);
+	al->transaction_type = cpu_to_be16(AL_TR_INITIALIZED);
+	al->crc32c = cpu_to_be32(crc32c(0, al, 4096));
+
+	for (i = 0; i < al_size_4k; i++) {
+		int err = drbd_md_sync_page_io(mdev, mdev->ldev, al_base + i * 8, WRITE);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
 static int w_update_odbm(struct drbd_work *w, int unused)
 {
 	struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);
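The stride of 8 in the drbd_md_sync_page_io() call falls out of the units: each AL transaction block is 4096 bytes and the metadata is addressed in 512-byte sectors, so consecutive 4K blocks sit 4096 / 512 = 8 sectors apart. Writing the same zeroed-and-stamped buffer al_size_4k times therefore initializes the entire on-disk activity log.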
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index f943aacfdad8..2d7f608d181c 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -832,6 +832,7 @@ struct drbd_tconn { /* is a resource from the config file */
 	unsigned susp_nod:1;		/* IO suspended because no data */
 	unsigned susp_fen:1;		/* IO suspended because fence peer handler runs */
 	struct mutex cstate_mutex;	/* Protects graceful disconnects */
+	unsigned int connect_cnt;	/* Inc each time a connection is established */
 
 	unsigned long flags;
 	struct net_conf *net_conf;	/* content protected by rcu */
@@ -1132,6 +1133,7 @@ extern void drbd_mdev_cleanup(struct drbd_conf *mdev);
 void drbd_print_uuids(struct drbd_conf *mdev, const char *text);
 
 extern void conn_md_sync(struct drbd_tconn *tconn);
+extern void drbd_md_write(struct drbd_conf *mdev, void *buffer);
 extern void drbd_md_sync(struct drbd_conf *mdev);
 extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
 extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
@@ -1466,8 +1468,16 @@ extern void drbd_suspend_io(struct drbd_conf *mdev);
 extern void drbd_resume_io(struct drbd_conf *mdev);
 extern char *ppsize(char *buf, unsigned long long size);
 extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, sector_t, int);
-enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
-extern enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
+enum determine_dev_size {
+	DS_ERROR_SHRINK = -3,
+	DS_ERROR_SPACE_MD = -2,
+	DS_ERROR = -1,
+	DS_UNCHANGED = 0,
+	DS_SHRUNK = 1,
+	DS_GREW = 2
+};
+extern enum determine_dev_size
+drbd_determine_dev_size(struct drbd_conf *, enum dds_flags, struct resize_parms *) __must_hold(local);
 extern void resync_after_online_grow(struct drbd_conf *);
 extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev);
 extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
@@ -1633,6 +1643,7 @@ extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
 #define drbd_set_out_of_sync(mdev, sector, size) \
 	__drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
 extern void drbd_al_shrink(struct drbd_conf *mdev);
+extern int drbd_initialize_al(struct drbd_conf *, void *);
 
 /* drbd_nl.c */
 /* state info broadcast */
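Note that the three error values are deliberately ordered below DS_ERROR: a caller can catch all of them with a single comparison and still distinguish the specific cause afterwards, which is exactly what the drbd_nl.c hunks below do with "dd <= DS_ERROR". A standalone illustration:

#include <stdio.h>

enum determine_dev_size {
	DS_ERROR_SHRINK = -3, DS_ERROR_SPACE_MD = -2, DS_ERROR = -1,
	DS_UNCHANGED = 0, DS_SHRUNK = 1, DS_GREW = 2
};

int main(void)
{
	enum determine_dev_size dd = DS_ERROR_SPACE_MD;

	if (dd <= DS_ERROR)		/* catches all three error codes */
		printf("resize failed (%d)\n", (int)dd);
	return 0;
}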
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index a5dca6affcbb..55635edf563b 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2762,8 +2762,6 @@ int __init drbd_init(void)
 	/*
 	 * allocate all necessary structs
 	 */
-	err = -ENOMEM;
-
 	init_waitqueue_head(&drbd_pp_wait);
 
 	drbd_proc = NULL; /* play safe for drbd_cleanup */
@@ -2773,6 +2771,7 @@ int __init drbd_init(void)
 	if (err)
 		goto fail;
 
+	err = -ENOMEM;
 	drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
 	if (!drbd_proc) {
 		printk(KERN_ERR "drbd: unable to register proc file\n");
@@ -2803,7 +2802,6 @@ int __init drbd_init(void)
 fail:
 	drbd_cleanup();
 	if (err == -ENOMEM)
-		/* currently always the case */
 		printk(KERN_ERR "drbd: ran out of memory\n");
 	else
 		printk(KERN_ERR "drbd: initialization failure\n");
@@ -2881,34 +2879,14 @@ struct meta_data_on_disk {
 	u8 reserved_u8[4096 - (7*8 + 10*4)];
 } __packed;
 
-/**
- * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
- * @mdev:	DRBD device.
- */
-void drbd_md_sync(struct drbd_conf *mdev)
+
+
+void drbd_md_write(struct drbd_conf *mdev, void *b)
 {
-	struct meta_data_on_disk *buffer;
+	struct meta_data_on_disk *buffer = b;
 	sector_t sector;
 	int i;
 
-	/* Don't accidentally change the DRBD meta data layout. */
-	BUILD_BUG_ON(UI_SIZE != 4);
-	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
-
-	del_timer(&mdev->md_sync_timer);
-	/* timer may be rearmed by drbd_md_mark_dirty() now. */
-	if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
-		return;
-
-	/* We use here D_FAILED and not D_ATTACHING because we try to write
-	 * metadata even if we detach due to a disk failure! */
-	if (!get_ldev_if_state(mdev, D_FAILED))
-		return;
-
-	buffer = drbd_md_get_buffer(mdev);
-	if (!buffer)
-		goto out;
-
 	memset(buffer, 0, sizeof(*buffer));
 
 	buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
@@ -2937,6 +2915,35 @@ void drbd_md_sync(struct drbd_conf *mdev)
 		dev_err(DEV, "meta data update failed!\n");
 		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
 	}
+}
+
+/**
+ * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
+ * @mdev:	DRBD device.
+ */
+void drbd_md_sync(struct drbd_conf *mdev)
+{
+	struct meta_data_on_disk *buffer;
+
+	/* Don't accidentally change the DRBD meta data layout. */
+	BUILD_BUG_ON(UI_SIZE != 4);
+	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
+
+	del_timer(&mdev->md_sync_timer);
+	/* timer may be rearmed by drbd_md_mark_dirty() now. */
+	if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
+		return;
+
+	/* We use here D_FAILED and not D_ATTACHING because we try to write
+	 * metadata even if we detach due to a disk failure! */
+	if (!get_ldev_if_state(mdev, D_FAILED))
+		return;
+
+	buffer = drbd_md_get_buffer(mdev);
+	if (!buffer)
+		goto out;
+
+	drbd_md_write(mdev, buffer);
 
 	/* Update mdev->ldev->md.la_size_sect,
 	 * since we updated it on metadata. */
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 9e3f441e7e84..8cc1e640f485 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -417,6 +417,7 @@ static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
 
 bool conn_try_outdate_peer(struct drbd_tconn *tconn)
 {
+	unsigned int connect_cnt;
 	union drbd_state mask = { };
 	union drbd_state val = { };
 	enum drbd_fencing_p fp;
@@ -428,6 +429,10 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
 		return false;
 	}
 
+	spin_lock_irq(&tconn->req_lock);
+	connect_cnt = tconn->connect_cnt;
+	spin_unlock_irq(&tconn->req_lock);
+
 	fp = highest_fencing_policy(tconn);
 	switch (fp) {
 	case FP_NOT_AVAIL:
@@ -492,8 +497,14 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
 	   here, because we might were able to re-establish the connection in the
 	   meantime. */
 	spin_lock_irq(&tconn->req_lock);
-	if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags))
-		_conn_request_state(tconn, mask, val, CS_VERBOSE);
+	if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags)) {
+		if (tconn->connect_cnt != connect_cnt)
+			/* In case the connection was established and droped
+			   while the fence-peer handler was running, ignore it */
+			conn_info(tconn, "Ignoring fence-peer exit code\n");
+		else
+			_conn_request_state(tconn, mask, val, CS_VERBOSE);
+	}
 	spin_unlock_irq(&tconn->req_lock);
 
 	return conn_highest_pdsk(tconn) <= D_OUTDATED;
@@ -816,15 +827,20 @@ void drbd_resume_io(struct drbd_conf *mdev)
  * Returns 0 on success, negative return values indicate errors.
  * You should call drbd_md_sync() after calling this function.
  */
-enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
+enum determine_dev_size
+drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
 {
 	sector_t prev_first_sect, prev_size; /* previous meta location */
 	sector_t la_size_sect, u_size;
+	struct drbd_md *md = &mdev->ldev->md;
+	u32 prev_al_stripe_size_4k;
+	u32 prev_al_stripes;
 	sector_t size;
 	char ppb[10];
+	void *buffer;
 
 	int md_moved, la_size_changed;
-	enum determine_dev_size rv = unchanged;
+	enum determine_dev_size rv = DS_UNCHANGED;
 
 	/* race:
 	 * application request passes inc_ap_bio,
@@ -836,6 +852,11 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
 	 * still lock the act_log to not trigger ASSERTs there.
 	 */
 	drbd_suspend_io(mdev);
+	buffer = drbd_md_get_buffer(mdev); /* Lock meta-data IO */
+	if (!buffer) {
+		drbd_resume_io(mdev);
+		return DS_ERROR;
+	}
 
 	/* no wait necessary anymore, actually we could assert that */
 	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
@@ -844,7 +865,17 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
 	prev_size = mdev->ldev->md.md_size_sect;
 	la_size_sect = mdev->ldev->md.la_size_sect;
 
-	/* TODO: should only be some assert here, not (re)init... */
+	if (rs) {
+		/* rs is non NULL if we should change the AL layout only */
+
+		prev_al_stripes = md->al_stripes;
+		prev_al_stripe_size_4k = md->al_stripe_size_4k;
+
+		md->al_stripes = rs->al_stripes;
+		md->al_stripe_size_4k = rs->al_stripe_size / 4;
+		md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
+	}
+
 	drbd_md_set_sector_offsets(mdev, mdev->ldev);
 
 	rcu_read_lock();
@@ -852,6 +883,21 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
 	rcu_read_unlock();
 	size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);
 
+	if (size < la_size_sect) {
+		if (rs && u_size == 0) {
+			/* Remove "rs &&" later. This check should always be active, but
+			   right now the receiver expects the permissive behavior */
+			dev_warn(DEV, "Implicit shrink not allowed. "
+				 "Use --size=%llus for explicit shrink.\n",
+				 (unsigned long long)size);
+			rv = DS_ERROR_SHRINK;
+		}
+		if (u_size > size)
+			rv = DS_ERROR_SPACE_MD;
+		if (rv != DS_UNCHANGED)
+			goto err_out;
+	}
+
 	if (drbd_get_capacity(mdev->this_bdev) != size ||
 	    drbd_bm_capacity(mdev) != size) {
 		int err;
@@ -867,7 +913,7 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
 				"Leaving size unchanged at size = %lu KB\n",
 				(unsigned long)size);
 		}
-		rv = dev_size_error;
+		rv = DS_ERROR;
 	}
 	/* racy, see comments above. */
 	drbd_set_my_capacity(mdev, size);
@@ -875,38 +921,57 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
 		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
 			(unsigned long long)size>>1);
 	}
-	if (rv == dev_size_error)
-		goto out;
+	if (rv <= DS_ERROR)
+		goto err_out;
 
 	la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect);
 
 	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
 		|| prev_size != mdev->ldev->md.md_size_sect;
 
-	if (la_size_changed || md_moved) {
-		int err;
+	if (la_size_changed || md_moved || rs) {
+		u32 prev_flags;
 
 		drbd_al_shrink(mdev); /* All extents inactive. */
+
+		prev_flags = md->flags;
+		md->flags &= ~MDF_PRIMARY_IND;
+		drbd_md_write(mdev, buffer);
+
 		dev_info(DEV, "Writing the whole bitmap, %s\n",
 			 la_size_changed && md_moved ? "size changed and md moved" :
 			 la_size_changed ? "size changed" : "md moved");
 		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
-		err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
-				     "size changed", BM_LOCKED_MASK);
-		if (err) {
-			rv = dev_size_error;
-			goto out;
-		}
-		drbd_md_mark_dirty(mdev);
+		drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
+			       "size changed", BM_LOCKED_MASK);
+		drbd_initialize_al(mdev, buffer);
+
+		md->flags = prev_flags;
+		drbd_md_write(mdev, buffer);
+
+		if (rs)
+			dev_info(DEV, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
+				 md->al_stripes, md->al_stripe_size_4k * 4);
 	}
 
 	if (size > la_size_sect)
-		rv = grew;
+		rv = DS_GREW;
 	if (size < la_size_sect)
-		rv = shrunk;
-out:
+		rv = DS_SHRUNK;
+
+	if (0) {
+	err_out:
+		if (rs) {
+			md->al_stripes = prev_al_stripes;
+			md->al_stripe_size_4k = prev_al_stripe_size_4k;
+			md->al_size_4k = (u64)prev_al_stripes * prev_al_stripe_size_4k;
+
			drbd_md_set_sector_offsets(mdev, mdev->ldev);
+		}
+	}
 	lc_unlock(mdev->act_log);
 	wake_up(&mdev->al_wait);
+	drbd_md_put_buffer(mdev);
 	drbd_resume_io(mdev);
 
 	return rv;
@@ -1607,11 +1672,11 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
 		set_bit(USE_DEGR_WFC_T, &mdev->flags);
 
-	dd = drbd_determine_dev_size(mdev, 0);
-	if (dd == dev_size_error) {
+	dd = drbd_determine_dev_size(mdev, 0, NULL);
+	if (dd <= DS_ERROR) {
 		retcode = ERR_NOMEM_BITMAP;
 		goto force_diskless_dec;
-	} else if (dd == grew)
+	} else if (dd == DS_GREW)
 		set_bit(RESYNC_AFTER_NEG, &mdev->flags);
 
 	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
@@ -2305,6 +2370,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
 	struct drbd_conf *mdev;
 	enum drbd_ret_code retcode;
 	enum determine_dev_size dd;
+	bool change_al_layout = false;
 	enum dds_flags ddsf;
 	sector_t u_size;
 	int err;
@@ -2315,31 +2381,33 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
 	if (retcode != NO_ERROR)
 		goto fail;
 
+	mdev = adm_ctx.mdev;
+	if (!get_ldev(mdev)) {
+		retcode = ERR_NO_DISK;
+		goto fail;
+	}
+
 	memset(&rs, 0, sizeof(struct resize_parms));
+	rs.al_stripes = mdev->ldev->md.al_stripes;
+	rs.al_stripe_size = mdev->ldev->md.al_stripe_size_4k * 4;
 	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
 		err = resize_parms_from_attrs(&rs, info);
 		if (err) {
 			retcode = ERR_MANDATORY_TAG;
 			drbd_msg_put_info(from_attrs_err_to_txt(err));
-			goto fail;
+			goto fail_ldev;
 		}
 	}
 
-	mdev = adm_ctx.mdev;
 	if (mdev->state.conn > C_CONNECTED) {
 		retcode = ERR_RESIZE_RESYNC;
-		goto fail;
+		goto fail_ldev;
 	}
 
 	if (mdev->state.role == R_SECONDARY &&
 	    mdev->state.peer == R_SECONDARY) {
 		retcode = ERR_NO_PRIMARY;
-		goto fail;
-	}
-
-	if (!get_ldev(mdev)) {
-		retcode = ERR_NO_DISK;
-		goto fail;
+		goto fail_ldev;
 	}
 
 	if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
@@ -2358,6 +2426,28 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
 		}
 	}
 
+	if (mdev->ldev->md.al_stripes != rs.al_stripes ||
+	    mdev->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
+		u32 al_size_k = rs.al_stripes * rs.al_stripe_size;
+
+		if (al_size_k > (16 * 1024 * 1024)) {
+			retcode = ERR_MD_LAYOUT_TOO_BIG;
+			goto fail_ldev;
+		}
+
+		if (al_size_k < MD_32kB_SECT/2) {
+			retcode = ERR_MD_LAYOUT_TOO_SMALL;
+			goto fail_ldev;
+		}
+
+		if (mdev->state.conn != C_CONNECTED) {
+			retcode = ERR_MD_LAYOUT_CONNECTED;
+			goto fail_ldev;
+		}
+
+		change_al_layout = true;
+	}
+
 	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
 		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
 
@@ -2373,16 +2463,22 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
-	dd = drbd_determine_dev_size(mdev, ddsf);
+	dd = drbd_determine_dev_size(mdev, ddsf, change_al_layout ? &rs : NULL);
 	drbd_md_sync(mdev);
 	put_ldev(mdev);
-	if (dd == dev_size_error) {
+	if (dd == DS_ERROR) {
 		retcode = ERR_NOMEM_BITMAP;
 		goto fail;
+	} else if (dd == DS_ERROR_SPACE_MD) {
+		retcode = ERR_MD_LAYOUT_NO_FIT;
+		goto fail;
+	} else if (dd == DS_ERROR_SHRINK) {
+		retcode = ERR_IMPLICIT_SHRINK;
+		goto fail;
 	}
 
 	if (mdev->state.conn == C_CONNECTED) {
-		if (dd == grew)
+		if (dd == DS_GREW)
 			set_bit(RESIZE_PENDING, &mdev->flags);
 
 		drbd_send_uuids(mdev);
@@ -2658,7 +2754,6 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
 			const struct sib_info *sib)
 {
 	struct state_info *si = NULL; /* for sizeof(si->member); */
-	struct net_conf *nc;
 	struct nlattr *nla;
 	int got_ldev;
 	int err = 0;
@@ -2688,13 +2783,19 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
 		goto nla_put_failure;
 
 	rcu_read_lock();
-	if (got_ldev)
-		if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
-			goto nla_put_failure;
+	if (got_ldev) {
+		struct disk_conf *disk_conf;
 
-	nc = rcu_dereference(mdev->tconn->net_conf);
-	if (nc)
-		err = net_conf_to_skb(skb, nc, exclude_sensitive);
+		disk_conf = rcu_dereference(mdev->ldev->disk_conf);
+		err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
+	}
+	if (!err) {
+		struct net_conf *nc;
+
+		nc = rcu_dereference(mdev->tconn->net_conf);
+		if (nc)
+			err = net_conf_to_skb(skb, nc, exclude_sensitive);
+	}
 	rcu_read_unlock();
 	if (err)
 		goto nla_put_failure;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 4222affff488..cc29cd3bf78b 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1039,6 +1039,8 @@ randomize:
 	rcu_read_lock();
 	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
 		kref_get(&mdev->kref);
+		rcu_read_unlock();
+
 		/* Prevent a race between resync-handshake and
 		 * being promoted to Primary.
 		 *
@@ -1049,8 +1051,6 @@ randomize:
 		mutex_lock(mdev->state_mutex);
 		mutex_unlock(mdev->state_mutex);
 
-		rcu_read_unlock();
-
 		if (discard_my_data)
 			set_bit(DISCARD_MY_DATA, &mdev->flags);
 		else
@@ -3545,7 +3545,7 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
 {
 	struct drbd_conf *mdev;
 	struct p_sizes *p = pi->data;
-	enum determine_dev_size dd = unchanged;
+	enum determine_dev_size dd = DS_UNCHANGED;
 	sector_t p_size, p_usize, my_usize;
 	int ldsc = 0; /* local disk size changed */
 	enum dds_flags ddsf;
@@ -3617,9 +3617,9 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
 
 	ddsf = be16_to_cpu(p->dds_flags);
 	if (get_ldev(mdev)) {
-		dd = drbd_determine_dev_size(mdev, ddsf);
+		dd = drbd_determine_dev_size(mdev, ddsf, NULL);
 		put_ldev(mdev);
-		if (dd == dev_size_error)
+		if (dd == DS_ERROR)
 			return -EIO;
 		drbd_md_sync(mdev);
 	} else {
@@ -3647,7 +3647,7 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
 		drbd_send_sizes(mdev, 0, ddsf);
 	}
 	if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
-	    (dd == grew && mdev->state.conn == C_CONNECTED)) {
+	    (dd == DS_GREW && mdev->state.conn == C_CONNECTED)) {
 		if (mdev->state.pdsk >= D_INCONSISTENT &&
 		    mdev->state.disk >= D_INCONSISTENT) {
 			if (ddsf & DDSF_NO_RESYNC)
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 90c5be2b1d30..216d47b7e88b 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -1115,8 +1115,10 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
 		drbd_thread_restart_nowait(&mdev->tconn->receiver);
 
 	/* Resume AL writing if we get a connection */
-	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
+	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
 		drbd_resume_al(mdev);
+		mdev->tconn->connect_cnt++;
+	}
 
 	/* remember last attach time so request_timer_fn() won't
 	 * kill newly established sessions while we are still trying to thaw
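This increment is the other half of the connect_cnt logic added to conn_try_outdate_peer() above: the handler samples the counter before running and refuses to apply its result if a connection was established in the meantime. The generation-counter pattern in isolation (toy single-threaded model):

#include <stdio.h>

static unsigned int connect_cnt;	/* bumped on every new connection */

static void fence_peer_handler(void)
{
	unsigned int seen = connect_cnt;

	/* ... long-running external handler; a reconnect may race in ... */
	connect_cnt++;			/* simulate that race */

	if (connect_cnt != seen)
		puts("Ignoring fence-peer exit code");	/* result is stale */
	else
		puts("applying fence-peer result");
}

int main(void)
{
	fence_peer_handler();
	return 0;
}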
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 5af21f2db29c..6e85e21445eb 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -31,6 +31,8 @@
 #include <linux/slab.h>
 #include <linux/bitops.h>
 #include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
 
 #include <linux/genhd.h>
 #include <linux/idr.h>
@@ -39,8 +41,9 @@
 #include "rsxx_cfg.h"
 
 #define NO_LEGACY 0
+#define SYNC_START_TIMEOUT (10 * 60) /* 10 minutes */
 
-MODULE_DESCRIPTION("IBM FlashSystem 70/80 PCIe SSD Device Driver");
+MODULE_DESCRIPTION("IBM Flash Adapter 900GB Full Height Device Driver");
 MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRIVER_VERSION);
@@ -49,9 +52,282 @@ static unsigned int force_legacy = NO_LEGACY;
 module_param(force_legacy, uint, 0444);
 MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");
 
+static unsigned int sync_start = 1;
+module_param(sync_start, uint, 0444);
+MODULE_PARM_DESC(sync_start, "On by Default: Driver load will not complete "
+			     "until the card startup has completed.");
+
 static DEFINE_IDA(rsxx_disk_ida);
 static DEFINE_SPINLOCK(rsxx_ida_lock);
 
+/* --------------------Debugfs Setup ------------------- */
+
+struct rsxx_cram {
+	u32 f_pos;
+	u32 offset;
+	void *i_private;
+};
+
+static int rsxx_attr_pci_regs_show(struct seq_file *m, void *p)
+{
+	struct rsxx_cardinfo *card = m->private;
+
+	seq_printf(m, "HWID 0x%08x\n",
+			ioread32(card->regmap + HWID));
+	seq_printf(m, "SCRATCH 0x%08x\n",
+			ioread32(card->regmap + SCRATCH));
+	seq_printf(m, "IER 0x%08x\n",
+			ioread32(card->regmap + IER));
+	seq_printf(m, "IPR 0x%08x\n",
+			ioread32(card->regmap + IPR));
+	seq_printf(m, "CREG_CMD 0x%08x\n",
+			ioread32(card->regmap + CREG_CMD));
+	seq_printf(m, "CREG_ADD 0x%08x\n",
+			ioread32(card->regmap + CREG_ADD));
+	seq_printf(m, "CREG_CNT 0x%08x\n",
+			ioread32(card->regmap + CREG_CNT));
+	seq_printf(m, "CREG_STAT 0x%08x\n",
+			ioread32(card->regmap + CREG_STAT));
+	seq_printf(m, "CREG_DATA0 0x%08x\n",
+			ioread32(card->regmap + CREG_DATA0));
+	seq_printf(m, "CREG_DATA1 0x%08x\n",
+			ioread32(card->regmap + CREG_DATA1));
+	seq_printf(m, "CREG_DATA2 0x%08x\n",
+			ioread32(card->regmap + CREG_DATA2));
+	seq_printf(m, "CREG_DATA3 0x%08x\n",
+			ioread32(card->regmap + CREG_DATA3));
+	seq_printf(m, "CREG_DATA4 0x%08x\n",
+			ioread32(card->regmap + CREG_DATA4));
+	seq_printf(m, "CREG_DATA5 0x%08x\n",
+			ioread32(card->regmap + CREG_DATA5));
+	seq_printf(m, "CREG_DATA6 0x%08x\n",
+			ioread32(card->regmap + CREG_DATA6));
+	seq_printf(m, "CREG_DATA7 0x%08x\n",
+			ioread32(card->regmap + CREG_DATA7));
+	seq_printf(m, "INTR_COAL 0x%08x\n",
+			ioread32(card->regmap + INTR_COAL));
+	seq_printf(m, "HW_ERROR 0x%08x\n",
+			ioread32(card->regmap + HW_ERROR));
+	seq_printf(m, "DEBUG0 0x%08x\n",
+			ioread32(card->regmap + PCI_DEBUG0));
+	seq_printf(m, "DEBUG1 0x%08x\n",
+			ioread32(card->regmap + PCI_DEBUG1));
+	seq_printf(m, "DEBUG2 0x%08x\n",
+			ioread32(card->regmap + PCI_DEBUG2));
+	seq_printf(m, "DEBUG3 0x%08x\n",
+			ioread32(card->regmap + PCI_DEBUG3));
+	seq_printf(m, "DEBUG4 0x%08x\n",
+			ioread32(card->regmap + PCI_DEBUG4));
+	seq_printf(m, "DEBUG5 0x%08x\n",
+			ioread32(card->regmap + PCI_DEBUG5));
+	seq_printf(m, "DEBUG6 0x%08x\n",
+			ioread32(card->regmap + PCI_DEBUG6));
+	seq_printf(m, "DEBUG7 0x%08x\n",
+			ioread32(card->regmap + PCI_DEBUG7));
+	seq_printf(m, "RECONFIG 0x%08x\n",
+			ioread32(card->regmap + PCI_RECONFIG));
+
+	return 0;
+}
+
+static int rsxx_attr_stats_show(struct seq_file *m, void *p)
+{
+	struct rsxx_cardinfo *card = m->private;
+	int i;
+
+	for (i = 0; i < card->n_targets; i++) {
+		seq_printf(m, "Ctrl %d CRC Errors = %d\n",
+				i, card->ctrl[i].stats.crc_errors);
+		seq_printf(m, "Ctrl %d Hard Errors = %d\n",
+				i, card->ctrl[i].stats.hard_errors);
+		seq_printf(m, "Ctrl %d Soft Errors = %d\n",
+				i, card->ctrl[i].stats.soft_errors);
+		seq_printf(m, "Ctrl %d Writes Issued = %d\n",
+				i, card->ctrl[i].stats.writes_issued);
+		seq_printf(m, "Ctrl %d Writes Failed = %d\n",
+				i, card->ctrl[i].stats.writes_failed);
+		seq_printf(m, "Ctrl %d Reads Issued = %d\n",
+				i, card->ctrl[i].stats.reads_issued);
+		seq_printf(m, "Ctrl %d Reads Failed = %d\n",
+				i, card->ctrl[i].stats.reads_failed);
+		seq_printf(m, "Ctrl %d Reads Retried = %d\n",
+				i, card->ctrl[i].stats.reads_retried);
+		seq_printf(m, "Ctrl %d Discards Issued = %d\n",
+				i, card->ctrl[i].stats.discards_issued);
+		seq_printf(m, "Ctrl %d Discards Failed = %d\n",
+				i, card->ctrl[i].stats.discards_failed);
+		seq_printf(m, "Ctrl %d DMA SW Errors = %d\n",
+				i, card->ctrl[i].stats.dma_sw_err);
+		seq_printf(m, "Ctrl %d DMA HW Faults = %d\n",
+				i, card->ctrl[i].stats.dma_hw_fault);
+		seq_printf(m, "Ctrl %d DMAs Cancelled = %d\n",
+				i, card->ctrl[i].stats.dma_cancelled);
+		seq_printf(m, "Ctrl %d SW Queue Depth = %d\n",
+				i, card->ctrl[i].stats.sw_q_depth);
+		seq_printf(m, "Ctrl %d HW Queue Depth = %d\n",
+				i, atomic_read(&card->ctrl[i].stats.hw_q_depth));
+	}
+
+	return 0;
+}
+
+static int rsxx_attr_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rsxx_attr_stats_show, inode->i_private);
+}
+
+static int rsxx_attr_pci_regs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rsxx_attr_pci_regs_show, inode->i_private);
+}
+
+static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
+			      size_t cnt, loff_t *ppos)
+{
+	struct rsxx_cram *info = fp->private_data;
+	struct rsxx_cardinfo *card = info->i_private;
+	char *buf;
+	int st;
+
+	buf = kzalloc(sizeof(*buf) * cnt, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	info->f_pos = (u32)*ppos + info->offset;
+
+	st = rsxx_creg_read(card, CREG_ADD_CRAM + info->f_pos, cnt, buf, 1);
+	if (st)
+		return st;
+
+	st = copy_to_user(ubuf, buf, cnt);
+	if (st)
+		return st;
+
+	info->offset += cnt;
+
+	kfree(buf);
+
+	return cnt;
+}
+
+static ssize_t rsxx_cram_write(struct file *fp, const char __user *ubuf,
+			       size_t cnt, loff_t *ppos)
+{
+	struct rsxx_cram *info = fp->private_data;
+	struct rsxx_cardinfo *card = info->i_private;
+	char *buf;
+	int st;
+
+	buf = kzalloc(sizeof(*buf) * cnt, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	st = copy_from_user(buf, ubuf, cnt);
+	if (st)
+		return st;
+
+	info->f_pos = (u32)*ppos + info->offset;
+
+	st = rsxx_creg_write(card, CREG_ADD_CRAM + info->f_pos, cnt, buf, 1);
+	if (st)
+		return st;
+
+	info->offset += cnt;
+
+	kfree(buf);
+
+	return cnt;
+}
+
+static int rsxx_cram_open(struct inode *inode, struct file *file)
+{
+	struct rsxx_cram *info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->i_private = inode->i_private;
+	info->f_pos = file->f_pos;
+	file->private_data = info;
+
+	return 0;
+}
+
+static int rsxx_cram_release(struct inode *inode, struct file *file)
+{
+	struct rsxx_cram *info = file->private_data;
+
+	if (!info)
+		return 0;
+
+	kfree(info);
+	file->private_data = NULL;
+
+	return 0;
+}
+
+static const struct file_operations debugfs_cram_fops = {
+	.owner = THIS_MODULE,
+	.open = rsxx_cram_open,
+	.read = rsxx_cram_read,
+	.write = rsxx_cram_write,
+	.release = rsxx_cram_release,
+};
+
+static const struct file_operations debugfs_stats_fops = {
+	.owner = THIS_MODULE,
+	.open = rsxx_attr_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static const struct file_operations debugfs_pci_regs_fops = {
+	.owner = THIS_MODULE,
+	.open = rsxx_attr_pci_regs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static void rsxx_debugfs_dev_new(struct rsxx_cardinfo *card)
+{
+	struct dentry *debugfs_stats;
+	struct dentry *debugfs_pci_regs;
+	struct dentry *debugfs_cram;
+
+	card->debugfs_dir = debugfs_create_dir(card->gendisk->disk_name, NULL);
+	if (IS_ERR_OR_NULL(card->debugfs_dir))
+		goto failed_debugfs_dir;
+
+	debugfs_stats = debugfs_create_file("stats", S_IRUGO,
+					    card->debugfs_dir, card,
+					    &debugfs_stats_fops);
+	if (IS_ERR_OR_NULL(debugfs_stats))
+		goto failed_debugfs_stats;
+
+	debugfs_pci_regs = debugfs_create_file("pci_regs", S_IRUGO,
+					       card->debugfs_dir, card,
+					       &debugfs_pci_regs_fops);
+	if (IS_ERR_OR_NULL(debugfs_pci_regs))
+		goto failed_debugfs_pci_regs;
+
+	debugfs_cram = debugfs_create_file("cram", S_IRUGO | S_IWUSR,
+					   card->debugfs_dir, card,
+					   &debugfs_cram_fops);
+	if (IS_ERR_OR_NULL(debugfs_cram))
+		goto failed_debugfs_cram;
+
+	return;
+failed_debugfs_cram:
+	debugfs_remove(debugfs_pci_regs);
+failed_debugfs_pci_regs:
+	debugfs_remove(debugfs_stats);
+failed_debugfs_stats:
+	debugfs_remove(card->debugfs_dir);
+failed_debugfs_dir:
+	card->debugfs_dir = NULL;
+}
+
 /*----------------- Interrupt Control & Handling -------------------*/
 
 static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
@@ -163,12 +439,13 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
163 } 439 }
164 440
165 if (isr & CR_INTR_CREG) { 441 if (isr & CR_INTR_CREG) {
166 schedule_work(&card->creg_ctrl.done_work); 442 queue_work(card->creg_ctrl.creg_wq,
443 &card->creg_ctrl.done_work);
167 handled++; 444 handled++;
168 } 445 }
169 446
170 if (isr & CR_INTR_EVENT) { 447 if (isr & CR_INTR_EVENT) {
171 schedule_work(&card->event_work); 448 queue_work(card->event_wq, &card->event_work);
172 rsxx_disable_ier_and_isr(card, CR_INTR_EVENT); 449 rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
173 handled++; 450 handled++;
174 } 451 }
@@ -329,7 +606,7 @@ static int rsxx_eeh_frozen(struct pci_dev *dev)
329 int i; 606 int i;
330 int st; 607 int st;
331 608
332 dev_warn(&dev->dev, "IBM FlashSystem PCI: preparing for slot reset.\n"); 609 dev_warn(&dev->dev, "IBM Flash Adapter PCI: preparing for slot reset.\n");
333 610
334 card->eeh_state = 1; 611 card->eeh_state = 1;
335 rsxx_mask_interrupts(card); 612 rsxx_mask_interrupts(card);
@@ -367,15 +644,26 @@ static void rsxx_eeh_failure(struct pci_dev *dev)
367{ 644{
368 struct rsxx_cardinfo *card = pci_get_drvdata(dev); 645 struct rsxx_cardinfo *card = pci_get_drvdata(dev);
369 int i; 646 int i;
647 int cnt = 0;
370 648
371 dev_err(&dev->dev, "IBM FlashSystem PCI: disabling failed card.\n"); 649 dev_err(&dev->dev, "IBM Flash Adapter PCI: disabling failed card.\n");
372 650
373 card->eeh_state = 1; 651 card->eeh_state = 1;
652 card->halt = 1;
374 653
375 for (i = 0; i < card->n_targets; i++) 654 for (i = 0; i < card->n_targets; i++) {
376 del_timer_sync(&card->ctrl[i].activity_timer); 655 spin_lock_bh(&card->ctrl[i].queue_lock);
656 cnt = rsxx_cleanup_dma_queue(&card->ctrl[i],
657 &card->ctrl[i].queue);
658 spin_unlock_bh(&card->ctrl[i].queue_lock);
659
660 cnt += rsxx_dma_cancel(&card->ctrl[i]);
377 661
378 rsxx_eeh_cancel_dmas(card); 662 if (cnt)
663 dev_info(CARD_TO_DEV(card),
664 "Freed %d queued DMAs on channel %d\n",
665 cnt, card->ctrl[i].id);
666 }
379} 667}
380 668
381static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card) 669static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
@@ -432,7 +720,7 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
432 int st; 720 int st;
433 721
434 dev_warn(&dev->dev, 722 dev_warn(&dev->dev,
435 "IBM FlashSystem PCI: recovering from slot reset.\n"); 723 "IBM Flash Adapter PCI: recovering from slot reset.\n");
436 724
437 st = pci_enable_device(dev); 725 st = pci_enable_device(dev);
438 if (st) 726 if (st)
@@ -485,7 +773,7 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
485 &card->ctrl[i].issue_dma_work); 773 &card->ctrl[i].issue_dma_work);
486 } 774 }
487 775
488 dev_info(&dev->dev, "IBM FlashSystem PCI: recovery complete.\n"); 776 dev_info(&dev->dev, "IBM Flash Adapter PCI: recovery complete.\n");
489 777
 	return PCI_ERS_RESULT_RECOVERED;
 
@@ -528,6 +816,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
 {
 	struct rsxx_cardinfo *card;
 	int st;
+	unsigned int sync_timeout;
 
 	dev_info(&dev->dev, "PCI-Flash SSD discovered\n");
 
@@ -610,7 +899,11 @@ static int rsxx_pci_probe(struct pci_dev *dev,
 	}
 
 	/************* Setup Processor Command Interface *************/
-	rsxx_creg_setup(card);
+	st = rsxx_creg_setup(card);
+	if (st) {
+		dev_err(CARD_TO_DEV(card), "Failed to setup creg interface.\n");
+		goto failed_creg_setup;
+	}
 
 	spin_lock_irq(&card->irq_lock);
 	rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
@@ -650,6 +943,12 @@ static int rsxx_pci_probe(struct pci_dev *dev,
 	}
 
 	/************* Setup Card Event Handler *************/
+	card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event");
+	if (!card->event_wq) {
+		dev_err(CARD_TO_DEV(card), "Failed card event setup.\n");
+		goto failed_event_handler;
+	}
+
 	INIT_WORK(&card->event_work, card_event_handler);
 
 	st = rsxx_setup_dev(card);
@@ -676,6 +975,33 @@ static int rsxx_pci_probe(struct pci_dev *dev,
 		if (st)
 			dev_crit(CARD_TO_DEV(card),
 				"Failed issuing card startup\n");
+		if (sync_start) {
+			sync_timeout = SYNC_START_TIMEOUT;
+
+			dev_info(CARD_TO_DEV(card),
+				 "Waiting for card to startup\n");
+
+			do {
+				ssleep(1);
+				sync_timeout--;
+
+				rsxx_get_card_state(card, &card->state);
+			} while (sync_timeout &&
+				(card->state == CARD_STATE_STARTING));
+
+			if (card->state == CARD_STATE_STARTING) {
+				dev_warn(CARD_TO_DEV(card),
+					 "Card startup timed out\n");
+				card->size8 = 0;
+			} else {
+				dev_info(CARD_TO_DEV(card),
+					"card state: %s\n",
+					rsxx_card_state_to_str(card->state));
+				st = rsxx_get_card_size8(card, &card->size8);
+				if (st)
+					card->size8 = 0;
+			}
+		}
 	} else if (card->state == CARD_STATE_GOOD ||
 		   card->state == CARD_STATE_RD_ONLY_FAULT) {
 		st = rsxx_get_card_size8(card, &card->size8);
@@ -685,12 +1011,21 @@ static int rsxx_pci_probe(struct pci_dev *dev,
 
 	rsxx_attach_dev(card);
 
+	/************* Setup Debugfs *************/
+	rsxx_debugfs_dev_new(card);
+
 	return 0;
 
 failed_create_dev:
+	destroy_workqueue(card->event_wq);
+	card->event_wq = NULL;
+failed_event_handler:
 	rsxx_dma_destroy(card);
 failed_dma_setup:
 failed_compatiblity_check:
+	destroy_workqueue(card->creg_ctrl.creg_wq);
+	card->creg_ctrl.creg_wq = NULL;
+failed_creg_setup:
 	spin_lock_irq(&card->irq_lock);
 	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
 	spin_unlock_irq(&card->irq_lock);
@@ -756,6 +1091,8 @@ static void rsxx_pci_remove(struct pci_dev *dev)
 	/* Prevent work_structs from re-queuing themselves. */
 	card->halt = 1;
 
+	debugfs_remove_recursive(card->debugfs_dir);
+
 	free_irq(dev->irq, card);
 
 	if (!force_legacy)
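
The reworked probe path unwinds with a goto ladder: each failure label releases exactly what was set up before the jump, in reverse order, so the creg workqueue created first is destroyed last. A standalone sketch of the idiom follows; setup_creg/setup_event are hypothetical stand-ins, not driver code:

    #include <stdio.h>
    #include <stdlib.h>

    /* Resources standing in for the creg and event workqueues. */
    static void *setup_creg(void)  { return malloc(1); }
    static void *setup_event(void) { return malloc(1); }
    static void teardown(void *r)  { free(r); }

    static int probe(void)
    {
        void *creg, *event;

        creg = setup_creg();
        if (!creg)
            goto failed_creg_setup;

        event = setup_event();
        if (!event)
            goto failed_event_handler;

        puts("probe ok");
        teardown(event);
        teardown(creg);
        return 0;

    failed_event_handler:
        teardown(creg);        /* unwind strictly in reverse setup order */
    failed_creg_setup:
        return -1;
    }

    int main(void)
    {
        return probe() ? 1 : 0;
    }
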
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
index 4b5c020a0a65..926dce9c452f 100644
--- a/drivers/block/rsxx/cregs.c
+++ b/drivers/block/rsxx/cregs.c
@@ -431,6 +431,15 @@ static int __issue_creg_rw(struct rsxx_cardinfo *card,
 	*hw_stat = completion.creg_status;
 
 	if (completion.st) {
+		/*
+		 * This read is needed to verify that there have not been any
+		 * extreme errors that might have occurred, i.e. EEH. The
+		 * function iowrite32 will not detect EEH errors, so it is
+		 * necessary that we recover if such an error is the reason
+		 * for the timeout. This is a dummy read.
+		 */
+		ioread32(card->regmap + SCRATCH);
+
 		dev_warn(CARD_TO_DEV(card),
 			"creg command failed(%d x%08x)\n",
 			completion.st, addr);
@@ -727,6 +736,11 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card)
 {
 	card->creg_ctrl.active_cmd = NULL;
 
+	card->creg_ctrl.creg_wq =
+			create_singlethread_workqueue(DRIVER_NAME"_creg");
+	if (!card->creg_ctrl.creg_wq)
+		return -ENOMEM;
+
 	INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
 	mutex_init(&card->creg_ctrl.reset_lock);
 	INIT_LIST_HEAD(&card->creg_ctrl.queue);
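
The new comment in __issue_creg_rw relies on an EEH property: writes to a frozen PCI slot are posted and silently dropped, while reads return all 1s, so a throwaway read is the cheapest way to notice the isolation. A minimal userspace sketch of that detection idea, with SCRATCH as a hypothetical register offset:

    #include <stdint.h>
    #include <stdio.h>

    #define SCRATCH 0    /* hypothetical register offset */

    /* On an EEH-frozen slot, MMIO loads return all 1s; posted writes
     * report nothing, so a dummy read is the only cheap probe. */
    static int device_isolated(const volatile uint32_t *regmap)
    {
        return regmap[SCRATCH / 4] == 0xFFFFFFFFu;
    }

    int main(void)
    {
        uint32_t healthy[1] = { 0x12345678u };
        uint32_t frozen[1]  = { 0xFFFFFFFFu };    /* simulated isolation */

        printf("healthy: %d, frozen: %d\n",
               device_isolated(healthy), device_isolated(frozen));
        return 0;
    }
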
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 4346d17d2949..d7af441880be 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -155,7 +155,8 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
 		atomic_set(&meta->error, 1);
 
 	if (atomic_dec_and_test(&meta->pending_dmas)) {
-		disk_stats_complete(card, meta->bio, meta->start_time);
+		if (!card->eeh_state && card->gendisk)
+			disk_stats_complete(card, meta->bio, meta->start_time);
 
 		bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0);
 		kmem_cache_free(bio_meta_pool, meta);
@@ -170,6 +171,12 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
 
 	might_sleep();
 
+	if (!card)
+		goto req_err;
+
+	if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk))
+		goto req_err;
+
 	if (unlikely(card->halt)) {
 		st = -EFAULT;
 		goto req_err;
@@ -196,7 +203,8 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
 	atomic_set(&bio_meta->pending_dmas, 0);
 	bio_meta->start_time = jiffies;
 
-	disk_stats_start(card, bio);
+	if (!unlikely(card->halt))
+		disk_stats_start(card, bio);
 
 	dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
 		bio_data_dir(bio) ? 'W' : 'R', bio_meta,
@@ -225,24 +233,6 @@ static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
 	return (pci_rev >= RSXX_DISCARD_SUPPORT);
 }
 
-static unsigned short rsxx_get_logical_block_size(
-					struct rsxx_cardinfo *card)
-{
-	u32 capabilities = 0;
-	int st;
-
-	st = rsxx_get_card_capabilities(card, &capabilities);
-	if (st)
-		dev_warn(CARD_TO_DEV(card),
-			"Failed reading card capabilities register\n");
-
-	/* Earlier firmware did not have support for 512 byte accesses */
-	if (capabilities & CARD_CAP_SUBPAGE_WRITES)
-		return 512;
-	else
-		return RSXX_HW_BLK_SIZE;
-}
-
 int rsxx_attach_dev(struct rsxx_cardinfo *card)
 {
 	mutex_lock(&card->dev_lock);
@@ -305,7 +295,7 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
 		return -ENOMEM;
 	}
 
-	blk_size = rsxx_get_logical_block_size(card);
+	blk_size = card->config.data.block_size;
 
 	blk_queue_make_request(card->queue, rsxx_make_request);
 	blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY);
@@ -347,6 +337,7 @@ void rsxx_destroy_dev(struct rsxx_cardinfo *card)
 	card->gendisk = NULL;
 
 	blk_cleanup_queue(card->queue);
+	card->queue->queuedata = NULL;
 	unregister_blkdev(card->major, DRIVER_NAME);
 }
 
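
The added capacity check converts the bio's byte length to 512-byte sectors with a shift by 9 before comparing against the disk size. A small sketch with a worked example; the field names mirror the bio members but the function itself is illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    /* bi_size is in bytes; bi_sector and the capacity are in 512-byte
     * sectors, so the byte count is converted with >> 9 before the check. */
    static int bio_in_range(uint64_t bi_sector, uint32_t bi_size,
                            uint64_t capacity)
    {
        return bi_sector + (bi_size >> 9) <= capacity;
    }

    int main(void)
    {
        /* A 4 KiB bio (8 sectors) at sector 8 ends exactly at sector 16. */
        printf("fits on 16-sector disk: %d\n", bio_in_range(8, 4096, 16));
        printf("fits on 15-sector disk: %d\n", bio_in_range(8, 4096, 15));
        return 0;
    }
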
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 0607513cfb41..bed32f16b084 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -245,6 +245,22 @@ static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
 	kmem_cache_free(rsxx_dma_pool, dma);
 }
 
+int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
+			   struct list_head *q)
+{
+	struct rsxx_dma *dma;
+	struct rsxx_dma *tmp;
+	int cnt = 0;
+
+	list_for_each_entry_safe(dma, tmp, q, list) {
+		list_del(&dma->list);
+		rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+		cnt++;
+	}
+
+	return cnt;
+}
+
 static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
 			     struct rsxx_dma *dma)
 {
@@ -252,9 +268,10 @@ static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
 	 * Requeued DMAs go to the front of the queue so they are issued
 	 * first.
 	 */
-	spin_lock(&ctrl->queue_lock);
+	spin_lock_bh(&ctrl->queue_lock);
+	ctrl->stats.sw_q_depth++;
 	list_add(&dma->list, &ctrl->queue);
-	spin_unlock(&ctrl->queue_lock);
+	spin_unlock_bh(&ctrl->queue_lock);
 }
 
 static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
@@ -329,6 +346,7 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
 static void dma_engine_stalled(unsigned long data)
 {
 	struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
+	int cnt;
 
 	if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
 	    unlikely(ctrl->card->eeh_state))
@@ -349,18 +367,28 @@ static void dma_engine_stalled(unsigned long data)
 			"DMA channel %d has stalled, faulting interface.\n",
 			ctrl->id);
 		ctrl->card->dma_fault = 1;
+
+		/* Clean up the DMA queue */
+		spin_lock(&ctrl->queue_lock);
+		cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+		spin_unlock(&ctrl->queue_lock);
+
+		cnt += rsxx_dma_cancel(ctrl);
+
+		if (cnt)
+			dev_info(CARD_TO_DEV(ctrl->card),
+				"Freed %d queued DMAs on channel %d\n",
+				cnt, ctrl->id);
 	}
 }
 
-static void rsxx_issue_dmas(struct work_struct *work)
+static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
 {
-	struct rsxx_dma_ctrl *ctrl;
 	struct rsxx_dma *dma;
 	int tag;
 	int cmds_pending = 0;
 	struct hw_cmd *hw_cmd_buf;
 
-	ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
 	hw_cmd_buf = ctrl->cmd.buf;
 
 	if (unlikely(ctrl->card->halt) ||
@@ -368,22 +396,22 @@ static void rsxx_issue_dmas(struct work_struct *work)
 		return;
 
 	while (1) {
-		spin_lock(&ctrl->queue_lock);
+		spin_lock_bh(&ctrl->queue_lock);
 		if (list_empty(&ctrl->queue)) {
-			spin_unlock(&ctrl->queue_lock);
+			spin_unlock_bh(&ctrl->queue_lock);
 			break;
 		}
-		spin_unlock(&ctrl->queue_lock);
+		spin_unlock_bh(&ctrl->queue_lock);
 
 		tag = pop_tracker(ctrl->trackers);
 		if (tag == -1)
 			break;
 
-		spin_lock(&ctrl->queue_lock);
+		spin_lock_bh(&ctrl->queue_lock);
 		dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
 		list_del(&dma->list);
 		ctrl->stats.sw_q_depth--;
-		spin_unlock(&ctrl->queue_lock);
+		spin_unlock_bh(&ctrl->queue_lock);
 
 		/*
 		 * This will catch any DMAs that slipped in right before the
@@ -440,9 +468,8 @@ static void rsxx_issue_dmas(struct work_struct *work)
 	}
 }
 
-static void rsxx_dma_done(struct work_struct *work)
+static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
 {
-	struct rsxx_dma_ctrl *ctrl;
 	struct rsxx_dma *dma;
 	unsigned long flags;
 	u16 count;
@@ -450,7 +477,6 @@ static void rsxx_dma_done(struct work_struct *work)
 	u8 tag;
 	struct hw_status *hw_st_buf;
 
-	ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
 	hw_st_buf = ctrl->status.buf;
 
 	if (unlikely(ctrl->card->halt) ||
@@ -520,33 +546,32 @@ static void rsxx_dma_done(struct work_struct *work)
 	rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
 	spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
 
-	spin_lock(&ctrl->queue_lock);
+	spin_lock_bh(&ctrl->queue_lock);
 	if (ctrl->stats.sw_q_depth)
 		queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
-	spin_unlock(&ctrl->queue_lock);
+	spin_unlock_bh(&ctrl->queue_lock);
 }
 
-static int rsxx_cleanup_dma_queue(struct rsxx_cardinfo *card,
-				  struct list_head *q)
+static void rsxx_schedule_issue(struct work_struct *work)
 {
-	struct rsxx_dma *dma;
-	struct rsxx_dma *tmp;
-	int cnt = 0;
+	struct rsxx_dma_ctrl *ctrl;
 
-	list_for_each_entry_safe(dma, tmp, q, list) {
-		list_del(&dma->list);
+	ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
 
-		if (dma->dma_addr)
-			pci_unmap_page(card->dev, dma->dma_addr,
-				       get_dma_size(dma),
-				       (dma->cmd == HW_CMD_BLK_WRITE) ?
-				       PCI_DMA_TODEVICE :
-				       PCI_DMA_FROMDEVICE);
-		kmem_cache_free(rsxx_dma_pool, dma);
-		cnt++;
-	}
+	mutex_lock(&ctrl->work_lock);
+	rsxx_issue_dmas(ctrl);
+	mutex_unlock(&ctrl->work_lock);
+}
 
-	return cnt;
+static void rsxx_schedule_done(struct work_struct *work)
+{
+	struct rsxx_dma_ctrl *ctrl;
+
+	ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
+
+	mutex_lock(&ctrl->work_lock);
+	rsxx_dma_done(ctrl);
+	mutex_unlock(&ctrl->work_lock);
 }
 
 static int rsxx_queue_discard(struct rsxx_cardinfo *card,
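
rsxx_schedule_issue() and rsxx_schedule_done() are thin work adapters: container_of() recovers the rsxx_dma_ctrl from the embedded work_struct, and the new work_lock mutex keeps issue and completion from running concurrently. A compilable sketch of the pattern, using a pthread mutex in place of the kernel mutex:

    #include <pthread.h>
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work { int pending; };

    struct ctrl {
        pthread_mutex_t work_lock;
        struct work     issue_dma_work;
        struct work     dma_done_work;
    };

    /* The callback recovers its ctrl via container_of, then takes the
     * shared mutex, so issue and completion can never overlap. */
    static void schedule_issue(struct work *w)
    {
        struct ctrl *ctrl = container_of(w, struct ctrl, issue_dma_work);

        pthread_mutex_lock(&ctrl->work_lock);
        puts("issuing DMAs");
        pthread_mutex_unlock(&ctrl->work_lock);
    }

    int main(void)
    {
        struct ctrl c = { .work_lock = PTHREAD_MUTEX_INITIALIZER };

        schedule_issue(&c.issue_dma_work);
        return 0;
    }
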
@@ -698,10 +723,10 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 
 	for (i = 0; i < card->n_targets; i++) {
 		if (!list_empty(&dma_list[i])) {
-			spin_lock(&card->ctrl[i].queue_lock);
+			spin_lock_bh(&card->ctrl[i].queue_lock);
 			card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
 			list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
-			spin_unlock(&card->ctrl[i].queue_lock);
+			spin_unlock_bh(&card->ctrl[i].queue_lock);
 
 			queue_work(card->ctrl[i].issue_wq,
 				   &card->ctrl[i].issue_dma_work);
@@ -711,8 +736,11 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 	return 0;
 
 bvec_err:
-	for (i = 0; i < card->n_targets; i++)
-		rsxx_cleanup_dma_queue(card, &dma_list[i]);
+	for (i = 0; i < card->n_targets; i++) {
+		spin_lock_bh(&card->ctrl[i].queue_lock);
+		rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]);
+		spin_unlock_bh(&card->ctrl[i].queue_lock);
+	}
 
 	return st;
 }
@@ -780,6 +808,7 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
 	spin_lock_init(&ctrl->trackers->lock);
 
 	spin_lock_init(&ctrl->queue_lock);
+	mutex_init(&ctrl->work_lock);
 	INIT_LIST_HEAD(&ctrl->queue);
 
 	setup_timer(&ctrl->activity_timer, dma_engine_stalled,
@@ -793,8 +822,8 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
 	if (!ctrl->done_wq)
 		return -ENOMEM;
 
-	INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
-	INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
+	INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
+	INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);
 
 	st = rsxx_hw_buffers_init(dev, ctrl);
 	if (st)
@@ -918,13 +947,30 @@ failed_dma_setup:
 	return st;
 }
 
+int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
+{
+	struct rsxx_dma *dma;
+	int i;
+	int cnt = 0;
+
+	/* Clean up issued DMAs */
+	for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
+		dma = get_tracker_dma(ctrl->trackers, i);
+		if (dma) {
+			atomic_dec(&ctrl->stats.hw_q_depth);
+			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+			push_tracker(ctrl->trackers, i);
+			cnt++;
+		}
+	}
+
+	return cnt;
+}
 
 void rsxx_dma_destroy(struct rsxx_cardinfo *card)
 {
 	struct rsxx_dma_ctrl *ctrl;
-	struct rsxx_dma *dma;
-	int i, j;
-	int cnt = 0;
+	int i;
 
 	for (i = 0; i < card->n_targets; i++) {
 		ctrl = &card->ctrl[i];
@@ -943,33 +989,11 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
 		del_timer_sync(&ctrl->activity_timer);
 
 		/* Clean up the DMA queue */
-		spin_lock(&ctrl->queue_lock);
-		cnt = rsxx_cleanup_dma_queue(card, &ctrl->queue);
-		spin_unlock(&ctrl->queue_lock);
-
-		if (cnt)
-			dev_info(CARD_TO_DEV(card),
-				"Freed %d queued DMAs on channel %d\n",
-				cnt, i);
-
-		/* Clean up issued DMAs */
-		for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
-			dma = get_tracker_dma(ctrl->trackers, j);
-			if (dma) {
-				pci_unmap_page(card->dev, dma->dma_addr,
-					       get_dma_size(dma),
-					       (dma->cmd == HW_CMD_BLK_WRITE) ?
-					       PCI_DMA_TODEVICE :
-					       PCI_DMA_FROMDEVICE);
-				kmem_cache_free(rsxx_dma_pool, dma);
-				cnt++;
-			}
-		}
-
-		if (cnt)
-			dev_info(CARD_TO_DEV(card),
-				"Freed %d pending DMAs on channel %d\n",
-				cnt, i);
+		spin_lock_bh(&ctrl->queue_lock);
+		rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+		spin_unlock_bh(&ctrl->queue_lock);
+
+		rsxx_dma_cancel(ctrl);
 
 		vfree(ctrl->trackers);
 
@@ -1013,7 +1037,7 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
 			cnt++;
 		}
 
-		spin_lock(&card->ctrl[i].queue_lock);
+		spin_lock_bh(&card->ctrl[i].queue_lock);
 		list_splice(&issued_dmas[i], &card->ctrl[i].queue);
 
 		atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
@@ -1028,7 +1052,7 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
 				       PCI_DMA_TODEVICE :
 				       PCI_DMA_FROMDEVICE);
 		}
-		spin_unlock(&card->ctrl[i].queue_lock);
+		spin_unlock_bh(&card->ctrl[i].queue_lock);
 	}
 
 	kfree(issued_dmas);
@@ -1036,30 +1060,13 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
 	return 0;
 }
 
-void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card)
-{
-	struct rsxx_dma *dma;
-	struct rsxx_dma *tmp;
-	int i;
-
-	for (i = 0; i < card->n_targets; i++) {
-		spin_lock(&card->ctrl[i].queue_lock);
-		list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) {
-			list_del(&dma->list);
-
-			rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED);
-		}
-		spin_unlock(&card->ctrl[i].queue_lock);
-	}
-}
-
 int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
 {
 	struct rsxx_dma *dma;
 	int i;
 
 	for (i = 0; i < card->n_targets; i++) {
-		spin_lock(&card->ctrl[i].queue_lock);
+		spin_lock_bh(&card->ctrl[i].queue_lock);
 		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
 			dma->dma_addr = pci_map_page(card->dev, dma->page,
 					dma->pg_off, get_dma_size(dma),
@@ -1067,12 +1074,12 @@ int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
 					PCI_DMA_TODEVICE :
 					PCI_DMA_FROMDEVICE);
 			if (!dma->dma_addr) {
-				spin_unlock(&card->ctrl[i].queue_lock);
+				spin_unlock_bh(&card->ctrl[i].queue_lock);
 				kmem_cache_free(rsxx_dma_pool, dma);
 				return -ENOMEM;
 			}
 		}
-		spin_unlock(&card->ctrl[i].queue_lock);
+		spin_unlock_bh(&card->ctrl[i].queue_lock);
 	}
 
 	return 0;
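
rsxx_dma_cancel() reclaims in-flight commands by walking every tracker slot: any tag still holding a DMA is completed as cancelled and its tag pushed back onto the free stack. A toy tracker illustrating the scheme (sizes and names are hypothetical):

    #include <stdio.h>

    #define NTAGS 8        /* stands in for RSXX_MAX_OUTSTANDING_CMDS */

    static void *slot[NTAGS];            /* tag -> in-flight command */
    static int   free_tags[NTAGS], top;

    static void init_tracker(void)
    {
        for (top = 0; top < NTAGS; top++)
            free_tags[top] = top;
    }

    static int pop_tracker(void *cmd)
    {
        if (top == 0)
            return -1;        /* no tag available */
        slot[free_tags[--top]] = cmd;
        return free_tags[top];
    }

    static void push_tracker(int tag)
    {
        slot[tag] = NULL;
        free_tags[top++] = tag;
    }

    /* Cancel: every occupied slot is an outstanding command to complete. */
    static int cancel_all(void)
    {
        int i, cnt = 0;

        for (i = 0; i < NTAGS; i++)
            if (slot[i]) {
                push_tracker(i);
                cnt++;
            }
        return cnt;
    }

    int main(void)
    {
        int cmd_a, cmd_b;

        init_tracker();
        pop_tracker(&cmd_a);
        pop_tracker(&cmd_b);
        printf("cancelled %d commands\n", cancel_all());
        return 0;
    }
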
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
index 382e8bf5c03b..5ad5055a4104 100644
--- a/drivers/block/rsxx/rsxx_priv.h
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -39,6 +39,7 @@
 #include <linux/vmalloc.h>
 #include <linux/timer.h>
 #include <linux/ioctl.h>
+#include <linux/delay.h>
 
 #include "rsxx.h"
 #include "rsxx_cfg.h"
@@ -114,6 +115,7 @@ struct rsxx_dma_ctrl {
 	struct timer_list	activity_timer;
 	struct dma_tracker_list *trackers;
 	struct rsxx_dma_stats	stats;
+	struct mutex		work_lock;
 };
 
 struct rsxx_cardinfo {
@@ -134,6 +136,7 @@ struct rsxx_cardinfo {
 		spinlock_t		lock;
 		bool			active;
 		struct creg_cmd		*active_cmd;
+		struct workqueue_struct	*creg_wq;
 		struct work_struct	done_work;
 		struct list_head	queue;
 		unsigned int		q_depth;
@@ -154,6 +157,7 @@ struct rsxx_cardinfo {
 		int			buf_len;
 	} log;
 
+	struct workqueue_struct	*event_wq;
 	struct work_struct	event_work;
 	unsigned int		state;
 	u64			size8;
@@ -181,6 +185,8 @@ struct rsxx_cardinfo {
 
 	int			n_targets;
 	struct rsxx_dma_ctrl	*ctrl;
+
+	struct dentry		*debugfs_dir;
 };
 
 enum rsxx_pci_regmap {
@@ -283,6 +289,7 @@ enum rsxx_creg_addr {
 	CREG_ADD_CAPABILITIES	= 0x80001050,
 	CREG_ADD_LOG		= 0x80002000,
 	CREG_ADD_NUM_TARGETS	= 0x80003000,
+	CREG_ADD_CRAM		= 0xA0000000,
 	CREG_ADD_CONFIG		= 0xB0000000,
 };
 
@@ -372,6 +379,8 @@ typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card,
 int rsxx_dma_setup(struct rsxx_cardinfo *card);
 void rsxx_dma_destroy(struct rsxx_cardinfo *card);
 int rsxx_dma_init(void);
+int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, struct list_head *q);
+int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
 void rsxx_dma_cleanup(void);
 void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
 int rsxx_dma_configure(struct rsxx_cardinfo *card);
@@ -382,7 +391,6 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 		void *cb_data);
 int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl);
 int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card);
-void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card);
 int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card);
 
 /***** cregs.c *****/
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index dd5b2fed97e9..bf4b9d282c04 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -50,110 +50,118 @@
 #include "common.h"
 
 /*
- * These are rather arbitrary. They are fairly large because adjacent requests
- * pulled from a communication ring are quite likely to end up being part of
- * the same scatter/gather request at the disc.
+ * Maximum number of unused free pages to keep in the internal buffer.
+ * Setting this to a value too low will reduce memory used in each backend,
+ * but can have a performance penalty.
  *
- * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
- *
- * This will increase the chances of being able to write whole tracks.
- * 64 should be enough to keep us competitive with Linux.
+ * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but can
+ * be set to a lower value that might degrade performance on some intensive
+ * IO workloads.
  */
-static int xen_blkif_reqs = 64;
-module_param_named(reqs, xen_blkif_reqs, int, 0);
-MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
 
-/* Run-time switchable: /sys/module/blkback/parameters/ */
-static unsigned int log_stats;
-module_param(log_stats, int, 0644);
+static int xen_blkif_max_buffer_pages = 1024;
+module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
+MODULE_PARM_DESC(max_buffer_pages,
+"Maximum number of free pages to keep in each block backend buffer");
 
 /*
- * Each outstanding request that we've passed to the lower device layers has a
- * 'pending_req' allocated to it. Each buffer_head that completes decrements
- * the pendcnt towards zero. When it hits zero, the specified domain has a
- * response queued for it, with the saved 'id' passed back.
+ * Maximum number of grants to map persistently in blkback. For maximum
+ * performance this should be the total number of grants that can be used
+ * to fill the ring, but since this might become too high, especially with
+ * the use of indirect descriptors, we set it to a value that provides good
+ * performance without using too much memory.
+ *
+ * When the list of persistent grants is full we clean it up using a LRU
+ * algorithm.
  */
-struct pending_req {
-	struct xen_blkif	*blkif;
-	u64			id;
-	int			nr_pages;
-	atomic_t		pendcnt;
-	unsigned short		operation;
-	int			status;
-	struct list_head	free_list;
-	DECLARE_BITMAP(unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-};
 
-#define BLKBACK_INVALID_HANDLE (~0)
+static int xen_blkif_max_pgrants = 1056;
+module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
+MODULE_PARM_DESC(max_persistent_grants,
+                 "Maximum number of grants to map persistently");
 
-struct xen_blkbk {
-	struct pending_req	*pending_reqs;
-	/* List of all 'pending_req' available */
-	struct list_head	pending_free;
-	/* And its spinlock. */
-	spinlock_t		pending_free_lock;
-	wait_queue_head_t	pending_free_wq;
-	/* The list of all pages that are available. */
-	struct page		**pending_pages;
-	/* And the grant handles that are available. */
-	grant_handle_t		*pending_grant_handles;
-};
-
-static struct xen_blkbk *blkbk;
+/*
+ * The LRU mechanism to clean the lists of persistent grants needs to
+ * be executed periodically. The time interval between consecutive executions
+ * of the purge mechanism is set in ms.
+ */
+#define LRU_INTERVAL 100
 
 /*
- * Maximum number of grant pages that can be mapped in blkback.
- * BLKIF_MAX_SEGMENTS_PER_REQUEST * RING_SIZE is the maximum number of
- * pages that blkback will persistently map.
- * Currently, this is:
- * RING_SIZE = 32 (for all known ring types)
- * BLKIF_MAX_SEGMENTS_PER_REQUEST = 11
- * sizeof(struct persistent_gnt) = 48
- * So the maximum memory used to store the grants is:
- * 32 * 11 * 48 = 16896 bytes
+ * When the persistent grants list is full we will remove unused grants
+ * from the list. The percent number of grants to be removed at each LRU
+ * execution.
  */
-static inline unsigned int max_mapped_grant_pages(enum blkif_protocol protocol)
-{
-	switch (protocol) {
-	case BLKIF_PROTOCOL_NATIVE:
-		return __CONST_RING_SIZE(blkif, PAGE_SIZE) *
-			   BLKIF_MAX_SEGMENTS_PER_REQUEST;
-	case BLKIF_PROTOCOL_X86_32:
-		return __CONST_RING_SIZE(blkif_x86_32, PAGE_SIZE) *
-			   BLKIF_MAX_SEGMENTS_PER_REQUEST;
-	case BLKIF_PROTOCOL_X86_64:
-		return __CONST_RING_SIZE(blkif_x86_64, PAGE_SIZE) *
-			   BLKIF_MAX_SEGMENTS_PER_REQUEST;
-	default:
-		BUG();
-	}
-	return 0;
-}
+#define LRU_PERCENT_CLEAN 5
 
+/* Run-time switchable: /sys/module/blkback/parameters/ */
+static unsigned int log_stats;
+module_param(log_stats, int, 0644);
 
-/*
- * Little helpful macro to figure out the index and virtual address of the
- * pending_pages[..]. For each 'pending_req' we have up to
- * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
- * 10 and would index in the pending_pages[..].
- */
-static inline int vaddr_pagenr(struct pending_req *req, int seg)
-{
-	return (req - blkbk->pending_reqs) *
-		BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
-}
+#define BLKBACK_INVALID_HANDLE (~0)
 
-#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]
+/* Number of free pages to remove on each call to free_xenballooned_pages */
+#define NUM_BATCH_FREE_PAGES 10
 
-static inline unsigned long vaddr(struct pending_req *req, int seg)
+static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
 {
-	unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
-	return (unsigned long)pfn_to_kaddr(pfn);
+	unsigned long flags;
+
+	spin_lock_irqsave(&blkif->free_pages_lock, flags);
+	if (list_empty(&blkif->free_pages)) {
+		BUG_ON(blkif->free_pages_num != 0);
+		spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
+		return alloc_xenballooned_pages(1, page, false);
+	}
+	BUG_ON(blkif->free_pages_num == 0);
+	page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
+	list_del(&page[0]->lru);
+	blkif->free_pages_num--;
+	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
+
+	return 0;
 }
 
-#define pending_handle(_req, _seg) \
-	(blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])
+static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
+                                  int num)
+{
+	unsigned long flags;
+	int i;
 
+	spin_lock_irqsave(&blkif->free_pages_lock, flags);
+	for (i = 0; i < num; i++)
+		list_add(&page[i]->lru, &blkif->free_pages);
+	blkif->free_pages_num += num;
+	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
+}
+
+static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
+{
+	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
+	struct page *page[NUM_BATCH_FREE_PAGES];
+	unsigned int num_pages = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&blkif->free_pages_lock, flags);
+	while (blkif->free_pages_num > num) {
+		BUG_ON(list_empty(&blkif->free_pages));
+		page[num_pages] = list_first_entry(&blkif->free_pages,
+		                                   struct page, lru);
+		list_del(&page[num_pages]->lru);
+		blkif->free_pages_num--;
+		if (++num_pages == NUM_BATCH_FREE_PAGES) {
+			spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
+			free_xenballooned_pages(num_pages, page);
+			spin_lock_irqsave(&blkif->free_pages_lock, flags);
+			num_pages = 0;
+		}
+	}
+	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
+	if (num_pages != 0)
+		free_xenballooned_pages(num_pages, page);
+}
+
+#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
 
 static int do_block_io_op(struct xen_blkif *blkif);
 static int dispatch_rw_block_io(struct xen_blkif *blkif,
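
get_free_page()/put_free_pages() above turn ballooned pages into a cached per-backend pool, capped by xen_blkif_max_buffer_pages, so grant mappings stop hitting the balloon allocator on every request. A simplified single-page sketch of the pool idea (malloc stands in for alloc_xenballooned_pages):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096
    #define POOL_MAX  4    /* stands in for xen_blkif_max_buffer_pages */

    static void *pool[POOL_MAX];
    static int   pool_num;

    /* get: prefer a cached page; fall back to the (simulated) balloon. */
    static void *get_free_page(void)
    {
        return pool_num ? pool[--pool_num] : malloc(PAGE_SIZE);
    }

    /* put: cache the page; the real code shrinks the pool later, from
     * the backend thread, rather than freeing eagerly here. */
    static void put_free_page(void *page)
    {
        if (pool_num < POOL_MAX)
            pool[pool_num++] = page;
        else
            free(page);
    }

    int main(void)
    {
        void *p = get_free_page();

        put_free_page(p);
        printf("pages cached: %d\n", pool_num);
        return 0;
    }
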
@@ -170,13 +178,29 @@ static void make_response(struct xen_blkif *blkif, u64 id,
 	(n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
 
 
-static void add_persistent_gnt(struct rb_root *root,
+/*
+ * We don't need locking around the persistent grant helpers
+ * because blkback uses a single thread for each backend, so we
+ * can be sure that these functions will never be called recursively.
+ *
+ * The only exception to that is put_persistent_gnt, which can be called
+ * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
+ * bit operations to modify the flags of a persistent grant and to count
+ * the number of used grants.
+ */
+static int add_persistent_gnt(struct xen_blkif *blkif,
 			       struct persistent_gnt *persistent_gnt)
 {
-	struct rb_node **new = &(root->rb_node), *parent = NULL;
+	struct rb_node **new = NULL, *parent = NULL;
 	struct persistent_gnt *this;
 
+	if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
+		if (!blkif->vbd.overflow_max_grants)
+			blkif->vbd.overflow_max_grants = 1;
+		return -EBUSY;
+	}
 	/* Figure out where to put new node */
+	new = &blkif->persistent_gnts.rb_node;
 	while (*new) {
 		this = container_of(*new, struct persistent_gnt, node);
 
@@ -186,22 +210,28 @@ static void add_persistent_gnt(struct rb_root *root,
 		else if (persistent_gnt->gnt > this->gnt)
 			new = &((*new)->rb_right);
 		else {
-			pr_alert(DRV_PFX " trying to add a gref that's already in the tree\n");
-			BUG();
+			pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
+			return -EINVAL;
 		}
 	}
 
+	bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
+	set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
 	/* Add new node and rebalance tree. */
 	rb_link_node(&(persistent_gnt->node), parent, new);
-	rb_insert_color(&(persistent_gnt->node), root);
+	rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
+	blkif->persistent_gnt_c++;
+	atomic_inc(&blkif->persistent_gnt_in_use);
+	return 0;
 }
 
-static struct persistent_gnt *get_persistent_gnt(struct rb_root *root,
+static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
 						 grant_ref_t gref)
 {
 	struct persistent_gnt *data;
-	struct rb_node *node = root->rb_node;
+	struct rb_node *node = NULL;
 
+	node = blkif->persistent_gnts.rb_node;
 	while (node) {
 		data = container_of(node, struct persistent_gnt, node);
 
@@ -209,13 +239,31 @@ static struct persistent_gnt *get_persistent_gnt(struct rb_root *root,
 			node = node->rb_left;
 		else if (gref > data->gnt)
 			node = node->rb_right;
-		else
+		else {
+			if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
+				pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
+				return NULL;
+			}
+			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
+			atomic_inc(&blkif->persistent_gnt_in_use);
 			return data;
+		}
 	}
 	return NULL;
 }
 
-static void free_persistent_gnts(struct rb_root *root, unsigned int num)
+static void put_persistent_gnt(struct xen_blkif *blkif,
+			       struct persistent_gnt *persistent_gnt)
+{
+	if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
+		pr_alert_ratelimited(DRV_PFX " freeing a grant already unused");
+	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
+	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
+	atomic_dec(&blkif->persistent_gnt_in_use);
+}
+
+static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
+				 unsigned int num)
 {
 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
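
The PERSISTENT_GNT_ACTIVE and PERSISTENT_GNT_WAS_ACTIVE bits give the purger a cheap usage signal: get sets ACTIVE, put clears it and records WAS_ACTIVE. A compact sketch of that two-bit lifecycle (plain ints here; the driver uses atomic bitops because put can run from interrupt context):

    #include <stdio.h>

    #define ACTIVE     0x1    /* grant handed out to a request */
    #define WAS_ACTIVE 0x2    /* grant used since the last purge */

    static unsigned get_gnt(unsigned flags)
    {
        return flags | ACTIVE;
    }

    static unsigned put_gnt(unsigned flags)
    {
        return (flags & ~ACTIVE) | WAS_ACTIVE;
    }

    int main(void)
    {
        unsigned flags = 0;

        flags = get_gnt(flags);    /* mapped into a request */
        flags = put_gnt(flags);    /* request completed */
        printf("active=%d was_active=%d\n",
               !!(flags & ACTIVE), !!(flags & WAS_ACTIVE));
        return 0;
    }
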
@@ -240,7 +288,7 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
 			ret = gnttab_unmap_refs(unmap, NULL, pages,
 						segs_to_unmap);
 			BUG_ON(ret);
-			free_xenballooned_pages(segs_to_unmap, pages);
+			put_free_pages(blkif, pages, segs_to_unmap);
 			segs_to_unmap = 0;
 		}
 
@@ -251,21 +299,148 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
 	BUG_ON(num != 0);
 }
 
+static void unmap_purged_grants(struct work_struct *work)
+{
+	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+	struct persistent_gnt *persistent_gnt;
+	int ret, segs_to_unmap = 0;
+	struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
+
+	while (!list_empty(&blkif->persistent_purge_list)) {
+		persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
+						  struct persistent_gnt,
+						  remove_node);
+		list_del(&persistent_gnt->remove_node);
+
+		gnttab_set_unmap_op(&unmap[segs_to_unmap],
+			vaddr(persistent_gnt->page),
+			GNTMAP_host_map,
+			persistent_gnt->handle);
+
+		pages[segs_to_unmap] = persistent_gnt->page;
+
+		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+			ret = gnttab_unmap_refs(unmap, NULL, pages,
+				segs_to_unmap);
+			BUG_ON(ret);
+			put_free_pages(blkif, pages, segs_to_unmap);
+			segs_to_unmap = 0;
+		}
+		kfree(persistent_gnt);
+	}
+	if (segs_to_unmap > 0) {
+		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
+		BUG_ON(ret);
+		put_free_pages(blkif, pages, segs_to_unmap);
+	}
+}
+
+static void purge_persistent_gnt(struct xen_blkif *blkif)
+{
+	struct persistent_gnt *persistent_gnt;
+	struct rb_node *n;
+	unsigned int num_clean, total;
+	bool scan_used = false, clean_used = false;
+	struct rb_root *root;
+
+	if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
+	    (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
+	    !blkif->vbd.overflow_max_grants)) {
+		return;
+	}
+
+	if (work_pending(&blkif->persistent_purge_work)) {
+		pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
+		return;
+	}
+
+	num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
+	num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
+	num_clean = min(blkif->persistent_gnt_c, num_clean);
+	if ((num_clean == 0) ||
+	    (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))))
+		return;
+
+	/*
+	 * At this point, we can assure that there will be no calls
+	 * to get_persistent_grant (because we are executing this code from
+	 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
+	 * which means that the number of currently used grants will go down,
+	 * but never up, so we will always be able to remove the requested
+	 * number of grants.
+	 */
+
+	total = num_clean;
+
+	pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
+
+	INIT_LIST_HEAD(&blkif->persistent_purge_list);
+	root = &blkif->persistent_gnts;
+purge_list:
+	foreach_grant_safe(persistent_gnt, n, root, node) {
+		BUG_ON(persistent_gnt->handle ==
+			BLKBACK_INVALID_HANDLE);
+
+		if (clean_used) {
+			clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
+			continue;
+		}
+
+		if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
+			continue;
+		if (!scan_used &&
+		    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
+			continue;
+
+		rb_erase(&persistent_gnt->node, root);
+		list_add(&persistent_gnt->remove_node,
+			 &blkif->persistent_purge_list);
+		if (--num_clean == 0)
+			goto finished;
+	}
+	/*
+	 * If we get here it means we also need to start cleaning
+	 * grants that were used since last purge in order to cope
+	 * with the requested num
+	 */
+	if (!scan_used && !clean_used) {
+		pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
+		scan_used = true;
+		goto purge_list;
+	}
+finished:
+	if (!clean_used) {
+		pr_debug(DRV_PFX "Finished scanning for grants to clean, removing used flag\n");
+		clean_used = true;
+		goto purge_list;
+	}
+
+	blkif->persistent_gnt_c -= (total - num_clean);
+	blkif->vbd.overflow_max_grants = 0;
+
+	/* We can defer this work */
+	INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
+	schedule_work(&blkif->persistent_purge_work);
+	pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
+	return;
+}
+
 /*
  * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
  */
-static struct pending_req *alloc_req(void)
+static struct pending_req *alloc_req(struct xen_blkif *blkif)
 {
 	struct pending_req *req = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
-	if (!list_empty(&blkbk->pending_free)) {
-		req = list_entry(blkbk->pending_free.next, struct pending_req,
+	spin_lock_irqsave(&blkif->pending_free_lock, flags);
+	if (!list_empty(&blkif->pending_free)) {
+		req = list_entry(blkif->pending_free.next, struct pending_req,
 				 free_list);
 		list_del(&req->free_list);
 	}
-	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
+	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
 	return req;
 }
 
@@ -273,17 +448,17 @@ static struct pending_req *alloc_req(void)
  * Return the 'pending_req' structure back to the freepool. We also
  * wake up the thread if it was waiting for a free page.
  */
-static void free_req(struct pending_req *req)
+static void free_req(struct xen_blkif *blkif, struct pending_req *req)
 {
 	unsigned long flags;
 	int was_empty;
 
-	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
-	was_empty = list_empty(&blkbk->pending_free);
-	list_add(&req->free_list, &blkbk->pending_free);
-	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
+	spin_lock_irqsave(&blkif->pending_free_lock, flags);
+	was_empty = list_empty(&blkif->pending_free);
+	list_add(&req->free_list, &blkif->pending_free);
+	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
 	if (was_empty)
-		wake_up(&blkbk->pending_free_wq);
+		wake_up(&blkif->pending_free_wq);
 }
 
 /*
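
With the defaults, the sizing in purge_persistent_gnt() works out to roughly 5% of the cap per pass. A worked example, assuming a full tree and 10 grants currently in use (the in-use count is invented for the illustration):

    #include <stdio.h>

    int main(void)
    {
        unsigned max_pgrants = 1056;    /* module default */
        unsigned gnt_c       = 1056;    /* tree is full */
        unsigned in_use      = 10;      /* assumed for the example */
        unsigned num_clean   = (max_pgrants / 100) * 5; /* LRU_PERCENT_CLEAN */

        num_clean = gnt_c - max_pgrants + num_clean;    /* overflow + 5% */
        if (num_clean > gnt_c)
            num_clean = gnt_c;
        if (num_clean == 0 || num_clean > gnt_c - in_use)
            num_clean = 0;              /* nothing safely removable */

        printf("purge %u of %u grants\n", num_clean, gnt_c);  /* 50 of 1056 */
        return 0;
    }
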
@@ -382,10 +557,12 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
 static void print_stats(struct xen_blkif *blkif)
 {
 	pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
-		 " | ds %4llu\n",
+		 " | ds %4llu | pg: %4u/%4d\n",
 		 current->comm, blkif->st_oo_req,
 		 blkif->st_rd_req, blkif->st_wr_req,
-		 blkif->st_f_req, blkif->st_ds_req);
+		 blkif->st_f_req, blkif->st_ds_req,
+		 blkif->persistent_gnt_c,
+		 xen_blkif_max_pgrants);
 	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
 	blkif->st_rd_req = 0;
 	blkif->st_wr_req = 0;
@@ -397,6 +574,8 @@ int xen_blkif_schedule(void *arg)
 {
 	struct xen_blkif *blkif = arg;
 	struct xen_vbd *vbd = &blkif->vbd;
+	unsigned long timeout;
+	int ret;
 
 	xen_blkif_get(blkif);
 
@@ -406,27 +585,52 @@ int xen_blkif_schedule(void *arg)
 		if (unlikely(vbd->size != vbd_sz(vbd)))
 			xen_vbd_resize(blkif);
 
-		wait_event_interruptible(
+		timeout = msecs_to_jiffies(LRU_INTERVAL);
+
+		timeout = wait_event_interruptible_timeout(
 			blkif->wq,
-			blkif->waiting_reqs || kthread_should_stop());
-		wait_event_interruptible(
-			blkbk->pending_free_wq,
-			!list_empty(&blkbk->pending_free) ||
-			kthread_should_stop());
+			blkif->waiting_reqs || kthread_should_stop(),
+			timeout);
+		if (timeout == 0)
+			goto purge_gnt_list;
+		timeout = wait_event_interruptible_timeout(
+			blkif->pending_free_wq,
+			!list_empty(&blkif->pending_free) ||
+			kthread_should_stop(),
+			timeout);
+		if (timeout == 0)
+			goto purge_gnt_list;
 
 		blkif->waiting_reqs = 0;
 		smp_mb(); /* clear flag *before* checking for work */
 
-		if (do_block_io_op(blkif))
+		ret = do_block_io_op(blkif);
+		if (ret > 0)
 			blkif->waiting_reqs = 1;
+		if (ret == -EACCES)
+			wait_event_interruptible(blkif->shutdown_wq,
+						 kthread_should_stop());
+
+purge_gnt_list:
+		if (blkif->vbd.feature_gnt_persistent &&
+		    time_after(jiffies, blkif->next_lru)) {
+			purge_persistent_gnt(blkif);
+			blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
+		}
+
+		/* Shrink if we have more than xen_blkif_max_buffer_pages */
+		shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);
 
 		if (log_stats && time_after(jiffies, blkif->st_print))
 			print_stats(blkif);
 	}
 
+	/* Since we are shutting down remove all pages from the buffer */
+	shrink_free_pagepool(blkif, 0 /* All */);
+
 	/* Free all persistent grant pages */
 	if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
-		free_persistent_gnts(&blkif->persistent_gnts,
+		free_persistent_gnts(blkif, &blkif->persistent_gnts,
 			blkif->persistent_gnt_c);
 
 	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
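
The scheduler loop now uses wait_event_interruptible_timeout(), so a return of 0 (timeout with no requests arriving) is what routes the thread to the purge_gnt_list housekeeping path. A sketch of that control flow with the wait stubbed out:

    #include <stdio.h>

    /* Mirrors wait_event_interruptible_timeout(): the return value is the
     * time left, and 0 means the wait timed out with no work arriving. */
    static long wait_for_work(long timeout_ms, int work_ready)
    {
        return work_ready ? timeout_ms / 2 : 0;
    }

    int main(void)
    {
        int round;

        for (round = 0; round < 3; round++) {
            long left = wait_for_work(100 /* LRU_INTERVAL ms */,
                                      round == 1);
            if (left == 0) {
                printf("round %d: timeout, purge + shrink pool\n", round);
                continue;
            }
            printf("round %d: dispatch ring requests\n", round);
        }
        return 0;
    }
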
@@ -441,148 +645,98 @@ int xen_blkif_schedule(void *arg)
441 return 0; 645 return 0;
442} 646}
443 647
444struct seg_buf {
445 unsigned int offset;
446 unsigned int nsec;
447};
448/* 648/*
449 * Unmap the grant references, and also remove the M2P over-rides 649 * Unmap the grant references, and also remove the M2P over-rides
450 * used in the 'pending_req'. 650 * used in the 'pending_req'.
451 */ 651 */
452static void xen_blkbk_unmap(struct pending_req *req) 652static void xen_blkbk_unmap(struct xen_blkif *blkif,
653 struct grant_page *pages[],
654 int num)
453{ 655{
454 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 656 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
455 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 657 struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
456 unsigned int i, invcount = 0; 658 unsigned int i, invcount = 0;
457 grant_handle_t handle;
458 int ret; 659 int ret;
459 660
460 for (i = 0; i < req->nr_pages; i++) { 661 for (i = 0; i < num; i++) {
461 if (!test_bit(i, req->unmap_seg)) 662 if (pages[i]->persistent_gnt != NULL) {
663 put_persistent_gnt(blkif, pages[i]->persistent_gnt);
462 continue; 664 continue;
463 handle = pending_handle(req, i); 665 }
464 if (handle == BLKBACK_INVALID_HANDLE) 666 if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
465 continue; 667 continue;
466 gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i), 668 unmap_pages[invcount] = pages[i]->page;
467 GNTMAP_host_map, handle); 669 gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page),
468 pending_handle(req, i) = BLKBACK_INVALID_HANDLE; 670 GNTMAP_host_map, pages[i]->handle);
469 pages[invcount] = virt_to_page(vaddr(req, i)); 671 pages[i]->handle = BLKBACK_INVALID_HANDLE;
470 invcount++; 672 if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
673 ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
674 invcount);
675 BUG_ON(ret);
676 put_free_pages(blkif, unmap_pages, invcount);
677 invcount = 0;
678 }
679 }
680 if (invcount) {
681 ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
682 BUG_ON(ret);
683 put_free_pages(blkif, unmap_pages, invcount);
471 } 684 }
472
473 ret = gnttab_unmap_refs(unmap, NULL, pages, invcount);
474 BUG_ON(ret);
475} 685}
476 686
477static int xen_blkbk_map(struct blkif_request *req, 687static int xen_blkbk_map(struct xen_blkif *blkif,
478 struct pending_req *pending_req, 688 struct grant_page *pages[],
479 struct seg_buf seg[], 689 int num, bool ro)
480 struct page *pages[])
481{ 690{
482 struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 691 struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
483 struct persistent_gnt *persistent_gnts[BLKIF_MAX_SEGMENTS_PER_REQUEST];
484 struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 692 struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
485 struct persistent_gnt *persistent_gnt = NULL; 693 struct persistent_gnt *persistent_gnt = NULL;
486 struct xen_blkif *blkif = pending_req->blkif;
487 phys_addr_t addr = 0; 694 phys_addr_t addr = 0;
488 int i, j; 695 int i, seg_idx, new_map_idx;
489 bool new_map;
490 int nseg = req->u.rw.nr_segments;
491 int segs_to_map = 0; 696 int segs_to_map = 0;
492 int ret = 0; 697 int ret = 0;
698 int last_map = 0, map_until = 0;
493 int use_persistent_gnts; 699 int use_persistent_gnts;
494 700
495 use_persistent_gnts = (blkif->vbd.feature_gnt_persistent); 701 use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
496 702
497 BUG_ON(blkif->persistent_gnt_c >
498 max_mapped_grant_pages(pending_req->blkif->blk_protocol));
499
500 /* 703 /*
501 * Fill out preq.nr_sects with proper amount of sectors, and setup 704 * Fill out preq.nr_sects with proper amount of sectors, and setup
502 * assign map[..] with the PFN of the page in our domain with the 705 * assign map[..] with the PFN of the page in our domain with the
503 * corresponding grant reference for each page. 706 * corresponding grant reference for each page.
504 */ 707 */
505 for (i = 0; i < nseg; i++) { 708again:
709 for (i = map_until; i < num; i++) {
506 uint32_t flags; 710 uint32_t flags;
507 711
508 if (use_persistent_gnts) 712 if (use_persistent_gnts)
509 persistent_gnt = get_persistent_gnt( 713 persistent_gnt = get_persistent_gnt(
510 &blkif->persistent_gnts, 714 blkif,
511 req->u.rw.seg[i].gref); 715 pages[i]->gref);
512 716
513 if (persistent_gnt) { 717 if (persistent_gnt) {
514 /* 718 /*
515 * We are using persistent grants and 719 * We are using persistent grants and
516 * the grant is already mapped 720 * the grant is already mapped
517 */ 721 */
518 new_map = false; 722 pages[i]->page = persistent_gnt->page;
519 } else if (use_persistent_gnts && 723 pages[i]->persistent_gnt = persistent_gnt;
520 blkif->persistent_gnt_c <
521 max_mapped_grant_pages(blkif->blk_protocol)) {
522 /*
523 * We are using persistent grants, the grant is
524 * not mapped but we have room for it
525 */
526 new_map = true;
527 persistent_gnt = kmalloc(
528 sizeof(struct persistent_gnt),
529 GFP_KERNEL);
530 if (!persistent_gnt)
531 return -ENOMEM;
532 if (alloc_xenballooned_pages(1, &persistent_gnt->page,
533 false)) {
534 kfree(persistent_gnt);
535 return -ENOMEM;
536 }
537 persistent_gnt->gnt = req->u.rw.seg[i].gref;
538 persistent_gnt->handle = BLKBACK_INVALID_HANDLE;
539
540 pages_to_gnt[segs_to_map] =
541 persistent_gnt->page;
542 addr = (unsigned long) pfn_to_kaddr(
543 page_to_pfn(persistent_gnt->page));
544
545 add_persistent_gnt(&blkif->persistent_gnts,
546 persistent_gnt);
547 blkif->persistent_gnt_c++;
548 pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
549 persistent_gnt->gnt, blkif->persistent_gnt_c,
550 max_mapped_grant_pages(blkif->blk_protocol));
551 } else { 724 } else {
552 /* 725 if (get_free_page(blkif, &pages[i]->page))
553 * We are either using persistent grants and 726 goto out_of_memory;
554 * hit the maximum limit of grants mapped, 727 addr = vaddr(pages[i]->page);
555 * or we are not using persistent grants. 728 pages_to_gnt[segs_to_map] = pages[i]->page;
556 */ 729 pages[i]->persistent_gnt = NULL;
557 if (use_persistent_gnts &&
558 !blkif->vbd.overflow_max_grants) {
559 blkif->vbd.overflow_max_grants = 1;
560 pr_alert(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
561 blkif->domid, blkif->vbd.handle);
562 }
563 new_map = true;
564 pages[i] = blkbk->pending_page(pending_req, i);
565 addr = vaddr(pending_req, i);
566 pages_to_gnt[segs_to_map] =
567 blkbk->pending_page(pending_req, i);
568 }
569
570 if (persistent_gnt) {
571 pages[i] = persistent_gnt->page;
572 persistent_gnts[i] = persistent_gnt;
573 } else {
574 persistent_gnts[i] = NULL;
575 }
576
577 if (new_map) {
578 flags = GNTMAP_host_map; 730 flags = GNTMAP_host_map;
579 if (!persistent_gnt && 731 if (!use_persistent_gnts && ro)
580 (pending_req->operation != BLKIF_OP_READ))
581 flags |= GNTMAP_readonly; 732 flags |= GNTMAP_readonly;
582 gnttab_set_map_op(&map[segs_to_map++], addr, 733 gnttab_set_map_op(&map[segs_to_map++], addr,
583 flags, req->u.rw.seg[i].gref, 734 flags, pages[i]->gref,
584 blkif->domid); 735 blkif->domid);
585 } 736 }
737 map_until = i + 1;
738 if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
739 break;
586 } 740 }
587 741
588 if (segs_to_map) { 742 if (segs_to_map) {
@@ -595,49 +749,133 @@ static int xen_blkbk_map(struct blkif_request *req,
595 * so that when we access vaddr(pending_req,i) it has the contents of 749 * so that when we access vaddr(pending_req,i) it has the contents of
596 * the page from the other domain. 750 * the page from the other domain.
597 */ 751 */
598 bitmap_zero(pending_req->unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST); 752 for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
599 for (i = 0, j = 0; i < nseg; i++) { 753 if (!pages[seg_idx]->persistent_gnt) {
600 if (!persistent_gnts[i] ||
601 persistent_gnts[i]->handle == BLKBACK_INVALID_HANDLE) {
602 /* This is a newly mapped grant */ 754 /* This is a newly mapped grant */
603 BUG_ON(j >= segs_to_map); 755 BUG_ON(new_map_idx >= segs_to_map);
604 if (unlikely(map[j].status != 0)) { 756 if (unlikely(map[new_map_idx].status != 0)) {
605 pr_debug(DRV_PFX "invalid buffer -- could not remap it\n"); 757 pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
606 map[j].handle = BLKBACK_INVALID_HANDLE; 758 pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
607 ret |= 1; 759 ret |= 1;
608 if (persistent_gnts[i]) { 760 goto next;
609 rb_erase(&persistent_gnts[i]->node,
610 &blkif->persistent_gnts);
611 blkif->persistent_gnt_c--;
612 kfree(persistent_gnts[i]);
613 persistent_gnts[i] = NULL;
614 }
615 } 761 }
762 pages[seg_idx]->handle = map[new_map_idx].handle;
763 } else {
764 continue;
616 } 765 }
617 if (persistent_gnts[i]) { 766 if (use_persistent_gnts &&
618 if (persistent_gnts[i]->handle == 767 blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
619 BLKBACK_INVALID_HANDLE) { 768 /*
769 * We are using persistent grants, the grant is
770 * not mapped but we might have room for it.
771 */
772 persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
773 GFP_KERNEL);
774 if (!persistent_gnt) {
620 /* 775 /*
621 * If this is a new persistent grant 776 * If we don't have enough memory to
622 * save the handler 777 * allocate the persistent_gnt struct
778 * map this grant non-persistenly
623 */ 779 */
624 persistent_gnts[i]->handle = map[j++].handle; 780 goto next;
625 } 781 }
626 pending_handle(pending_req, i) = 782 persistent_gnt->gnt = map[new_map_idx].ref;
627 persistent_gnts[i]->handle; 783 persistent_gnt->handle = map[new_map_idx].handle;
784 persistent_gnt->page = pages[seg_idx]->page;
785 if (add_persistent_gnt(blkif,
786 persistent_gnt)) {
787 kfree(persistent_gnt);
788 persistent_gnt = NULL;
789 goto next;
790 }
791 pages[seg_idx]->persistent_gnt = persistent_gnt;
792 pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
793 persistent_gnt->gnt, blkif->persistent_gnt_c,
794 xen_blkif_max_pgrants);
795 goto next;
796 }
797 if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
798 blkif->vbd.overflow_max_grants = 1;
799 pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
800 blkif->domid, blkif->vbd.handle);
801 }
802 /*
803 * We could not map this grant persistently, so use it as
804 * a non-persistent grant.
805 */
806next:
807 new_map_idx++;
808 }
809 segs_to_map = 0;
810 last_map = map_until;
811 if (map_until != num)
812 goto again;
628 813
629 if (ret) 814 return ret;
630 continue; 815
631 } else { 816out_of_memory:
632 pending_handle(pending_req, i) = map[j++].handle; 817 pr_alert(DRV_PFX "%s: out of memory\n", __func__);
633 bitmap_set(pending_req->unmap_seg, i, 1); 818 put_free_pages(blkif, pages_to_gnt, segs_to_map);
819 return -ENOMEM;
820}
821
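The again/goto loop above maps grants in batches: at most BLKIF_MAX_SEGMENTS_PER_REQUEST map operations are queued per pass, the batch is submitted, and the loop resumes from last_map until map_until reaches num. A minimal user-space sketch of that chunked-batch pattern (hypothetical helpers, not the kernel code):

#include <stdio.h>

#define BATCH 11	/* stand-in for BLKIF_MAX_SEGMENTS_PER_REQUEST */

static void process_batch(int first, int count)
{
	printf("batch: items %d..%d\n", first, first + count - 1);
}

static void process_all(int num)
{
	int last_map = 0, map_until = 0;

	while (last_map < num) {
		int segs_to_map = 0, i;

		for (i = last_map; i < num; i++) {
			segs_to_map++;
			map_until = i + 1;
			if (segs_to_map == BATCH)
				break;
		}
		process_batch(last_map, segs_to_map);	/* one "hypercall" per batch */
		last_map = map_until;
	}
}

int main(void)
{
	process_all(25);	/* -> batches of 11, 11 and 3 */
	return 0;
}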
822static int xen_blkbk_map_seg(struct pending_req *pending_req)
823{
824 int rc;
825
826 rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
827 pending_req->nr_pages,
828 (pending_req->operation != BLKIF_OP_READ));
829
830 return rc;
831}
634 832
635 if (ret) 833static int xen_blkbk_parse_indirect(struct blkif_request *req,
636 continue; 834 struct pending_req *pending_req,
835 struct seg_buf seg[],
836 struct phys_req *preq)
837{
838 struct grant_page **pages = pending_req->indirect_pages;
839 struct xen_blkif *blkif = pending_req->blkif;
840 int indirect_grefs, rc, n, nseg, i;
841 struct blkif_request_segment_aligned *segments = NULL;
842
843 nseg = pending_req->nr_pages;
844 indirect_grefs = INDIRECT_PAGES(nseg);
845 BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
846
847 for (i = 0; i < indirect_grefs; i++)
848 pages[i]->gref = req->u.indirect.indirect_grefs[i];
849
850 rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
851 if (rc)
852 goto unmap;
853
854 for (n = 0, i = 0; n < nseg; n++) {
855 if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
856 /* Map indirect segments */
857 if (segments)
858 kunmap_atomic(segments);
859 segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
860 }
861 i = n % SEGS_PER_INDIRECT_FRAME;
862 pending_req->segments[n]->gref = segments[i].gref;
863 seg[n].nsec = segments[i].last_sect -
864 segments[i].first_sect + 1;
865 seg[n].offset = (segments[i].first_sect << 9);
866 if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) ||
867 (segments[i].last_sect < segments[i].first_sect)) {
868 rc = -EINVAL;
869 goto unmap;
637 } 870 }
638 seg[i].offset = (req->u.rw.seg[i].first_sect << 9); 871 preq->nr_sects += seg[n].nsec;
639 } 872 }
640 return ret; 873
874unmap:
875 if (segments)
876 kunmap_atomic(segments);
877 xen_blkbk_unmap(blkif, pages, indirect_grefs);
878 return rc;
641} 879}
642 880
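Note the addressing scheme in xen_blkbk_parse_indirect(): segment n lives in indirect page n / SEGS_PER_INDIRECT_FRAME at slot n % SEGS_PER_INDIRECT_FRAME, which is why the kmap_atomic() target changes exactly when n crosses a frame boundary. A quick sketch of that arithmetic, assuming 4 KiB pages and 8-byte segment entries (512 per page):

#include <stdio.h>

#define SEGS_PER_FRAME 512	/* assumed PAGE_SIZE / sizeof(segment entry) */

int main(void)
{
	unsigned int samples[] = { 0, 511, 512, 1023, 1024 };
	unsigned int i;

	for (i = 0; i < 5; i++)
		printf("segment %4u -> indirect page %u, slot %u\n",
		       samples[i],
		       samples[i] / SEGS_PER_FRAME,
		       samples[i] % SEGS_PER_FRAME);
	return 0;
}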
643static int dispatch_discard_io(struct xen_blkif *blkif, 881static int dispatch_discard_io(struct xen_blkif *blkif,
@@ -647,7 +885,18 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
647 int status = BLKIF_RSP_OKAY; 885 int status = BLKIF_RSP_OKAY;
648 struct block_device *bdev = blkif->vbd.bdev; 886 struct block_device *bdev = blkif->vbd.bdev;
649 unsigned long secure; 887 unsigned long secure;
888 struct phys_req preq;
889
890 preq.sector_number = req->u.discard.sector_number;
891 preq.nr_sects = req->u.discard.nr_sectors;
650 892
893 err = xen_vbd_translate(&preq, blkif, WRITE);
894 if (err) {
895 pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
896 preq.sector_number,
897 preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
898 goto fail_response;
899 }
651 blkif->st_ds_req++; 900 blkif->st_ds_req++;
652 901
653 xen_blkif_get(blkif); 902 xen_blkif_get(blkif);
@@ -658,7 +907,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
658 err = blkdev_issue_discard(bdev, req->u.discard.sector_number, 907 err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
659 req->u.discard.nr_sectors, 908 req->u.discard.nr_sectors,
660 GFP_KERNEL, secure); 909 GFP_KERNEL, secure);
661 910fail_response:
662 if (err == -EOPNOTSUPP) { 911 if (err == -EOPNOTSUPP) {
663 pr_debug(DRV_PFX "discard op failed, not supported\n"); 912 pr_debug(DRV_PFX "discard op failed, not supported\n");
664 status = BLKIF_RSP_EOPNOTSUPP; 913 status = BLKIF_RSP_EOPNOTSUPP;
@@ -674,7 +923,7 @@ static int dispatch_other_io(struct xen_blkif *blkif,
674 struct blkif_request *req, 923 struct blkif_request *req,
675 struct pending_req *pending_req) 924 struct pending_req *pending_req)
676{ 925{
677 free_req(pending_req); 926 free_req(blkif, pending_req);
678 make_response(blkif, req->u.other.id, req->operation, 927 make_response(blkif, req->u.other.id, req->operation,
679 BLKIF_RSP_EOPNOTSUPP); 928 BLKIF_RSP_EOPNOTSUPP);
680 return -EIO; 929 return -EIO;
@@ -726,7 +975,9 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
726 * the proper response on the ring. 975 * the proper response on the ring.
727 */ 976 */
728 if (atomic_dec_and_test(&pending_req->pendcnt)) { 977 if (atomic_dec_and_test(&pending_req->pendcnt)) {
729 xen_blkbk_unmap(pending_req); 978 xen_blkbk_unmap(pending_req->blkif,
979 pending_req->segments,
980 pending_req->nr_pages);
730 make_response(pending_req->blkif, pending_req->id, 981 make_response(pending_req->blkif, pending_req->id,
731 pending_req->operation, pending_req->status); 982 pending_req->operation, pending_req->status);
732 xen_blkif_put(pending_req->blkif); 983 xen_blkif_put(pending_req->blkif);
@@ -734,7 +985,7 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
734 if (atomic_read(&pending_req->blkif->drain)) 985 if (atomic_read(&pending_req->blkif->drain))
735 complete(&pending_req->blkif->drain_complete); 986 complete(&pending_req->blkif->drain_complete);
736 } 987 }
737 free_req(pending_req); 988 free_req(pending_req->blkif, pending_req);
738 } 989 }
739} 990}
740 991
@@ -767,6 +1018,12 @@ __do_block_io_op(struct xen_blkif *blkif)
767 rp = blk_rings->common.sring->req_prod; 1018 rp = blk_rings->common.sring->req_prod;
768 rmb(); /* Ensure we see queued requests up to 'rp'. */ 1019 rmb(); /* Ensure we see queued requests up to 'rp'. */
769 1020
1021 if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
1022 rc = blk_rings->common.rsp_prod_pvt;
1023 pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
1024 rp, rc, rp - rc, blkif->vbd.pdevice);
1025 return -EACCES;
1026 }
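The new check rejects a request producer that has run away from the response producer: with a ring of RING_SIZE slots, a well-behaved frontend can never have more than RING_SIZE requests outstanding. A user-space model of the check (the real RING_REQUEST_PROD_OVERFLOW macro lives in include/xen/interface/io/ring.h; the ring size here is an assumed example):

#include <stdbool.h>

#define RING_SIZE 32	/* assumed ring size for the sketch */

static bool prod_overflow(unsigned int req_prod, unsigned int rsp_prod_pvt)
{
	/* unsigned subtraction copes with index wrap-around */
	return (req_prod - rsp_prod_pvt) > RING_SIZE;
}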
770 while (rc != rp) { 1027 while (rc != rp) {
771 1028
772 if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) 1029 if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
@@ -777,7 +1034,7 @@ __do_block_io_op(struct xen_blkif *blkif)
777 break; 1034 break;
778 } 1035 }
779 1036
780 pending_req = alloc_req(); 1037 pending_req = alloc_req(blkif);
781 if (NULL == pending_req) { 1038 if (NULL == pending_req) {
782 blkif->st_oo_req++; 1039 blkif->st_oo_req++;
783 more_to_do = 1; 1040 more_to_do = 1;
@@ -807,11 +1064,12 @@ __do_block_io_op(struct xen_blkif *blkif)
807 case BLKIF_OP_WRITE: 1064 case BLKIF_OP_WRITE:
808 case BLKIF_OP_WRITE_BARRIER: 1065 case BLKIF_OP_WRITE_BARRIER:
809 case BLKIF_OP_FLUSH_DISKCACHE: 1066 case BLKIF_OP_FLUSH_DISKCACHE:
1067 case BLKIF_OP_INDIRECT:
810 if (dispatch_rw_block_io(blkif, &req, pending_req)) 1068 if (dispatch_rw_block_io(blkif, &req, pending_req))
811 goto done; 1069 goto done;
812 break; 1070 break;
813 case BLKIF_OP_DISCARD: 1071 case BLKIF_OP_DISCARD:
814 free_req(pending_req); 1072 free_req(blkif, pending_req);
815 if (dispatch_discard_io(blkif, &req)) 1073 if (dispatch_discard_io(blkif, &req))
816 goto done; 1074 goto done;
817 break; 1075 break;
@@ -853,17 +1111,28 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
853 struct pending_req *pending_req) 1111 struct pending_req *pending_req)
854{ 1112{
855 struct phys_req preq; 1113 struct phys_req preq;
856 struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 1114 struct seg_buf *seg = pending_req->seg;
857 unsigned int nseg; 1115 unsigned int nseg;
858 struct bio *bio = NULL; 1116 struct bio *bio = NULL;
859 struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 1117 struct bio **biolist = pending_req->biolist;
860 int i, nbio = 0; 1118 int i, nbio = 0;
861 int operation; 1119 int operation;
862 struct blk_plug plug; 1120 struct blk_plug plug;
863 bool drain = false; 1121 bool drain = false;
864 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 1122 struct grant_page **pages = pending_req->segments;
1123 unsigned short req_operation;
1124
1125 req_operation = req->operation == BLKIF_OP_INDIRECT ?
1126 req->u.indirect.indirect_op : req->operation;
1127 if ((req->operation == BLKIF_OP_INDIRECT) &&
1128 (req_operation != BLKIF_OP_READ) &&
1129 (req_operation != BLKIF_OP_WRITE)) {
1130 pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
1131 req_operation);
1132 goto fail_response;
1133 }
865 1134
866 switch (req->operation) { 1135 switch (req_operation) {
867 case BLKIF_OP_READ: 1136 case BLKIF_OP_READ:
868 blkif->st_rd_req++; 1137 blkif->st_rd_req++;
869 operation = READ; 1138 operation = READ;
@@ -885,33 +1154,47 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
885 } 1154 }
886 1155
887 /* Check that the number of segments is sane. */ 1156 /* Check that the number of segments is sane. */
888 nseg = req->u.rw.nr_segments; 1157 nseg = req->operation == BLKIF_OP_INDIRECT ?
1158 req->u.indirect.nr_segments : req->u.rw.nr_segments;
889 1159
890 if (unlikely(nseg == 0 && operation != WRITE_FLUSH) || 1160 if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
891 unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) { 1161 unlikely((req->operation != BLKIF_OP_INDIRECT) &&
1162 (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
1163 unlikely((req->operation == BLKIF_OP_INDIRECT) &&
1164 (nseg > MAX_INDIRECT_SEGMENTS))) {
892 pr_debug(DRV_PFX "Bad number of segments in request (%d)\n", 1165 pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
893 nseg); 1166 nseg);
894 /* Haven't submitted any bio's yet. */ 1167 /* Haven't submitted any bio's yet. */
895 goto fail_response; 1168 goto fail_response;
896 } 1169 }
897 1170
898 preq.sector_number = req->u.rw.sector_number;
899 preq.nr_sects = 0; 1171 preq.nr_sects = 0;
900 1172
901 pending_req->blkif = blkif; 1173 pending_req->blkif = blkif;
902 pending_req->id = req->u.rw.id; 1174 pending_req->id = req->u.rw.id;
903 pending_req->operation = req->operation; 1175 pending_req->operation = req_operation;
904 pending_req->status = BLKIF_RSP_OKAY; 1176 pending_req->status = BLKIF_RSP_OKAY;
905 pending_req->nr_pages = nseg; 1177 pending_req->nr_pages = nseg;
906 1178
907 for (i = 0; i < nseg; i++) { 1179 if (req->operation != BLKIF_OP_INDIRECT) {
908 seg[i].nsec = req->u.rw.seg[i].last_sect - 1180 preq.dev = req->u.rw.handle;
909 req->u.rw.seg[i].first_sect + 1; 1181 preq.sector_number = req->u.rw.sector_number;
910 if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) || 1182 for (i = 0; i < nseg; i++) {
911 (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect)) 1183 pages[i]->gref = req->u.rw.seg[i].gref;
1184 seg[i].nsec = req->u.rw.seg[i].last_sect -
1185 req->u.rw.seg[i].first_sect + 1;
1186 seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
1187 if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
1188 (req->u.rw.seg[i].last_sect <
1189 req->u.rw.seg[i].first_sect))
1190 goto fail_response;
1191 preq.nr_sects += seg[i].nsec;
1192 }
1193 } else {
1194 preq.dev = req->u.indirect.handle;
1195 preq.sector_number = req->u.indirect.sector_number;
1196 if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
912 goto fail_response; 1197 goto fail_response;
913 preq.nr_sects += seg[i].nsec;
914
915 } 1198 }
916 1199
917 if (xen_vbd_translate(&preq, blkif, operation) != 0) { 1200 if (xen_vbd_translate(&preq, blkif, operation) != 0) {
@@ -948,7 +1231,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
948 * the hypercall to unmap the grants - that is all done in 1231 * the hypercall to unmap the grants - that is all done in
949 * xen_blkbk_unmap. 1232 * xen_blkbk_unmap.
950 */ 1233 */
951 if (xen_blkbk_map(req, pending_req, seg, pages)) 1234 if (xen_blkbk_map_seg(pending_req))
952 goto fail_flush; 1235 goto fail_flush;
953 1236
954 /* 1237 /*
@@ -960,11 +1243,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
960 for (i = 0; i < nseg; i++) { 1243 for (i = 0; i < nseg; i++) {
961 while ((bio == NULL) || 1244 while ((bio == NULL) ||
962 (bio_add_page(bio, 1245 (bio_add_page(bio,
963 pages[i], 1246 pages[i]->page,
964 seg[i].nsec << 9, 1247 seg[i].nsec << 9,
965 seg[i].offset) == 0)) { 1248 seg[i].offset) == 0)) {
966 1249
967 bio = bio_alloc(GFP_KERNEL, nseg-i); 1250 int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
1251 bio = bio_alloc(GFP_KERNEL, nr_iovecs);
968 if (unlikely(bio == NULL)) 1252 if (unlikely(bio == NULL))
969 goto fail_put_bio; 1253 goto fail_put_bio;
970 1254
@@ -1009,11 +1293,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
1009 return 0; 1293 return 0;
1010 1294
1011 fail_flush: 1295 fail_flush:
1012 xen_blkbk_unmap(pending_req); 1296 xen_blkbk_unmap(blkif, pending_req->segments,
1297 pending_req->nr_pages);
1013 fail_response: 1298 fail_response:
1014 /* Haven't submitted any bio's yet. */ 1299 /* Haven't submitted any bio's yet. */
1015 make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR); 1300 make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
1016 free_req(pending_req); 1301 free_req(blkif, pending_req);
1017 msleep(1); /* back off a bit */ 1302 msleep(1); /* back off a bit */
1018 return -EIO; 1303 return -EIO;
1019 1304
@@ -1070,73 +1355,20 @@ static void make_response(struct xen_blkif *blkif, u64 id,
1070 1355
1071static int __init xen_blkif_init(void) 1356static int __init xen_blkif_init(void)
1072{ 1357{
1073 int i, mmap_pages;
1074 int rc = 0; 1358 int rc = 0;
1075 1359
1076 if (!xen_domain()) 1360 if (!xen_domain())
1077 return -ENODEV; 1361 return -ENODEV;
1078 1362
1079 blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
1080 if (!blkbk) {
1081 pr_alert(DRV_PFX "%s: out of memory!\n", __func__);
1082 return -ENOMEM;
1083 }
1084
1085 mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
1086
1087 blkbk->pending_reqs = kzalloc(sizeof(blkbk->pending_reqs[0]) *
1088 xen_blkif_reqs, GFP_KERNEL);
1089 blkbk->pending_grant_handles = kmalloc(sizeof(blkbk->pending_grant_handles[0]) *
1090 mmap_pages, GFP_KERNEL);
1091 blkbk->pending_pages = kzalloc(sizeof(blkbk->pending_pages[0]) *
1092 mmap_pages, GFP_KERNEL);
1093
1094 if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
1095 !blkbk->pending_pages) {
1096 rc = -ENOMEM;
1097 goto out_of_memory;
1098 }
1099
1100 for (i = 0; i < mmap_pages; i++) {
1101 blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
1102 blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
1103 if (blkbk->pending_pages[i] == NULL) {
1104 rc = -ENOMEM;
1105 goto out_of_memory;
1106 }
1107 }
1108 rc = xen_blkif_interface_init(); 1363 rc = xen_blkif_interface_init();
1109 if (rc) 1364 if (rc)
1110 goto failed_init; 1365 goto failed_init;
1111 1366
1112 INIT_LIST_HEAD(&blkbk->pending_free);
1113 spin_lock_init(&blkbk->pending_free_lock);
1114 init_waitqueue_head(&blkbk->pending_free_wq);
1115
1116 for (i = 0; i < xen_blkif_reqs; i++)
1117 list_add_tail(&blkbk->pending_reqs[i].free_list,
1118 &blkbk->pending_free);
1119
1120 rc = xen_blkif_xenbus_init(); 1367 rc = xen_blkif_xenbus_init();
1121 if (rc) 1368 if (rc)
1122 goto failed_init; 1369 goto failed_init;
1123 1370
1124 return 0;
1125
1126 out_of_memory:
1127 pr_alert(DRV_PFX "%s: out of memory\n", __func__);
1128 failed_init: 1371 failed_init:
1129 kfree(blkbk->pending_reqs);
1130 kfree(blkbk->pending_grant_handles);
1131 if (blkbk->pending_pages) {
1132 for (i = 0; i < mmap_pages; i++) {
1133 if (blkbk->pending_pages[i])
1134 __free_page(blkbk->pending_pages[i]);
1135 }
1136 kfree(blkbk->pending_pages);
1137 }
1138 kfree(blkbk);
1139 blkbk = NULL;
1140 return rc; 1372 return rc;
1141} 1373}
1142 1374
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 60103e2517ba..8d8807563d99 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -50,6 +50,19 @@
50 __func__, __LINE__, ##args) 50 __func__, __LINE__, ##args)
51 51
52 52
53/*
 54 * This is the maximum number of segments allowed in indirect
55 * requests. This value will also be passed to the frontend.
56 */
57#define MAX_INDIRECT_SEGMENTS 256
58
59#define SEGS_PER_INDIRECT_FRAME \
60 (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
61#define MAX_INDIRECT_PAGES \
62 ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
63#define INDIRECT_PAGES(_segs) \
64 ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
65
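The macro arithmetic is easy to check by hand. A standalone sketch, assuming 4 KiB pages and an 8-byte struct blkif_request_segment_aligned (both assumptions taken from the Xen interface headers, not from this hunk):

#include <stdio.h>

#define PAGE_SZ 4096
#define SEG_ENTRY_SZ 8	/* assumed sizeof(struct blkif_request_segment_aligned) */
#define SEGS_PER_FRAME (PAGE_SZ / SEG_ENTRY_SZ)
#define IND_PAGES(segs) (((segs) + SEGS_PER_FRAME - 1) / SEGS_PER_FRAME)

int main(void)
{
	printf("SEGS_PER_INDIRECT_FRAME = %d\n", SEGS_PER_FRAME);	/* 512 */
	printf("INDIRECT_PAGES(256)    = %d\n", IND_PAGES(256));	/* 1   */
	printf("INDIRECT_PAGES(1024)   = %d\n", IND_PAGES(1024));	/* 2   */
	return 0;
}

So the default MAX_INDIRECT_SEGMENTS of 256 fits in a single indirect page per request.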
53/* Not a real protocol. Used to generate ring structs which contain 66/* Not a real protocol. Used to generate ring structs which contain
54 * the elements common to all protocols only. This way we get a 67 * the elements common to all protocols only. This way we get a
55 * compiler-checkable way to use common struct elements, so we can 68 * compiler-checkable way to use common struct elements, so we can
@@ -83,12 +96,31 @@ struct blkif_x86_32_request_other {
83 uint64_t id; /* private guest value, echoed in resp */ 96 uint64_t id; /* private guest value, echoed in resp */
84} __attribute__((__packed__)); 97} __attribute__((__packed__));
85 98
99struct blkif_x86_32_request_indirect {
100 uint8_t indirect_op;
101 uint16_t nr_segments;
102 uint64_t id;
103 blkif_sector_t sector_number;
104 blkif_vdev_t handle;
105 uint16_t _pad1;
106 grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
107 /*
108 * The maximum number of indirect segments (and pages) that will
 109 * be used is determined by MAX_INDIRECT_SEGMENTS; this value
 110 * is also exported to the guest (via the xenstore
111 * feature-max-indirect-segments entry), so the frontend knows how
112 * many indirect segments the backend supports.
113 */
114 uint64_t _pad2; /* make it 64 byte aligned */
115} __attribute__((__packed__));
116
86struct blkif_x86_32_request { 117struct blkif_x86_32_request {
87 uint8_t operation; /* BLKIF_OP_??? */ 118 uint8_t operation; /* BLKIF_OP_??? */
88 union { 119 union {
89 struct blkif_x86_32_request_rw rw; 120 struct blkif_x86_32_request_rw rw;
90 struct blkif_x86_32_request_discard discard; 121 struct blkif_x86_32_request_discard discard;
91 struct blkif_x86_32_request_other other; 122 struct blkif_x86_32_request_other other;
123 struct blkif_x86_32_request_indirect indirect;
92 } u; 124 } u;
93} __attribute__((__packed__)); 125} __attribute__((__packed__));
94 126
@@ -127,12 +159,32 @@ struct blkif_x86_64_request_other {
127 uint64_t id; /* private guest value, echoed in resp */ 159 uint64_t id; /* private guest value, echoed in resp */
128} __attribute__((__packed__)); 160} __attribute__((__packed__));
129 161
162struct blkif_x86_64_request_indirect {
163 uint8_t indirect_op;
164 uint16_t nr_segments;
165 uint32_t _pad1; /* offsetof(blkif_..,u.indirect.id)==8 */
166 uint64_t id;
167 blkif_sector_t sector_number;
168 blkif_vdev_t handle;
169 uint16_t _pad2;
170 grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
171 /*
172 * The maximum number of indirect segments (and pages) that will
 173 * be used is determined by MAX_INDIRECT_SEGMENTS; this value
 174 * is also exported to the guest (via the xenstore
175 * feature-max-indirect-segments entry), so the frontend knows how
176 * many indirect segments the backend supports.
177 */
178 uint32_t _pad3; /* make it 64 byte aligned */
179} __attribute__((__packed__));
180
130struct blkif_x86_64_request { 181struct blkif_x86_64_request {
131 uint8_t operation; /* BLKIF_OP_??? */ 182 uint8_t operation; /* BLKIF_OP_??? */
132 union { 183 union {
133 struct blkif_x86_64_request_rw rw; 184 struct blkif_x86_64_request_rw rw;
134 struct blkif_x86_64_request_discard discard; 185 struct blkif_x86_64_request_discard discard;
135 struct blkif_x86_64_request_other other; 186 struct blkif_x86_64_request_other other;
187 struct blkif_x86_64_request_indirect indirect;
136 } u; 188 } u;
137} __attribute__((__packed__)); 189} __attribute__((__packed__));
138 190
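The _pad fields exist so that 32-bit and 64-bit guests agree on where the shared fields sit and on the 64-byte request size. A compile-time sketch of the 64-bit variant above, with standalone typedefs standing in for the Xen ones (the typedef widths and BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST = 8 are assumptions taken from the interface headers):

#include <stdint.h>
#include <stddef.h>

typedef uint32_t grant_ref_t;
typedef uint64_t blkif_sector_t;
typedef uint16_t blkif_vdev_t;

#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8

struct indirect64 {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint32_t       _pad1;
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad2;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	uint32_t       _pad3;
} __attribute__((__packed__));

struct req64 {
	uint8_t operation;
	union { struct indirect64 indirect; } u;
} __attribute__((__packed__));

/* id must land at byte 8 of the request, matching the rw layout */
_Static_assert(offsetof(struct req64, u.indirect.id) == 8, "id misplaced");
_Static_assert(sizeof(struct req64) == 64, "request not 64 bytes");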
@@ -182,12 +234,26 @@ struct xen_vbd {
182 234
183struct backend_info; 235struct backend_info;
184 236
237/* Number of available flags */
238#define PERSISTENT_GNT_FLAGS_SIZE 2
239/* This persistent grant is currently in use */
240#define PERSISTENT_GNT_ACTIVE 0
241/*
242 * This persistent grant has been used, this flag is set when we remove the
243 * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently.
244 */
245#define PERSISTENT_GNT_WAS_ACTIVE 1
246
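Taken together the two bits implement a second-chance scan: ACTIVE protects grants currently mapped into an in-flight request, and WAS_ACTIVE spares grants touched since the last pass. A user-space sketch of one plausible purge test consistent with these comments (the actual policy lives in blkback.c; the helper below is hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define ACTIVE     (1u << 0)	/* grant mapped into an in-flight request */
#define WAS_ACTIVE (1u << 1)	/* grant used since the last purge scan */

struct gnt { unsigned int flags; };

static bool purge_candidate(struct gnt *g)
{
	if (g->flags & ACTIVE)
		return false;			/* in use right now: never purge */
	if (g->flags & WAS_ACTIVE) {
		g->flags &= ~WAS_ACTIVE;	/* recently used: spare it once */
		return false;
	}
	return true;				/* idle for two scans: reclaim */
}

int main(void)
{
	struct gnt idle = { 0 }, hot = { WAS_ACTIVE };

	printf("%d %d %d\n",
	       purge_candidate(&idle),	/* 1 */
	       purge_candidate(&hot),	/* 0, clears WAS_ACTIVE */
	       purge_candidate(&hot));	/* 1 */
	return 0;
}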
247/* Number of requests that we can fit in a ring */
248#define XEN_BLKIF_REQS 32
185 249
186struct persistent_gnt { 250struct persistent_gnt {
187 struct page *page; 251 struct page *page;
188 grant_ref_t gnt; 252 grant_ref_t gnt;
189 grant_handle_t handle; 253 grant_handle_t handle;
254 DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
190 struct rb_node node; 255 struct rb_node node;
256 struct list_head remove_node;
191}; 257};
192 258
193struct xen_blkif { 259struct xen_blkif {
@@ -219,6 +285,23 @@ struct xen_blkif {
219 /* tree to store persistent grants */ 285 /* tree to store persistent grants */
220 struct rb_root persistent_gnts; 286 struct rb_root persistent_gnts;
221 unsigned int persistent_gnt_c; 287 unsigned int persistent_gnt_c;
288 atomic_t persistent_gnt_in_use;
289 unsigned long next_lru;
290
 291 /* used by the kworker that offloads work from the persistent purge */
292 struct list_head persistent_purge_list;
293 struct work_struct persistent_purge_work;
294
295 /* buffer of free pages to map grant refs */
296 spinlock_t free_pages_lock;
297 int free_pages_num;
298 struct list_head free_pages;
299
300 /* List of all 'pending_req' available */
301 struct list_head pending_free;
302 /* And its spinlock. */
303 spinlock_t pending_free_lock;
304 wait_queue_head_t pending_free_wq;
222 305
223 /* statistics */ 306 /* statistics */
224 unsigned long st_print; 307 unsigned long st_print;
@@ -231,6 +314,41 @@ struct xen_blkif {
231 unsigned long long st_wr_sect; 314 unsigned long long st_wr_sect;
232 315
233 wait_queue_head_t waiting_to_free; 316 wait_queue_head_t waiting_to_free;
317 /* Thread shutdown wait queue. */
318 wait_queue_head_t shutdown_wq;
319};
320
321struct seg_buf {
322 unsigned long offset;
323 unsigned int nsec;
324};
325
326struct grant_page {
327 struct page *page;
328 struct persistent_gnt *persistent_gnt;
329 grant_handle_t handle;
330 grant_ref_t gref;
331};
332
333/*
334 * Each outstanding request that we've passed to the lower device layers has a
335 * 'pending_req' allocated to it. Each buffer_head that completes decrements
336 * the pendcnt towards zero. When it hits zero, the specified domain has a
337 * response queued for it, with the saved 'id' passed back.
338 */
339struct pending_req {
340 struct xen_blkif *blkif;
341 u64 id;
342 int nr_pages;
343 atomic_t pendcnt;
344 unsigned short operation;
345 int status;
346 struct list_head free_list;
347 struct grant_page *segments[MAX_INDIRECT_SEGMENTS];
348 /* Indirect descriptors */
349 struct grant_page *indirect_pages[MAX_INDIRECT_PAGES];
350 struct seg_buf seg[MAX_INDIRECT_SEGMENTS];
351 struct bio *biolist[MAX_INDIRECT_SEGMENTS];
234}; 352};
235 353
236 354
@@ -257,6 +375,7 @@ int xen_blkif_xenbus_init(void);
257 375
258irqreturn_t xen_blkif_be_int(int irq, void *dev_id); 376irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
259int xen_blkif_schedule(void *arg); 377int xen_blkif_schedule(void *arg);
378int xen_blkif_purge_persistent(void *arg);
260 379
261int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt, 380int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
262 struct backend_info *be, int state); 381 struct backend_info *be, int state);
@@ -268,7 +387,7 @@ struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
268static inline void blkif_get_x86_32_req(struct blkif_request *dst, 387static inline void blkif_get_x86_32_req(struct blkif_request *dst,
269 struct blkif_x86_32_request *src) 388 struct blkif_x86_32_request *src)
270{ 389{
271 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; 390 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
272 dst->operation = src->operation; 391 dst->operation = src->operation;
273 switch (src->operation) { 392 switch (src->operation) {
274 case BLKIF_OP_READ: 393 case BLKIF_OP_READ:
@@ -291,6 +410,18 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
291 dst->u.discard.sector_number = src->u.discard.sector_number; 410 dst->u.discard.sector_number = src->u.discard.sector_number;
292 dst->u.discard.nr_sectors = src->u.discard.nr_sectors; 411 dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
293 break; 412 break;
413 case BLKIF_OP_INDIRECT:
414 dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
415 dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
416 dst->u.indirect.handle = src->u.indirect.handle;
417 dst->u.indirect.id = src->u.indirect.id;
418 dst->u.indirect.sector_number = src->u.indirect.sector_number;
419 barrier();
420 j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
421 for (i = 0; i < j; i++)
422 dst->u.indirect.indirect_grefs[i] =
423 src->u.indirect.indirect_grefs[i];
424 break;
294 default: 425 default:
295 /* 426 /*
296 * Don't know how to translate this op. Only get the 427 * Don't know how to translate this op. Only get the
@@ -304,7 +435,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
304static inline void blkif_get_x86_64_req(struct blkif_request *dst, 435static inline void blkif_get_x86_64_req(struct blkif_request *dst,
305 struct blkif_x86_64_request *src) 436 struct blkif_x86_64_request *src)
306{ 437{
307 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; 438 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
308 dst->operation = src->operation; 439 dst->operation = src->operation;
309 switch (src->operation) { 440 switch (src->operation) {
310 case BLKIF_OP_READ: 441 case BLKIF_OP_READ:
@@ -327,6 +458,18 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
327 dst->u.discard.sector_number = src->u.discard.sector_number; 458 dst->u.discard.sector_number = src->u.discard.sector_number;
328 dst->u.discard.nr_sectors = src->u.discard.nr_sectors; 459 dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
329 break; 460 break;
461 case BLKIF_OP_INDIRECT:
462 dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
463 dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
464 dst->u.indirect.handle = src->u.indirect.handle;
465 dst->u.indirect.id = src->u.indirect.id;
466 dst->u.indirect.sector_number = src->u.indirect.sector_number;
467 barrier();
468 j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
469 for (i = 0; i < j; i++)
470 dst->u.indirect.indirect_grefs[i] =
471 src->u.indirect.indirect_grefs[i];
472 break;
330 default: 473 default:
331 /* 474 /*
332 * Don't know how to translate this op. Only get the 475 * Don't know how to translate this op. Only get the
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 04608a6502d7..fe5c3cd10c34 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -98,12 +98,17 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
98 err = PTR_ERR(blkif->xenblkd); 98 err = PTR_ERR(blkif->xenblkd);
99 blkif->xenblkd = NULL; 99 blkif->xenblkd = NULL;
100 xenbus_dev_error(blkif->be->dev, err, "start xenblkd"); 100 xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
101 return;
101 } 102 }
102} 103}
103 104
104static struct xen_blkif *xen_blkif_alloc(domid_t domid) 105static struct xen_blkif *xen_blkif_alloc(domid_t domid)
105{ 106{
106 struct xen_blkif *blkif; 107 struct xen_blkif *blkif;
108 struct pending_req *req, *n;
109 int i, j;
110
111 BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
107 112
108 blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL); 113 blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
109 if (!blkif) 114 if (!blkif)
@@ -118,8 +123,57 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
118 blkif->st_print = jiffies; 123 blkif->st_print = jiffies;
119 init_waitqueue_head(&blkif->waiting_to_free); 124 init_waitqueue_head(&blkif->waiting_to_free);
120 blkif->persistent_gnts.rb_node = NULL; 125 blkif->persistent_gnts.rb_node = NULL;
126 spin_lock_init(&blkif->free_pages_lock);
127 INIT_LIST_HEAD(&blkif->free_pages);
128 blkif->free_pages_num = 0;
129 atomic_set(&blkif->persistent_gnt_in_use, 0);
130
131 INIT_LIST_HEAD(&blkif->pending_free);
132
133 for (i = 0; i < XEN_BLKIF_REQS; i++) {
134 req = kzalloc(sizeof(*req), GFP_KERNEL);
135 if (!req)
136 goto fail;
137 list_add_tail(&req->free_list,
138 &blkif->pending_free);
139 for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
140 req->segments[j] = kzalloc(sizeof(*req->segments[0]),
141 GFP_KERNEL);
142 if (!req->segments[j])
143 goto fail;
144 }
145 for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
146 req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
147 GFP_KERNEL);
148 if (!req->indirect_pages[j])
149 goto fail;
150 }
151 }
152 spin_lock_init(&blkif->pending_free_lock);
153 init_waitqueue_head(&blkif->pending_free_wq);
154 init_waitqueue_head(&blkif->shutdown_wq);
121 155
122 return blkif; 156 return blkif;
157
158fail:
159 list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
160 list_del(&req->free_list);
161 for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
162 if (!req->segments[j])
163 break;
164 kfree(req->segments[j]);
165 }
166 for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
167 if (!req->indirect_pages[j])
168 break;
169 kfree(req->indirect_pages[j]);
170 }
171 kfree(req);
172 }
173
174 kmem_cache_free(xen_blkif_cachep, blkif);
175
176 return ERR_PTR(-ENOMEM);
123} 177}
124 178
125static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page, 179static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
@@ -178,6 +232,7 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
178{ 232{
179 if (blkif->xenblkd) { 233 if (blkif->xenblkd) {
180 kthread_stop(blkif->xenblkd); 234 kthread_stop(blkif->xenblkd);
235 wake_up(&blkif->shutdown_wq);
181 blkif->xenblkd = NULL; 236 blkif->xenblkd = NULL;
182 } 237 }
183 238
@@ -198,8 +253,28 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
198 253
199static void xen_blkif_free(struct xen_blkif *blkif) 254static void xen_blkif_free(struct xen_blkif *blkif)
200{ 255{
256 struct pending_req *req, *n;
257 int i = 0, j;
258
201 if (!atomic_dec_and_test(&blkif->refcnt)) 259 if (!atomic_dec_and_test(&blkif->refcnt))
202 BUG(); 260 BUG();
261
262 /* Check that there is no request in use */
263 list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
264 list_del(&req->free_list);
265
266 for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
267 kfree(req->segments[j]);
268
269 for (j = 0; j < MAX_INDIRECT_PAGES; j++)
270 kfree(req->indirect_pages[j]);
271
272 kfree(req);
273 i++;
274 }
275
276 WARN_ON(i != XEN_BLKIF_REQS);
277
203 kmem_cache_free(xen_blkif_cachep, blkif); 278 kmem_cache_free(xen_blkif_cachep, blkif);
204} 279}
205 280
@@ -678,6 +753,11 @@ again:
678 dev->nodename); 753 dev->nodename);
679 goto abort; 754 goto abort;
680 } 755 }
756 err = xenbus_printf(xbt, dev->nodename, "feature-max-indirect-segments", "%u",
757 MAX_INDIRECT_SEGMENTS);
758 if (err)
759 dev_warn(&dev->dev, "writing %s/feature-max-indirect-segments (%d)",
760 dev->nodename, err);
681 761
682 err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu", 762 err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
683 (unsigned long long)vbd_sz(&be->blkif->vbd)); 763 (unsigned long long)vbd_sz(&be->blkif->vbd));
@@ -704,6 +784,11 @@ again:
704 dev->nodename); 784 dev->nodename);
705 goto abort; 785 goto abort;
706 } 786 }
787 err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
788 bdev_physical_block_size(be->blkif->vbd.bdev));
789 if (err)
790 xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
791 dev->nodename);
707 792
708 err = xenbus_transaction_end(xbt, 0); 793 err = xenbus_transaction_end(xbt, 0);
709 if (err == -EAGAIN) 794 if (err == -EAGAIN)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index d89ef86220f4..a4660bbee8a6 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -74,12 +74,30 @@ struct grant {
74struct blk_shadow { 74struct blk_shadow {
75 struct blkif_request req; 75 struct blkif_request req;
76 struct request *request; 76 struct request *request;
77 struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 77 struct grant **grants_used;
78 struct grant **indirect_grants;
79 struct scatterlist *sg;
80};
81
82struct split_bio {
83 struct bio *bio;
84 atomic_t pending;
85 int err;
78}; 86};
79 87
80static DEFINE_MUTEX(blkfront_mutex); 88static DEFINE_MUTEX(blkfront_mutex);
81static const struct block_device_operations xlvbd_block_fops; 89static const struct block_device_operations xlvbd_block_fops;
82 90
91/*
92 * Maximum number of segments in indirect requests, the actual value used by
93 * the frontend driver is the minimum of this value and the value provided
94 * by the backend driver.
95 */
96
97static unsigned int xen_blkif_max_segments = 32;
98module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
 99MODULE_PARM_DESC(max, "Maximum number of segments in indirect requests (default is 32)");
100
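The effective limit is negotiated rather than taken from either side alone: if the backend's feature-max-indirect-segments key is absent, the frontend keeps indirect descriptors off; otherwise it uses the smaller of the backend's value and this module parameter. A sketch of that decision (hypothetical helper, not the driver's code):

static unsigned int negotiated_segments(int xenbus_err,
					unsigned int backend_max,
					unsigned int frontend_max)
{
	if (xenbus_err)		/* key missing: backend too old */
		return 0;	/* fall back to plain requests */
	return backend_max < frontend_max ? backend_max : frontend_max;
}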
83#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE) 101#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
84 102
85/* 103/*
@@ -98,7 +116,6 @@ struct blkfront_info
98 enum blkif_state connected; 116 enum blkif_state connected;
99 int ring_ref; 117 int ring_ref;
100 struct blkif_front_ring ring; 118 struct blkif_front_ring ring;
101 struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
102 unsigned int evtchn, irq; 119 unsigned int evtchn, irq;
103 struct request_queue *rq; 120 struct request_queue *rq;
104 struct work_struct work; 121 struct work_struct work;
@@ -114,6 +131,7 @@ struct blkfront_info
114 unsigned int discard_granularity; 131 unsigned int discard_granularity;
115 unsigned int discard_alignment; 132 unsigned int discard_alignment;
116 unsigned int feature_persistent:1; 133 unsigned int feature_persistent:1;
134 unsigned int max_indirect_segments;
117 int is_ready; 135 int is_ready;
118}; 136};
119 137
@@ -142,6 +160,13 @@ static DEFINE_SPINLOCK(minor_lock);
142 160
143#define DEV_NAME "xvd" /* name in /dev */ 161#define DEV_NAME "xvd" /* name in /dev */
144 162
163#define SEGS_PER_INDIRECT_FRAME \
164 (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
165#define INDIRECT_GREFS(_segs) \
166 ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
167
168static int blkfront_setup_indirect(struct blkfront_info *info);
169
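These macros set the grant budget of a request: one grant per data segment plus one per indirect page holding the segment entries, which is exactly the max_grefs computation in blkif_queue_request() below. A worked sketch, assuming 4 KiB pages, 8-byte segment entries and a 32-slot ring (all assumed values):

#include <stdio.h>

#define SEGS_PER_FRAME 512	/* assumed PAGE_SIZE / entry size */
#define IND_GREFS(segs) (((segs) + SEGS_PER_FRAME - 1) / SEGS_PER_FRAME)
#define RING_SLOTS 32		/* assumed BLK_RING_SIZE */

int main(void)
{
	unsigned int segs = 256;	/* default backend maximum */
	unsigned int per_req = segs + IND_GREFS(segs);

	printf("grants per request: %u\n", per_req);		   /* 257  */
	printf("worst case per ring: %u\n", per_req * RING_SLOTS); /* 8224 */
	return 0;
}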
145static int get_id_from_freelist(struct blkfront_info *info) 170static int get_id_from_freelist(struct blkfront_info *info)
146{ 171{
147 unsigned long free = info->shadow_free; 172 unsigned long free = info->shadow_free;
@@ -358,7 +383,8 @@ static int blkif_queue_request(struct request *req)
358 struct blkif_request *ring_req; 383 struct blkif_request *ring_req;
359 unsigned long id; 384 unsigned long id;
360 unsigned int fsect, lsect; 385 unsigned int fsect, lsect;
361 int i, ref; 386 int i, ref, n;
387 struct blkif_request_segment_aligned *segments = NULL;
362 388
363 /* 389 /*
364 * Used to store if we are able to queue the request by just using 390 * Used to store if we are able to queue the request by just using
@@ -369,21 +395,27 @@ static int blkif_queue_request(struct request *req)
369 grant_ref_t gref_head; 395 grant_ref_t gref_head;
370 struct grant *gnt_list_entry = NULL; 396 struct grant *gnt_list_entry = NULL;
371 struct scatterlist *sg; 397 struct scatterlist *sg;
398 int nseg, max_grefs;
372 399
373 if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) 400 if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
374 return 1; 401 return 1;
375 402
376 /* Check if we have enought grants to allocate a requests */ 403 max_grefs = info->max_indirect_segments ?
377 if (info->persistent_gnts_c < BLKIF_MAX_SEGMENTS_PER_REQUEST) { 404 info->max_indirect_segments +
405 INDIRECT_GREFS(info->max_indirect_segments) :
406 BLKIF_MAX_SEGMENTS_PER_REQUEST;
407
 408	/* Check if we have enough grants to allocate a request */
409 if (info->persistent_gnts_c < max_grefs) {
378 new_persistent_gnts = 1; 410 new_persistent_gnts = 1;
379 if (gnttab_alloc_grant_references( 411 if (gnttab_alloc_grant_references(
380 BLKIF_MAX_SEGMENTS_PER_REQUEST - info->persistent_gnts_c, 412 max_grefs - info->persistent_gnts_c,
381 &gref_head) < 0) { 413 &gref_head) < 0) {
382 gnttab_request_free_callback( 414 gnttab_request_free_callback(
383 &info->callback, 415 &info->callback,
384 blkif_restart_queue_callback, 416 blkif_restart_queue_callback,
385 info, 417 info,
386 BLKIF_MAX_SEGMENTS_PER_REQUEST); 418 max_grefs);
387 return 1; 419 return 1;
388 } 420 }
389 } else 421 } else
@@ -394,42 +426,67 @@ static int blkif_queue_request(struct request *req)
394 id = get_id_from_freelist(info); 426 id = get_id_from_freelist(info);
395 info->shadow[id].request = req; 427 info->shadow[id].request = req;
396 428
397 ring_req->u.rw.id = id;
398 ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
399 ring_req->u.rw.handle = info->handle;
400
401 ring_req->operation = rq_data_dir(req) ?
402 BLKIF_OP_WRITE : BLKIF_OP_READ;
403
404 if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
405 /*
406 * Ideally we can do an unordered flush-to-disk. In case the
407 * backend onlysupports barriers, use that. A barrier request
408 * a superset of FUA, so we can implement it the same
409 * way. (It's also a FLUSH+FUA, since it is
410 * guaranteed ordered WRT previous writes.)
411 */
412 ring_req->operation = info->flush_op;
413 }
414
415 if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) { 429 if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
416 /* id, sector_number and handle are set above. */
417 ring_req->operation = BLKIF_OP_DISCARD; 430 ring_req->operation = BLKIF_OP_DISCARD;
418 ring_req->u.discard.nr_sectors = blk_rq_sectors(req); 431 ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
432 ring_req->u.discard.id = id;
433 ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
419 if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard) 434 if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
420 ring_req->u.discard.flag = BLKIF_DISCARD_SECURE; 435 ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
421 else 436 else
422 ring_req->u.discard.flag = 0; 437 ring_req->u.discard.flag = 0;
423 } else { 438 } else {
424 ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req, 439 BUG_ON(info->max_indirect_segments == 0 &&
425 info->sg); 440 req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
426 BUG_ON(ring_req->u.rw.nr_segments > 441 BUG_ON(info->max_indirect_segments &&
427 BLKIF_MAX_SEGMENTS_PER_REQUEST); 442 req->nr_phys_segments > info->max_indirect_segments);
428 443 nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
429 for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) { 444 ring_req->u.rw.id = id;
445 if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
446 /*
447 * The indirect operation can only be a BLKIF_OP_READ or
448 * BLKIF_OP_WRITE
449 */
450 BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
451 ring_req->operation = BLKIF_OP_INDIRECT;
452 ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
453 BLKIF_OP_WRITE : BLKIF_OP_READ;
454 ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
455 ring_req->u.indirect.handle = info->handle;
456 ring_req->u.indirect.nr_segments = nseg;
457 } else {
458 ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
459 ring_req->u.rw.handle = info->handle;
460 ring_req->operation = rq_data_dir(req) ?
461 BLKIF_OP_WRITE : BLKIF_OP_READ;
462 if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
463 /*
464 * Ideally we can do an unordered flush-to-disk. In case the
465 * backend onlysupports barriers, use that. A barrier request
466 * a superset of FUA, so we can implement it the same
467 * way. (It's also a FLUSH+FUA, since it is
468 * guaranteed ordered WRT previous writes.)
469 */
470 ring_req->operation = info->flush_op;
471 }
472 ring_req->u.rw.nr_segments = nseg;
473 }
474 for_each_sg(info->shadow[id].sg, sg, nseg, i) {
430 fsect = sg->offset >> 9; 475 fsect = sg->offset >> 9;
431 lsect = fsect + (sg->length >> 9) - 1; 476 lsect = fsect + (sg->length >> 9) - 1;
432 477
478 if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
479 (i % SEGS_PER_INDIRECT_FRAME == 0)) {
480 if (segments)
481 kunmap_atomic(segments);
482
483 n = i / SEGS_PER_INDIRECT_FRAME;
484 gnt_list_entry = get_grant(&gref_head, info);
485 info->shadow[id].indirect_grants[n] = gnt_list_entry;
486 segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
487 ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
488 }
489
433 gnt_list_entry = get_grant(&gref_head, info); 490 gnt_list_entry = get_grant(&gref_head, info);
434 ref = gnt_list_entry->gref; 491 ref = gnt_list_entry->gref;
435 492
@@ -441,8 +498,7 @@ static int blkif_queue_request(struct request *req)
441 498
442 BUG_ON(sg->offset + sg->length > PAGE_SIZE); 499 BUG_ON(sg->offset + sg->length > PAGE_SIZE);
443 500
444 shared_data = kmap_atomic( 501 shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
445 pfn_to_page(gnt_list_entry->pfn));
446 bvec_data = kmap_atomic(sg_page(sg)); 502 bvec_data = kmap_atomic(sg_page(sg));
447 503
448 /* 504 /*
@@ -461,13 +517,23 @@ static int blkif_queue_request(struct request *req)
461 kunmap_atomic(bvec_data); 517 kunmap_atomic(bvec_data);
462 kunmap_atomic(shared_data); 518 kunmap_atomic(shared_data);
463 } 519 }
464 520 if (ring_req->operation != BLKIF_OP_INDIRECT) {
465 ring_req->u.rw.seg[i] = 521 ring_req->u.rw.seg[i] =
466 (struct blkif_request_segment) { 522 (struct blkif_request_segment) {
467 .gref = ref, 523 .gref = ref,
468 .first_sect = fsect, 524 .first_sect = fsect,
469 .last_sect = lsect }; 525 .last_sect = lsect };
526 } else {
527 n = i % SEGS_PER_INDIRECT_FRAME;
528 segments[n] =
529 (struct blkif_request_segment_aligned) {
530 .gref = ref,
531 .first_sect = fsect,
532 .last_sect = lsect };
533 }
470 } 534 }
535 if (segments)
536 kunmap_atomic(segments);
471 } 537 }
472 538
473 info->ring.req_prod_pvt++; 539 info->ring.req_prod_pvt++;
@@ -542,7 +608,9 @@ wait:
542 flush_requests(info); 608 flush_requests(info);
543} 609}
544 610
545static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) 611static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
612 unsigned int physical_sector_size,
613 unsigned int segments)
546{ 614{
547 struct request_queue *rq; 615 struct request_queue *rq;
548 struct blkfront_info *info = gd->private_data; 616 struct blkfront_info *info = gd->private_data;
@@ -564,14 +632,15 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
564 632
565 /* Hard sector size and max sectors impersonate the equiv. hardware. */ 633 /* Hard sector size and max sectors impersonate the equiv. hardware. */
566 blk_queue_logical_block_size(rq, sector_size); 634 blk_queue_logical_block_size(rq, sector_size);
567 blk_queue_max_hw_sectors(rq, 512); 635 blk_queue_physical_block_size(rq, physical_sector_size);
636 blk_queue_max_hw_sectors(rq, (segments * PAGE_SIZE) / 512);
568 637
569 /* Each segment in a request is up to an aligned page in size. */ 638 /* Each segment in a request is up to an aligned page in size. */
570 blk_queue_segment_boundary(rq, PAGE_SIZE - 1); 639 blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
571 blk_queue_max_segment_size(rq, PAGE_SIZE); 640 blk_queue_max_segment_size(rq, PAGE_SIZE);
572 641
573 /* Ensure a merged request will fit in a single I/O ring slot. */ 642 /* Ensure a merged request will fit in a single I/O ring slot. */
574 blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); 643 blk_queue_max_segments(rq, segments);
575 644
576 /* Make sure buffer addresses are sector-aligned. */ 645 /* Make sure buffer addresses are sector-aligned. */
577 blk_queue_dma_alignment(rq, 511); 646 blk_queue_dma_alignment(rq, 511);
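The new limit scales the largest accepted I/O with the segment count: each segment carries at most one page, so max_hw_sectors is segments * PAGE_SIZE / 512. A quick sketch of the resulting sizes (512-byte sectors and 4 KiB pages assumed):

#include <stdio.h>

int main(void)
{
	unsigned int page = 4096, sector = 512;
	unsigned int segs[] = { 32, 64, 256 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("%3u segs -> %4u sectors (%4u KiB)\n",
		       segs[i], segs[i] * page / sector,
		       segs[i] * page / 1024);
	/* 32 -> 256 sectors (128 KiB), 256 -> 2048 sectors (1 MiB) */
	return 0;
}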
@@ -588,13 +657,16 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
588static void xlvbd_flush(struct blkfront_info *info) 657static void xlvbd_flush(struct blkfront_info *info)
589{ 658{
590 blk_queue_flush(info->rq, info->feature_flush); 659 blk_queue_flush(info->rq, info->feature_flush);
591 printk(KERN_INFO "blkfront: %s: %s: %s %s\n", 660 printk(KERN_INFO "blkfront: %s: %s: %s %s %s %s %s\n",
592 info->gd->disk_name, 661 info->gd->disk_name,
593 info->flush_op == BLKIF_OP_WRITE_BARRIER ? 662 info->flush_op == BLKIF_OP_WRITE_BARRIER ?
594 "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ? 663 "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ?
595 "flush diskcache" : "barrier or flush"), 664 "flush diskcache" : "barrier or flush"),
596 info->feature_flush ? "enabled" : "disabled", 665 info->feature_flush ? "enabled;" : "disabled;",
597 info->feature_persistent ? "using persistent grants" : ""); 666 "persistent grants:",
667 info->feature_persistent ? "enabled;" : "disabled;",
668 "indirect descriptors:",
669 info->max_indirect_segments ? "enabled;" : "disabled;");
598} 670}
599 671
600static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset) 672static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -667,7 +739,8 @@ static char *encode_disk_name(char *ptr, unsigned int n)
667 739
668static int xlvbd_alloc_gendisk(blkif_sector_t capacity, 740static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
669 struct blkfront_info *info, 741 struct blkfront_info *info,
670 u16 vdisk_info, u16 sector_size) 742 u16 vdisk_info, u16 sector_size,
743 unsigned int physical_sector_size)
671{ 744{
672 struct gendisk *gd; 745 struct gendisk *gd;
673 int nr_minors = 1; 746 int nr_minors = 1;
@@ -734,7 +807,9 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
734 gd->driverfs_dev = &(info->xbdev->dev); 807 gd->driverfs_dev = &(info->xbdev->dev);
735 set_capacity(gd, capacity); 808 set_capacity(gd, capacity);
736 809
737 if (xlvbd_init_blk_queue(gd, sector_size)) { 810 if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
811 info->max_indirect_segments ? :
812 BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
738 del_gendisk(gd); 813 del_gendisk(gd);
739 goto release; 814 goto release;
740 } 815 }
@@ -818,6 +893,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
818{ 893{
819 struct grant *persistent_gnt; 894 struct grant *persistent_gnt;
820 struct grant *n; 895 struct grant *n;
896 int i, j, segs;
821 897
822 /* Prevent new requests being issued until we fix things up. */ 898 /* Prevent new requests being issued until we fix things up. */
823 spin_lock_irq(&info->io_lock); 899 spin_lock_irq(&info->io_lock);
@@ -843,6 +919,47 @@ static void blkif_free(struct blkfront_info *info, int suspend)
843 } 919 }
844 BUG_ON(info->persistent_gnts_c != 0); 920 BUG_ON(info->persistent_gnts_c != 0);
845 921
922 for (i = 0; i < BLK_RING_SIZE; i++) {
923 /*
924 * Clear persistent grants present in requests already
925 * on the shared ring
926 */
927 if (!info->shadow[i].request)
928 goto free_shadow;
929
930 segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
931 info->shadow[i].req.u.indirect.nr_segments :
932 info->shadow[i].req.u.rw.nr_segments;
933 for (j = 0; j < segs; j++) {
934 persistent_gnt = info->shadow[i].grants_used[j];
935 gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
936 __free_page(pfn_to_page(persistent_gnt->pfn));
937 kfree(persistent_gnt);
938 }
939
940 if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT)
941 /*
 942			 * If this is not an indirect operation, don't try to
943 * free indirect segments
944 */
945 goto free_shadow;
946
947 for (j = 0; j < INDIRECT_GREFS(segs); j++) {
948 persistent_gnt = info->shadow[i].indirect_grants[j];
949 gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
950 __free_page(pfn_to_page(persistent_gnt->pfn));
951 kfree(persistent_gnt);
952 }
953
954free_shadow:
955 kfree(info->shadow[i].grants_used);
956 info->shadow[i].grants_used = NULL;
957 kfree(info->shadow[i].indirect_grants);
958 info->shadow[i].indirect_grants = NULL;
959 kfree(info->shadow[i].sg);
960 info->shadow[i].sg = NULL;
961 }
962
846 /* No more gnttab callback work. */ 963 /* No more gnttab callback work. */
847 gnttab_cancel_free_callback(&info->callback); 964 gnttab_cancel_free_callback(&info->callback);
848 spin_unlock_irq(&info->io_lock); 965 spin_unlock_irq(&info->io_lock);
@@ -867,12 +984,13 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
867 struct blkif_response *bret) 984 struct blkif_response *bret)
868{ 985{
869 int i = 0; 986 int i = 0;
870 struct bio_vec *bvec; 987 struct scatterlist *sg;
871 struct req_iterator iter;
872 unsigned long flags;
873 char *bvec_data; 988 char *bvec_data;
874 void *shared_data; 989 void *shared_data;
875 unsigned int offset = 0; 990 int nseg;
991
992 nseg = s->req.operation == BLKIF_OP_INDIRECT ?
993 s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
876 994
877 if (bret->operation == BLKIF_OP_READ) { 995 if (bret->operation == BLKIF_OP_READ) {
878 /* 996 /*
@@ -881,26 +999,29 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
881 * than PAGE_SIZE, we have to keep track of the current offset, 999 * than PAGE_SIZE, we have to keep track of the current offset,
882 * to be sure we are copying the data from the right shared page. 1000 * to be sure we are copying the data from the right shared page.
883 */ 1001 */
884 rq_for_each_segment(bvec, s->request, iter) { 1002 for_each_sg(s->sg, sg, nseg, i) {
885 BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE); 1003 BUG_ON(sg->offset + sg->length > PAGE_SIZE);
886 if (bvec->bv_offset < offset)
887 i++;
888 BUG_ON(i >= s->req.u.rw.nr_segments);
889 shared_data = kmap_atomic( 1004 shared_data = kmap_atomic(
890 pfn_to_page(s->grants_used[i]->pfn)); 1005 pfn_to_page(s->grants_used[i]->pfn));
891 bvec_data = bvec_kmap_irq(bvec, &flags); 1006 bvec_data = kmap_atomic(sg_page(sg));
892 memcpy(bvec_data, shared_data + bvec->bv_offset, 1007 memcpy(bvec_data + sg->offset,
893 bvec->bv_len); 1008 shared_data + sg->offset,
894 bvec_kunmap_irq(bvec_data, &flags); 1009 sg->length);
1010 kunmap_atomic(bvec_data);
895 kunmap_atomic(shared_data); 1011 kunmap_atomic(shared_data);
896 offset = bvec->bv_offset + bvec->bv_len;
897 } 1012 }
898 } 1013 }
899 /* Add the persistent grant into the list of free grants */ 1014 /* Add the persistent grant into the list of free grants */
900 for (i = 0; i < s->req.u.rw.nr_segments; i++) { 1015 for (i = 0; i < nseg; i++) {
901 list_add(&s->grants_used[i]->node, &info->persistent_gnts); 1016 list_add(&s->grants_used[i]->node, &info->persistent_gnts);
902 info->persistent_gnts_c++; 1017 info->persistent_gnts_c++;
903 } 1018 }
1019 if (s->req.operation == BLKIF_OP_INDIRECT) {
1020 for (i = 0; i < INDIRECT_GREFS(nseg); i++) {
1021 list_add(&s->indirect_grants[i]->node, &info->persistent_gnts);
1022 info->persistent_gnts_c++;
1023 }
1024 }
904} 1025}
905 1026
906static irqreturn_t blkif_interrupt(int irq, void *dev_id) 1027static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -1034,14 +1155,6 @@ static int setup_blkring(struct xenbus_device *dev,
1034 SHARED_RING_INIT(sring); 1155 SHARED_RING_INIT(sring);
1035 FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); 1156 FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
1036 1157
1037 sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
1038
1039 /* Allocate memory for grants */
1040 err = fill_grant_buffer(info, BLK_RING_SIZE *
1041 BLKIF_MAX_SEGMENTS_PER_REQUEST);
1042 if (err)
1043 goto fail;
1044
1045 err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); 1158 err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
1046 if (err < 0) { 1159 if (err < 0) {
1047 free_page((unsigned long)sring); 1160 free_page((unsigned long)sring);
@@ -1223,13 +1336,84 @@ static int blkfront_probe(struct xenbus_device *dev,
1223 return 0; 1336 return 0;
1224} 1337}
1225 1338
1339/*
1340 * This is a clone of md_trim_bio, used to split a bio into smaller ones
1341 */
1342static void trim_bio(struct bio *bio, int offset, int size)
1343{
1344 /* 'bio' is a cloned bio which we need to trim to match
1345 * the given offset and size.
1346 * This requires adjusting bi_sector, bi_size, and bi_io_vec
1347 */
1348 int i;
1349 struct bio_vec *bvec;
1350 int sofar = 0;
1351
1352 size <<= 9;
1353 if (offset == 0 && size == bio->bi_size)
1354 return;
1355
1356 bio->bi_sector += offset;
1357 bio->bi_size = size;
1358 offset <<= 9;
1359 clear_bit(BIO_SEG_VALID, &bio->bi_flags);
1360
1361 while (bio->bi_idx < bio->bi_vcnt &&
1362 bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
1363 /* remove this whole bio_vec */
1364 offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
1365 bio->bi_idx++;
1366 }
1367 if (bio->bi_idx < bio->bi_vcnt) {
1368 bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
1369 bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
1370 }
 1371	/* avoid any complications with bi_idx being non-zero */
1372 if (bio->bi_idx) {
1373 memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
1374 (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
1375 bio->bi_vcnt -= bio->bi_idx;
1376 bio->bi_idx = 0;
1377 }
1378 /* Make sure vcnt and last bv are not too big */
1379 bio_for_each_segment(bvec, bio, i) {
1380 if (sofar + bvec->bv_len > size)
1381 bvec->bv_len = size - sofar;
1382 if (bvec->bv_len == 0) {
1383 bio->bi_vcnt = i;
1384 break;
1385 }
1386 sofar += bvec->bv_len;
1387 }
1388}
1389
1390static void split_bio_end(struct bio *bio, int error)
1391{
1392 struct split_bio *split_bio = bio->bi_private;
1393
1394 if (error)
1395 split_bio->err = error;
1396
1397 if (atomic_dec_and_test(&split_bio->pending)) {
1398 split_bio->bio->bi_phys_segments = 0;
1399 bio_endio(split_bio->bio, split_bio->err);
1400 kfree(split_bio);
1401 }
1402 bio_put(bio);
1403}
1226 1404
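split_bio_end() is the classic fan-in completion: every clone decrements the shared counter, and whichever clone finishes last completes the parent bio and frees the tracking struct. A standalone model of that pattern using C11 atomics in place of atomic_dec_and_test() (user-space sketch, not the driver's code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct split {
	atomic_int pending;
	int err;
};

static void child_done(struct split *s, int error)
{
	if (error)
		s->err = error;
	if (atomic_fetch_sub(&s->pending, 1) == 1) {	/* we were the last */
		printf("parent completes, err=%d\n", s->err);
		free(s);
	}
}

int main(void)
{
	struct split *s = malloc(sizeof(*s));

	if (!s)
		return 1;
	atomic_init(&s->pending, 3);
	s->err = 0;
	child_done(s, 0);
	child_done(s, -5);	/* an -EIO-style error is recorded */
	child_done(s, 0);	/* last child: parent ends with err=-5 */
	return 0;
}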
1227static int blkif_recover(struct blkfront_info *info) 1405static int blkif_recover(struct blkfront_info *info)
1228{ 1406{
1229 int i; 1407 int i;
1230 struct blkif_request *req; 1408 struct request *req, *n;
1231 struct blk_shadow *copy; 1409 struct blk_shadow *copy;
1232 int j; 1410 int rc;
1411 struct bio *bio, *cloned_bio;
1412 struct bio_list bio_list, merge_bio;
1413 unsigned int segs, offset;
1414 int pending, size;
1415 struct split_bio *split_bio;
1416 struct list_head requests;
1233 1417
1234 /* Stage 1: Make a safe copy of the shadow state. */ 1418 /* Stage 1: Make a safe copy of the shadow state. */
1235 copy = kmemdup(info->shadow, sizeof(info->shadow), 1419 copy = kmemdup(info->shadow, sizeof(info->shadow),
@@ -1244,36 +1428,64 @@ static int blkif_recover(struct blkfront_info *info)
1244 info->shadow_free = info->ring.req_prod_pvt; 1428 info->shadow_free = info->ring.req_prod_pvt;
1245 info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff; 1429 info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
1246 1430
1247 /* Stage 3: Find pending requests and requeue them. */ 1431 rc = blkfront_setup_indirect(info);
1432 if (rc) {
1433 kfree(copy);
1434 return rc;
1435 }
1436
1437 segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
1438 blk_queue_max_segments(info->rq, segs);
1439 bio_list_init(&bio_list);
1440 INIT_LIST_HEAD(&requests);
1248 for (i = 0; i < BLK_RING_SIZE; i++) { 1441 for (i = 0; i < BLK_RING_SIZE; i++) {
1249 /* Not in use? */ 1442 /* Not in use? */
1250 if (!copy[i].request) 1443 if (!copy[i].request)
1251 continue; 1444 continue;
1252 1445
1253 /* Grab a request slot and copy shadow state into it. */ 1446 /*
1254 req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); 1447 * Get the bios in the request so we can re-queue them.
1255 *req = copy[i].req; 1448 */
1256 1449 if (copy[i].request->cmd_flags &
1257 /* We get a new request id, and must reset the shadow state. */ 1450 (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
1258 req->u.rw.id = get_id_from_freelist(info); 1451 /*
1259 memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i])); 1452 * Flush operations don't contain bios, so
1260 1453 * we need to requeue the whole request
1261 if (req->operation != BLKIF_OP_DISCARD) { 1454 */
1262 /* Rewrite any grant references invalidated by susp/resume. */ 1455 list_add(&copy[i].request->queuelist, &requests);
1263 for (j = 0; j < req->u.rw.nr_segments; j++) 1456 continue;
1264 gnttab_grant_foreign_access_ref(
1265 req->u.rw.seg[j].gref,
1266 info->xbdev->otherend_id,
1267 pfn_to_mfn(copy[i].grants_used[j]->pfn),
1268 0);
1269 } 1457 }
1270 info->shadow[req->u.rw.id].req = *req; 1458 merge_bio.head = copy[i].request->bio;
1271 1459 merge_bio.tail = copy[i].request->biotail;
1272 info->ring.req_prod_pvt++; 1460 bio_list_merge(&bio_list, &merge_bio);
1461 copy[i].request->bio = NULL;
1462 blk_put_request(copy[i].request);
1273 } 1463 }
1274 1464
1275 kfree(copy); 1465 kfree(copy);
1276 1466
1467 /*
 1468 * Empty the queue; this is important because we might have
 1469 * requests in the queue with more segments than we can
 1470 * handle now.
1471 */
1472 spin_lock_irq(&info->io_lock);
1473 while ((req = blk_fetch_request(info->rq)) != NULL) {
1474 if (req->cmd_flags &
1475 (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
1476 list_add(&req->queuelist, &requests);
1477 continue;
1478 }
1479 merge_bio.head = req->bio;
1480 merge_bio.tail = req->biotail;
1481 bio_list_merge(&bio_list, &merge_bio);
1482 req->bio = NULL;
1483 if (req->cmd_flags & (REQ_FLUSH | REQ_FUA))
1484 pr_alert("diskcache flush request found!\n");
1485 __blk_put_request(info->rq, req);
1486 }
1487 spin_unlock_irq(&info->io_lock);
1488
1277 xenbus_switch_state(info->xbdev, XenbusStateConnected); 1489 xenbus_switch_state(info->xbdev, XenbusStateConnected);
1278 1490
1279 spin_lock_irq(&info->io_lock); 1491 spin_lock_irq(&info->io_lock);
@@ -1281,14 +1493,50 @@ static int blkif_recover(struct blkfront_info *info)
1281 /* Now safe for us to use the shared ring */ 1493 /* Now safe for us to use the shared ring */
1282 info->connected = BLKIF_STATE_CONNECTED; 1494 info->connected = BLKIF_STATE_CONNECTED;
1283 1495
1284 /* Send off requeued requests */
1285 flush_requests(info);
1286
1287 /* Kick any other new requests queued since we resumed */ 1496 /* Kick any other new requests queued since we resumed */
1288 kick_pending_request_queues(info); 1497 kick_pending_request_queues(info);
1289 1498
1499 list_for_each_entry_safe(req, n, &requests, queuelist) {
1500 /* Requeue pending requests (flush or discard) */
1501 list_del_init(&req->queuelist);
1502 BUG_ON(req->nr_phys_segments > segs);
1503 blk_requeue_request(info->rq, req);
1504 }
1290 spin_unlock_irq(&info->io_lock); 1505 spin_unlock_irq(&info->io_lock);
1291 1506
1507 while ((bio = bio_list_pop(&bio_list)) != NULL) {
1508 /* Traverse the list of pending bios and re-queue them */
1509 if (bio_segments(bio) > segs) {
1510 /*
1511 * This bio has more segments than what we can
1512 * handle, we have to split it.
1513 */
1514 pending = (bio_segments(bio) + segs - 1) / segs;
1515 split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO);
1516 BUG_ON(split_bio == NULL);
1517 atomic_set(&split_bio->pending, pending);
1518 split_bio->bio = bio;
1519 for (i = 0; i < pending; i++) {
1520 offset = (i * segs * PAGE_SIZE) >> 9;
1521 size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
1522 (unsigned int)(bio->bi_size >> 9) - offset);
1523 cloned_bio = bio_clone(bio, GFP_NOIO);
1524 BUG_ON(cloned_bio == NULL);
1525 trim_bio(cloned_bio, offset, size);
1526 cloned_bio->bi_private = split_bio;
1527 cloned_bio->bi_end_io = split_bio_end;
1528 submit_bio(cloned_bio->bi_rw, cloned_bio);
1529 }
1530 /*
1531 * Now we have to wait for all those smaller bios to
1532 * end, so we can also end the "parent" bio.
1533 */
1534 continue;
1535 }
1536 /* We don't need to split this bio */
1537 submit_bio(bio->bi_rw, bio);
1538 }
1539
1292 return 0; 1540 return 0;
1293} 1541}
1294 1542
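
The splitting loop near the end of blkif_recover() is plain ceiling-division geometry: a bio carrying more than segs segments is cut into pending = ceil(segments / segs) clones, each covering at most segs pages, with offset and size expressed in 512-byte sectors. A standalone sketch of that arithmetic (PAGE_SIZE and the sample values are assumptions for illustration):

#include <stdio.h>

#define PAGE_SIZE    4096u
#define SECTOR_SHIFT 9

int main(void)
{
	unsigned int segs = 32;          /* max segments per request (assumed) */
	unsigned int bio_segments = 100; /* segments in the oversized bio */
	unsigned int bio_sectors = bio_segments * (PAGE_SIZE >> SECTOR_SHIFT);
	unsigned int pending = (bio_segments + segs - 1) / segs; /* ceil */

	for (unsigned int i = 0; i < pending; i++) {
		unsigned int offset = (i * segs * PAGE_SIZE) >> SECTOR_SHIFT;
		unsigned int size = (segs * PAGE_SIZE) >> SECTOR_SHIFT;

		if (size > bio_sectors - offset)  /* last, short fragment */
			size = bio_sectors - offset;
		printf("clone %u: offset %u sectors, size %u sectors\n",
		       i, offset, size);
	}
	return 0;
}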
@@ -1308,8 +1556,12 @@ static int blkfront_resume(struct xenbus_device *dev)
1308 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); 1556 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
1309 1557
1310 err = talk_to_blkback(dev, info); 1558 err = talk_to_blkback(dev, info);
1311 if (info->connected == BLKIF_STATE_SUSPENDED && !err) 1559
1312 err = blkif_recover(info); 1560 /*
 1561 * We have to wait for the backend to switch to the
 1562 * connected state, since we want to read which
1563 * features it supports.
1564 */
1313 1565
1314 return err; 1566 return err;
1315} 1567}
@@ -1387,6 +1639,60 @@ static void blkfront_setup_discard(struct blkfront_info *info)
1387 kfree(type); 1639 kfree(type);
1388} 1640}
1389 1641
1642static int blkfront_setup_indirect(struct blkfront_info *info)
1643{
1644 unsigned int indirect_segments, segs;
1645 int err, i;
1646
1647 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1648 "feature-max-indirect-segments", "%u", &indirect_segments,
1649 NULL);
1650 if (err) {
1651 info->max_indirect_segments = 0;
1652 segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
1653 } else {
1654 info->max_indirect_segments = min(indirect_segments,
1655 xen_blkif_max_segments);
1656 segs = info->max_indirect_segments;
1657 }
1658
1659 err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE);
1660 if (err)
1661 goto out_of_memory;
1662
1663 for (i = 0; i < BLK_RING_SIZE; i++) {
1664 info->shadow[i].grants_used = kzalloc(
1665 sizeof(info->shadow[i].grants_used[0]) * segs,
1666 GFP_NOIO);
1667 info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO);
1668 if (info->max_indirect_segments)
1669 info->shadow[i].indirect_grants = kzalloc(
1670 sizeof(info->shadow[i].indirect_grants[0]) *
1671 INDIRECT_GREFS(segs),
1672 GFP_NOIO);
1673 if ((info->shadow[i].grants_used == NULL) ||
1674 (info->shadow[i].sg == NULL) ||
1675 (info->max_indirect_segments &&
1676 (info->shadow[i].indirect_grants == NULL)))
1677 goto out_of_memory;
1678 sg_init_table(info->shadow[i].sg, segs);
1679 }
1680
1681
1682 return 0;
1683
1684out_of_memory:
1685 for (i = 0; i < BLK_RING_SIZE; i++) {
1686 kfree(info->shadow[i].grants_used);
1687 info->shadow[i].grants_used = NULL;
1688 kfree(info->shadow[i].sg);
1689 info->shadow[i].sg = NULL;
1690 kfree(info->shadow[i].indirect_grants);
1691 info->shadow[i].indirect_grants = NULL;
1692 }
1693 return -ENOMEM;
1694}
1695
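
blkfront_setup_indirect() sizes the persistent-grant pool as (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE: one grant per data segment plus one per indirect frame, for every slot in the ring. Assuming INDIRECT_GREFS() rounds the segment count up to whole indirect frames (the macro itself is defined earlier in the driver and not shown here), the budget works out as in this sketch:

#include <stdio.h>

/* Segments that fit in one indirect grant page; 512 is an
 * assumed value (PAGE_SIZE / 8-byte segment entries). */
#define SEGS_PER_INDIRECT_FRAME 512u
#define INDIRECT_GREFS(segs) \
	(((segs) + SEGS_PER_INDIRECT_FRAME - 1) / SEGS_PER_INDIRECT_FRAME)
#define BLK_RING_SIZE 32u        /* assumed ring depth */

int main(void)
{
	unsigned int segs = 256;  /* negotiated max indirect segments */
	unsigned int grants = (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE;

	/* One grant per data segment plus one per indirect frame,
	 * pre-allocated for every possible in-flight request. */
	printf("grants to pre-allocate: %u\n", grants);
	return 0;
}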
1390/* 1696/*
1391 * Invoked when the backend is finally 'ready' (and has produced 1697 * Invoked when the backend is finally 'ready' (and has produced
1392 * the details about the physical device - #sectors, size, etc). 1698 * the details about the physical device - #sectors, size, etc).
@@ -1395,6 +1701,7 @@ static void blkfront_connect(struct blkfront_info *info)
1395{ 1701{
1396 unsigned long long sectors; 1702 unsigned long long sectors;
1397 unsigned long sector_size; 1703 unsigned long sector_size;
1704 unsigned int physical_sector_size;
1398 unsigned int binfo; 1705 unsigned int binfo;
1399 int err; 1706 int err;
1400 int barrier, flush, discard, persistent; 1707 int barrier, flush, discard, persistent;
@@ -1414,8 +1721,15 @@ static void blkfront_connect(struct blkfront_info *info)
1414 set_capacity(info->gd, sectors); 1721 set_capacity(info->gd, sectors);
1415 revalidate_disk(info->gd); 1722 revalidate_disk(info->gd);
1416 1723
1417 /* fall through */ 1724 return;
1418 case BLKIF_STATE_SUSPENDED: 1725 case BLKIF_STATE_SUSPENDED:
1726 /*
 1727 * If we are recovering from suspension, we need to wait
 1728 * for the backend to announce its features before
 1729 * reconnecting; at the least we need to know whether the
 1730 * backend supports indirect descriptors, and how many.
1731 */
1732 blkif_recover(info);
1419 return; 1733 return;
1420 1734
1421 default: 1735 default:
@@ -1437,6 +1751,16 @@ static void blkfront_connect(struct blkfront_info *info)
1437 return; 1751 return;
1438 } 1752 }
1439 1753
1754 /*
 1755 * physical-sector-size is a newer field, so old backends may not
1756 * provide this. Assume physical sector size to be the same as
1757 * sector_size in that case.
1758 */
1759 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1760 "physical-sector-size", "%u", &physical_sector_size);
1761 if (err != 1)
1762 physical_sector_size = sector_size;
1763
1440 info->feature_flush = 0; 1764 info->feature_flush = 0;
1441 info->flush_op = 0; 1765 info->flush_op = 0;
1442 1766
@@ -1483,7 +1807,15 @@ static void blkfront_connect(struct blkfront_info *info)
1483 else 1807 else
1484 info->feature_persistent = persistent; 1808 info->feature_persistent = persistent;
1485 1809
1486 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); 1810 err = blkfront_setup_indirect(info);
1811 if (err) {
1812 xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
1813 info->xbdev->otherend);
1814 return;
1815 }
1816
1817 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
1818 physical_sector_size);
1487 if (err) { 1819 if (err) {
1488 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", 1820 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
1489 info->xbdev->otherend); 1821 info->xbdev->otherend);
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 11f467c00d0a..a12b923bbaca 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -91,6 +91,10 @@ static struct usb_device_id ath3k_table[] = {
91 { USB_DEVICE(0x0489, 0xe04e) }, 91 { USB_DEVICE(0x0489, 0xe04e) },
92 { USB_DEVICE(0x0489, 0xe056) }, 92 { USB_DEVICE(0x0489, 0xe056) },
93 { USB_DEVICE(0x0489, 0xe04d) }, 93 { USB_DEVICE(0x0489, 0xe04d) },
94 { USB_DEVICE(0x04c5, 0x1330) },
95 { USB_DEVICE(0x13d3, 0x3402) },
96 { USB_DEVICE(0x0cf3, 0x3121) },
97 { USB_DEVICE(0x0cf3, 0xe003) },
94 98
95 /* Atheros AR5BBU12 with sflash firmware */ 99 /* Atheros AR5BBU12 with sflash firmware */
96 { USB_DEVICE(0x0489, 0xE02C) }, 100 { USB_DEVICE(0x0489, 0xE02C) },
@@ -128,6 +132,10 @@ static struct usb_device_id ath3k_blist_tbl[] = {
128 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, 132 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
129 { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, 133 { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
130 { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, 134 { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
135 { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
136 { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
137 { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
138 { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
131 139
132 /* Atheros AR5BBU22 with sflash firmware */ 140 /* Atheros AR5BBU22 with sflash firmware */
133 { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, 141 { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
@@ -193,24 +201,44 @@ error:
193 201
194static int ath3k_get_state(struct usb_device *udev, unsigned char *state) 202static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
195{ 203{
196 int pipe = 0; 204 int ret, pipe = 0;
205 char *buf;
206
207 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
208 if (!buf)
209 return -ENOMEM;
197 210
198 pipe = usb_rcvctrlpipe(udev, 0); 211 pipe = usb_rcvctrlpipe(udev, 0);
199 return usb_control_msg(udev, pipe, ATH3K_GETSTATE, 212 ret = usb_control_msg(udev, pipe, ATH3K_GETSTATE,
200 USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, 213 USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
201 state, 0x01, USB_CTRL_SET_TIMEOUT); 214 buf, sizeof(*buf), USB_CTRL_SET_TIMEOUT);
215
216 *state = *buf;
217 kfree(buf);
218
219 return ret;
202} 220}
203 221
204static int ath3k_get_version(struct usb_device *udev, 222static int ath3k_get_version(struct usb_device *udev,
205 struct ath3k_version *version) 223 struct ath3k_version *version)
206{ 224{
207 int pipe = 0; 225 int ret, pipe = 0;
226 struct ath3k_version *buf;
227 const int size = sizeof(*buf);
228
229 buf = kmalloc(size, GFP_KERNEL);
230 if (!buf)
231 return -ENOMEM;
208 232
209 pipe = usb_rcvctrlpipe(udev, 0); 233 pipe = usb_rcvctrlpipe(udev, 0);
210 return usb_control_msg(udev, pipe, ATH3K_GETVERSION, 234 ret = usb_control_msg(udev, pipe, ATH3K_GETVERSION,
211 USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version, 235 USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
212 sizeof(struct ath3k_version), 236 buf, size, USB_CTRL_SET_TIMEOUT);
213 USB_CTRL_SET_TIMEOUT); 237
238 memcpy(version, buf, size);
239 kfree(buf);
240
241 return ret;
214} 242}
215 243
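
Both ath3k helpers above are fixed the same way: buffers passed to usb_control_msg() must come from the heap, because the USB core may DMA-map them, and a caller-supplied (possibly on-stack) buffer is not safe for that. A compact userspace model of the resulting allocate/transfer/copy-back shape (the transfer itself is stubbed out; nothing here is the real USB API):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct fw_version { unsigned int rom; unsigned char state; };

/* Stand-in for the actual control transfer. */
static int do_control_transfer(void *buf, size_t len)
{
	memset(buf, 0xab, len);   /* pretend the device filled it */
	return (int)len;
}

static int get_version(struct fw_version *out)
{
	struct fw_version *buf;
	int ret;

	/* Heap allocation keeps the buffer DMA-safe; the caller's
	 * storage (possibly on the stack) is never mapped. */
	buf = malloc(sizeof(*buf));
	if (!buf)
		return -1;

	ret = do_control_transfer(buf, sizeof(*buf));
	memcpy(out, buf, sizeof(*buf));
	free(buf);
	return ret;
}

int main(void)
{
	struct fw_version v;      /* on-stack, and that's fine now */

	printf("transferred %d bytes\n", get_version(&v));
	return 0;
}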
216static int ath3k_load_fwfile(struct usb_device *udev, 244static int ath3k_load_fwfile(struct usb_device *udev,
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index de4cf4daa2f4..8e16f0af6358 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -154,6 +154,10 @@ static struct usb_device_id blacklist_table[] = {
154 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, 154 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
155 { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, 155 { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
156 { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, 156 { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
157 { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
158 { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
159 { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
160 { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
157 161
158 /* Atheros AR5BBU12 with sflash firmware */ 162 /* Atheros AR5BBU12 with sflash firmware */
159 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, 163 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -1095,7 +1099,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
1095 if (IS_ERR(skb)) { 1099 if (IS_ERR(skb)) {
1096 BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)", 1100 BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)",
1097 hdev->name, cmd->opcode, PTR_ERR(skb)); 1101 hdev->name, cmd->opcode, PTR_ERR(skb));
1098 return -PTR_ERR(skb); 1102 return PTR_ERR(skb);
1099 } 1103 }
1100 1104
1101 /* It ensures that the returned event matches the event data read from 1105 /* It ensures that the returned event matches the event data read from
@@ -1147,7 +1151,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
1147 if (IS_ERR(skb)) { 1151 if (IS_ERR(skb)) {
1148 BT_ERR("%s sending initial HCI reset command failed (%ld)", 1152 BT_ERR("%s sending initial HCI reset command failed (%ld)",
1149 hdev->name, PTR_ERR(skb)); 1153 hdev->name, PTR_ERR(skb));
1150 return -PTR_ERR(skb); 1154 return PTR_ERR(skb);
1151 } 1155 }
1152 kfree_skb(skb); 1156 kfree_skb(skb);
1153 1157
@@ -1161,7 +1165,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
1161 if (IS_ERR(skb)) { 1165 if (IS_ERR(skb)) {
1162 BT_ERR("%s reading Intel fw version command failed (%ld)", 1166 BT_ERR("%s reading Intel fw version command failed (%ld)",
1163 hdev->name, PTR_ERR(skb)); 1167 hdev->name, PTR_ERR(skb));
1164 return -PTR_ERR(skb); 1168 return PTR_ERR(skb);
1165 } 1169 }
1166 1170
1167 if (skb->len != sizeof(*ver)) { 1171 if (skb->len != sizeof(*ver)) {
@@ -1219,7 +1223,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
1219 BT_ERR("%s entering Intel manufacturer mode failed (%ld)", 1223 BT_ERR("%s entering Intel manufacturer mode failed (%ld)",
1220 hdev->name, PTR_ERR(skb)); 1224 hdev->name, PTR_ERR(skb));
1221 release_firmware(fw); 1225 release_firmware(fw);
1222 return -PTR_ERR(skb); 1226 return PTR_ERR(skb);
1223 } 1227 }
1224 1228
1225 if (skb->data[0]) { 1229 if (skb->data[0]) {
@@ -1276,7 +1280,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
1276 if (IS_ERR(skb)) { 1280 if (IS_ERR(skb)) {
1277 BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", 1281 BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
1278 hdev->name, PTR_ERR(skb)); 1282 hdev->name, PTR_ERR(skb));
1279 return -PTR_ERR(skb); 1283 return PTR_ERR(skb);
1280 } 1284 }
1281 kfree_skb(skb); 1285 kfree_skb(skb);
1282 1286
@@ -1292,7 +1296,7 @@ exit_mfg_disable:
1292 if (IS_ERR(skb)) { 1296 if (IS_ERR(skb)) {
1293 BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", 1297 BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
1294 hdev->name, PTR_ERR(skb)); 1298 hdev->name, PTR_ERR(skb));
1295 return -PTR_ERR(skb); 1299 return PTR_ERR(skb);
1296 } 1300 }
1297 kfree_skb(skb); 1301 kfree_skb(skb);
1298 1302
@@ -1310,7 +1314,7 @@ exit_mfg_deactivate:
1310 if (IS_ERR(skb)) { 1314 if (IS_ERR(skb)) {
1311 BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", 1315 BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
1312 hdev->name, PTR_ERR(skb)); 1316 hdev->name, PTR_ERR(skb));
1313 return -PTR_ERR(skb); 1317 return PTR_ERR(skb);
1314 } 1318 }
1315 kfree_skb(skb); 1319 kfree_skb(skb);
1316 1320
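
The repeated one-character change in btusb_setup_intel() corrects a double negation: PTR_ERR() on an error pointer already yields a negative errno, so -PTR_ERR(skb) handed callers a positive value they would not recognize as failure. A small model of the ERR_PTR encoding (userspace stand-ins, not the kernel helpers):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Userspace stand-ins for the kernel's ERR_PTR()/PTR_ERR(). */
static void *err_ptr(long error) { return (void *)(intptr_t)error; }
static long ptr_err(const void *p) { return (long)(intptr_t)p; }

int main(void)
{
	void *skb = err_ptr(-ENODEV);   /* failed command/transfer */

	printf("PTR_ERR  = %ld (correct: negative errno)\n", ptr_err(skb));
	printf("-PTR_ERR = %ld (bug: positive, not seen as failure)\n",
	       -ptr_err(skb));
	return 0;
}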
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index bf5d2477cb77..15f2e7025b78 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -129,7 +129,8 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
129 off_t j, io_pg_start; 129 off_t j, io_pg_start;
130 int io_pg_count; 130 int io_pg_count;
131 131
132 if (type != 0 || mem->type != 0) { 132 if (type != mem->type ||
133 agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
133 return -EINVAL; 134 return -EINVAL;
134 } 135 }
135 136
@@ -175,7 +176,8 @@ parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
175 struct _parisc_agp_info *info = &parisc_agp_info; 176 struct _parisc_agp_info *info = &parisc_agp_info;
176 int i, io_pg_start, io_pg_count; 177 int i, io_pg_start, io_pg_count;
177 178
178 if (type != 0 || mem->type != 0) { 179 if (type != mem->type ||
180 agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
179 return -EINVAL; 181 return -EINVAL;
180 } 182 }
181 183
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 1b456fe9b87a..fc45567ad3ac 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -272,9 +272,12 @@ static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
272 unsigned long flags; 272 unsigned long flags;
273 273
274 spin_lock_irqsave(&portdev->ports_lock, flags); 274 spin_lock_irqsave(&portdev->ports_lock, flags);
275 list_for_each_entry(port, &portdev->ports, list) 275 list_for_each_entry(port, &portdev->ports, list) {
276 if (port->cdev->dev == dev) 276 if (port->cdev->dev == dev) {
277 kref_get(&port->kref);
277 goto out; 278 goto out;
279 }
280 }
278 port = NULL; 281 port = NULL;
279out: 282out:
280 spin_unlock_irqrestore(&portdev->ports_lock, flags); 283 spin_unlock_irqrestore(&portdev->ports_lock, flags);
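
Moving kref_get() inside the ports_lock-protected lookup closes a window in which open() could find a port, drop the lock, and race with unplug freeing it before the reference was taken. A userspace sketch of the lookup-and-get-under-lock idiom with pthreads and C11 atomics (types heavily simplified):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct port {
	atomic_int kref;
	int devt;
	struct port *next;
};

static pthread_mutex_t ports_lock = PTHREAD_MUTEX_INITIALIZER;
static struct port *ports;

/* Lookup and reference acquisition are one atomic step with
 * respect to unplug, which removes under the same lock. */
static struct port *find_port_get(int devt)
{
	struct port *p;

	pthread_mutex_lock(&ports_lock);
	for (p = ports; p; p = p->next) {
		if (p->devt == devt) {
			atomic_fetch_add(&p->kref, 1);
			break;
		}
	}
	pthread_mutex_unlock(&ports_lock);
	return p;   /* NULL means the port is already gone */
}

static void port_put(struct port *p)
{
	if (atomic_fetch_sub(&p->kref, 1) == 1)
		free(p);   /* last reference drops the object */
}

int main(void)
{
	struct port *p0 = calloc(1, sizeof(*p0));

	atomic_init(&p0->kref, 1);   /* the list's own reference */
	p0->devt = 5;
	ports = p0;

	struct port *p = find_port_get(5);
	if (p)
		port_put(p);         /* drop open()'s reference */
	port_put(p0);                /* drop the list reference */
	return 0;
}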
@@ -746,6 +749,10 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
746 749
747 port = filp->private_data; 750 port = filp->private_data;
748 751
752 /* Port is hot-unplugged. */
753 if (!port->guest_connected)
754 return -ENODEV;
755
749 if (!port_has_data(port)) { 756 if (!port_has_data(port)) {
750 /* 757 /*
751 * If nothing's connected on the host just return 0 in 758 * If nothing's connected on the host just return 0 in
@@ -762,7 +769,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
762 if (ret < 0) 769 if (ret < 0)
763 return ret; 770 return ret;
764 } 771 }
765 /* Port got hot-unplugged. */ 772 /* Port got hot-unplugged while we were waiting above. */
766 if (!port->guest_connected) 773 if (!port->guest_connected)
767 return -ENODEV; 774 return -ENODEV;
768 /* 775 /*
@@ -932,13 +939,25 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
932 if (is_rproc_serial(port->out_vq->vdev)) 939 if (is_rproc_serial(port->out_vq->vdev))
933 return -EINVAL; 940 return -EINVAL;
934 941
942 /*
943 * pipe->nrbufs == 0 means there are no data to transfer,
944 * so this returns just 0 for no data.
945 */
946 pipe_lock(pipe);
947 if (!pipe->nrbufs) {
948 ret = 0;
949 goto error_out;
950 }
951
935 ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK); 952 ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
936 if (ret < 0) 953 if (ret < 0)
937 return ret; 954 goto error_out;
938 955
939 buf = alloc_buf(port->out_vq, 0, pipe->nrbufs); 956 buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
940 if (!buf) 957 if (!buf) {
941 return -ENOMEM; 958 ret = -ENOMEM;
959 goto error_out;
960 }
942 961
943 sgl.n = 0; 962 sgl.n = 0;
944 sgl.len = 0; 963 sgl.len = 0;
@@ -946,12 +965,17 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
946 sgl.sg = buf->sg; 965 sgl.sg = buf->sg;
947 sg_init_table(sgl.sg, sgl.size); 966 sg_init_table(sgl.sg, sgl.size);
948 ret = __splice_from_pipe(pipe, &sd, pipe_to_sg); 967 ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
968 pipe_unlock(pipe);
949 if (likely(ret > 0)) 969 if (likely(ret > 0))
950 ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true); 970 ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
951 971
952 if (unlikely(ret <= 0)) 972 if (unlikely(ret <= 0))
953 free_buf(buf, true); 973 free_buf(buf, true);
954 return ret; 974 return ret;
975
976error_out:
977 pipe_unlock(pipe);
978 return ret;
955} 979}
956 980
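
port_fops_splice_write() now holds pipe_lock() across both the nrbufs check and __splice_from_pipe(), so every early exit has to funnel through a single unlock label. The control-flow shape, reduced to a runnable skeleton (the lock and the splice work are stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pipe_lock = PTHREAD_MUTEX_INITIALIZER;

static int splice_write(int nrbufs, int writable)
{
	int ret;

	pthread_mutex_lock(&pipe_lock);
	if (!nrbufs) {            /* nothing queued: report 0 bytes */
		ret = 0;
		goto error_out;
	}
	if (!writable) {          /* would block or failed to wait */
		ret = -11;        /* -EAGAIN */
		goto error_out;
	}
	ret = nrbufs;             /* pretend we spliced everything */
	pthread_mutex_unlock(&pipe_lock);
	return ret;

error_out:
	pthread_mutex_unlock(&pipe_lock);
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", splice_write(0, 1), splice_write(3, 0),
	       splice_write(3, 1));
	return 0;
}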
957static unsigned int port_fops_poll(struct file *filp, poll_table *wait) 981static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
@@ -1019,14 +1043,14 @@ static int port_fops_open(struct inode *inode, struct file *filp)
1019 struct port *port; 1043 struct port *port;
1020 int ret; 1044 int ret;
1021 1045
1046 /* We get the port with a kref here */
1022 port = find_port_by_devt(cdev->dev); 1047 port = find_port_by_devt(cdev->dev);
1048 if (!port) {
1049 /* Port was unplugged before we could proceed */
1050 return -ENXIO;
1051 }
1023 filp->private_data = port; 1052 filp->private_data = port;
1024 1053
1025 /* Prevent against a port getting hot-unplugged at the same time */
1026 spin_lock_irq(&port->portdev->ports_lock);
1027 kref_get(&port->kref);
1028 spin_unlock_irq(&port->portdev->ports_lock);
1029
1030 /* 1054 /*
1031 * Don't allow opening of console port devices -- that's done 1055 * Don't allow opening of console port devices -- that's done
1032 * via /dev/hvc 1056 * via /dev/hvc
@@ -1498,14 +1522,6 @@ static void remove_port(struct kref *kref)
1498 1522
1499 port = container_of(kref, struct port, kref); 1523 port = container_of(kref, struct port, kref);
1500 1524
1501 sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
1502 device_destroy(pdrvdata.class, port->dev->devt);
1503 cdev_del(port->cdev);
1504
1505 kfree(port->name);
1506
1507 debugfs_remove(port->debugfs_file);
1508
1509 kfree(port); 1525 kfree(port);
1510} 1526}
1511 1527
@@ -1539,12 +1555,14 @@ static void unplug_port(struct port *port)
1539 spin_unlock_irq(&port->portdev->ports_lock); 1555 spin_unlock_irq(&port->portdev->ports_lock);
1540 1556
1541 if (port->guest_connected) { 1557 if (port->guest_connected) {
1558 /* Let the app know the port is going down. */
1559 send_sigio_to_port(port);
1560
1561 /* Do this after sigio is actually sent */
1542 port->guest_connected = false; 1562 port->guest_connected = false;
1543 port->host_connected = false; 1563 port->host_connected = false;
1544 wake_up_interruptible(&port->waitqueue);
1545 1564
1546 /* Let the app know the port is going down. */ 1565 wake_up_interruptible(&port->waitqueue);
1547 send_sigio_to_port(port);
1548 } 1566 }
1549 1567
1550 if (is_console_port(port)) { 1568 if (is_console_port(port)) {
@@ -1563,6 +1581,14 @@ static void unplug_port(struct port *port)
1563 */ 1581 */
1564 port->portdev = NULL; 1582 port->portdev = NULL;
1565 1583
1584 sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
1585 device_destroy(pdrvdata.class, port->dev->devt);
1586 cdev_del(port->cdev);
1587
1588 kfree(port->name);
1589
1590 debugfs_remove(port->debugfs_file);
1591
1566 /* 1592 /*
1567 * Locks around here are not necessary - a port can't be 1593 * Locks around here are not necessary - a port can't be
1568 * opened after we removed the port struct from ports_list 1594 * opened after we removed the port struct from ports_list
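
Two orderings matter in the unplug path above: the SIGIO is delivered while guest_connected is still true, and waiters are woken only after both connection flags are cleared, so a woken reader observes the final unplugged state; separately, the sysfs/cdev teardown moves out of the kref release path so remove_port() is a pure last-reference free. A small pthread model of the publish-then-wake half (a sketch of the idiom, not the driver's locking):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
static bool guest_connected = true;

/* Reader: sleeps until data shows up or the port goes away. */
static int port_read(void)
{
	int ret;

	pthread_mutex_lock(&lock);
	while (guest_connected)          /* no data ever arrives here */
		pthread_cond_wait(&waitq, &lock);
	ret = -19;                       /* -ENODEV: port is gone */
	pthread_mutex_unlock(&lock);
	return ret;
}

/* Unplug: publish the new state first, wake the waiters last. */
static void unplug(void)
{
	pthread_mutex_lock(&lock);
	/* (the driver delivers SIGIO here, while still "connected") */
	guest_connected = false;
	pthread_mutex_unlock(&lock);
	pthread_cond_broadcast(&waitq);
}

static void *reader(void *arg)
{
	(void)arg;
	printf("read returned %d\n", port_read());
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);
	unplug();
	pthread_join(t, NULL);
	return 0;
}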
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 1bdb882c845b..4e5739773c33 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -581,11 +581,15 @@ struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
581 DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4), 581 DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4),
582 DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8), 582 DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8),
583 DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4), 583 DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4),
584 DIV(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3), 584 DIV_F(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3,
585 DIV(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3), 585 CLK_GET_RATE_NOCACHE, 0),
586 DIV_F(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3,
587 CLK_GET_RATE_NOCACHE, 0),
586 DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3), 588 DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3),
587 DIV(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, 4, 3), 589 DIV_F(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1,
588 DIV(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 8, 3), 590 4, 3, CLK_GET_RATE_NOCACHE, 0),
591 DIV_F(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
592 8, 3, CLK_GET_RATE_NOCACHE, 0),
589 DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4), 593 DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
590}; 594};
591 595
@@ -863,57 +867,57 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
863 GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100", 867 GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100",
864 E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"), 868 E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"),
865 GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0, 869 GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0,
866 CLK_IGNORE_UNUSED, 0), 870 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
867 GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1, 871 GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1,
868 CLK_IGNORE_UNUSED, 0), 872 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
869 GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2, 873 GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2,
870 CLK_IGNORE_UNUSED, 0), 874 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
871 GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3, 875 GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3,
872 CLK_IGNORE_UNUSED, 0), 876 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
873 GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4, 877 GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4,
874 CLK_IGNORE_UNUSED, 0), 878 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
875 GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5, 879 GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5,
876 CLK_IGNORE_UNUSED, 0), 880 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
877 GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7, 881 GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7,
878 CLK_IGNORE_UNUSED, 0), 882 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
879 GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8, 883 GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8,
880 CLK_IGNORE_UNUSED, 0), 884 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
881 GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9, 885 GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9,
882 CLK_IGNORE_UNUSED, 0), 886 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
883 GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10, 887 GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10,
884 CLK_IGNORE_UNUSED, 0), 888 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
885 GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11, 889 GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11,
886 CLK_IGNORE_UNUSED, 0), 890 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
887 GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12, 891 GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12,
888 CLK_IGNORE_UNUSED, 0), 892 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
889 GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20, 893 GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20,
890 CLK_IGNORE_UNUSED, 0), 894 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
891 GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21, 895 GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21,
892 CLK_IGNORE_UNUSED, 0), 896 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
893 GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23, 897 GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23,
894 CLK_IGNORE_UNUSED, 0), 898 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
895 GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24, 899 GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24,
896 CLK_IGNORE_UNUSED, 0), 900 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
897 GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25, 901 GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25,
898 CLK_IGNORE_UNUSED, 0), 902 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
899 GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26, 903 GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26,
900 CLK_IGNORE_UNUSED, 0), 904 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
901 GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27, 905 GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27,
902 CLK_IGNORE_UNUSED, 0), 906 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
903 GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28, 907 GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28,
904 CLK_IGNORE_UNUSED, 0), 908 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
905 GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30, 909 GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30,
906 CLK_IGNORE_UNUSED, 0), 910 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
907 GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31, 911 GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31,
908 CLK_IGNORE_UNUSED, 0), 912 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
909 GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0, 913 GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0,
910 CLK_IGNORE_UNUSED, 0), 914 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
911 GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4, 915 GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4,
912 CLK_IGNORE_UNUSED, 0), 916 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
913 GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12, 917 GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12,
914 CLK_IGNORE_UNUSED, 0), 918 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
915 GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13, 919 GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
916 CLK_IGNORE_UNUSED, 0), 920 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
917 GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0), 921 GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
918}; 922};
919 923
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 5c205b60a82a..089d3e30e221 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -71,6 +71,7 @@ static DEFINE_SPINLOCK(armpll_lock);
71static DEFINE_SPINLOCK(ddrpll_lock); 71static DEFINE_SPINLOCK(ddrpll_lock);
72static DEFINE_SPINLOCK(iopll_lock); 72static DEFINE_SPINLOCK(iopll_lock);
73static DEFINE_SPINLOCK(armclk_lock); 73static DEFINE_SPINLOCK(armclk_lock);
74static DEFINE_SPINLOCK(swdtclk_lock);
74static DEFINE_SPINLOCK(ddrclk_lock); 75static DEFINE_SPINLOCK(ddrclk_lock);
75static DEFINE_SPINLOCK(dciclk_lock); 76static DEFINE_SPINLOCK(dciclk_lock);
76static DEFINE_SPINLOCK(gem0clk_lock); 77static DEFINE_SPINLOCK(gem0clk_lock);
@@ -293,7 +294,7 @@ static void __init zynq_clk_setup(struct device_node *np)
293 } 294 }
294 clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt], 295 clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt],
295 swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT, 296 swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT,
296 SLCR_SWDT_CLK_SEL, 0, 1, 0, &gem0clk_lock); 297 SLCR_SWDT_CLK_SEL, 0, 1, 0, &swdtclk_lock);
297 298
298 /* DDR clocks */ 299 /* DDR clocks */
299 clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0, 300 clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0,
@@ -364,8 +365,9 @@ static void __init zynq_clk_setup(struct device_node *np)
364 CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6, 365 CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6,
365 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, 366 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
366 &gem0clk_lock); 367 &gem0clk_lock);
367 clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, 0, 368 clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2,
368 SLCR_GEM0_CLK_CTRL, 6, 1, 0, &gem0clk_lock); 369 CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 6, 1, 0,
370 &gem0clk_lock);
369 clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0], 371 clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0],
370 "gem0_emio_mux", CLK_SET_RATE_PARENT, 372 "gem0_emio_mux", CLK_SET_RATE_PARENT,
371 SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock); 373 SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock);
@@ -386,8 +388,9 @@ static void __init zynq_clk_setup(struct device_node *np)
386 CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6, 388 CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6,
387 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, 389 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
388 &gem1clk_lock); 390 &gem1clk_lock);
389 clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, 0, 391 clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2,
390 SLCR_GEM1_CLK_CTRL, 6, 1, 0, &gem1clk_lock); 392 CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 6, 1, 0,
393 &gem1clk_lock);
391 clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1], 394 clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1],
392 "gem1_emio_mux", CLK_SET_RATE_PARENT, 395 "gem1_emio_mux", CLK_SET_RATE_PARENT,
393 SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock); 396 SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a4ad7339588d..f0a5e2b0eb8a 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1177,14 +1177,11 @@ static int __cpufreq_remove_dev(struct device *dev,
1177 __func__, cpu_dev->id, cpu); 1177 __func__, cpu_dev->id, cpu);
1178 } 1178 }
1179 1179
1180 if ((cpus == 1) && (cpufreq_driver->target))
1181 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1182
1183 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1184 cpufreq_cpu_put(data);
1185
1186 /* If cpu is last user of policy, free policy */ 1180 /* If cpu is last user of policy, free policy */
1187 if (cpus == 1) { 1181 if (cpus == 1) {
1182 if (cpufreq_driver->target)
1183 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1184
1188 lock_policy_rwsem_read(cpu); 1185 lock_policy_rwsem_read(cpu);
1189 kobj = &data->kobj; 1186 kobj = &data->kobj;
1190 cmp = &data->kobj_unregister; 1187 cmp = &data->kobj_unregister;
@@ -1205,9 +1202,13 @@ static int __cpufreq_remove_dev(struct device *dev,
1205 free_cpumask_var(data->related_cpus); 1202 free_cpumask_var(data->related_cpus);
1206 free_cpumask_var(data->cpus); 1203 free_cpumask_var(data->cpus);
1207 kfree(data); 1204 kfree(data);
1208 } else if (cpufreq_driver->target) { 1205 } else {
1209 __cpufreq_governor(data, CPUFREQ_GOV_START); 1206 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1210 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); 1207 cpufreq_cpu_put(data);
1208 if (cpufreq_driver->target) {
1209 __cpufreq_governor(data, CPUFREQ_GOV_START);
1210 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1211 }
1211 } 1212 }
1212 1213
1213 per_cpu(cpufreq_policy_cpu, cpu) = -1; 1214 per_cpu(cpufreq_policy_cpu, cpu) = -1;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 0ceb2eff5a7e..f97cb3d8c5a2 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -221,8 +221,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
221 return count; 221 return count;
222} 222}
223 223
224static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, 224static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
225 size_t count) 225 const char *buf, size_t count)
226{ 226{
227 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; 227 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
228 unsigned int input, j; 228 unsigned int input, j;
@@ -235,10 +235,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
235 if (input > 1) 235 if (input > 1)
236 input = 1; 236 input = 1;
237 237
238 if (input == cs_tuners->ignore_nice) /* nothing to do */ 238 if (input == cs_tuners->ignore_nice_load) /* nothing to do */
239 return count; 239 return count;
240 240
241 cs_tuners->ignore_nice = input; 241 cs_tuners->ignore_nice_load = input;
242 242
243 /* we need to re-evaluate prev_cpu_idle */ 243 /* we need to re-evaluate prev_cpu_idle */
244 for_each_online_cpu(j) { 244 for_each_online_cpu(j) {
@@ -246,7 +246,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
246 dbs_info = &per_cpu(cs_cpu_dbs_info, j); 246 dbs_info = &per_cpu(cs_cpu_dbs_info, j);
247 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, 247 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
248 &dbs_info->cdbs.prev_cpu_wall, 0); 248 &dbs_info->cdbs.prev_cpu_wall, 0);
249 if (cs_tuners->ignore_nice) 249 if (cs_tuners->ignore_nice_load)
250 dbs_info->cdbs.prev_cpu_nice = 250 dbs_info->cdbs.prev_cpu_nice =
251 kcpustat_cpu(j).cpustat[CPUTIME_NICE]; 251 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
252 } 252 }
@@ -279,7 +279,7 @@ show_store_one(cs, sampling_rate);
279show_store_one(cs, sampling_down_factor); 279show_store_one(cs, sampling_down_factor);
280show_store_one(cs, up_threshold); 280show_store_one(cs, up_threshold);
281show_store_one(cs, down_threshold); 281show_store_one(cs, down_threshold);
282show_store_one(cs, ignore_nice); 282show_store_one(cs, ignore_nice_load);
283show_store_one(cs, freq_step); 283show_store_one(cs, freq_step);
284declare_show_sampling_rate_min(cs); 284declare_show_sampling_rate_min(cs);
285 285
@@ -287,7 +287,7 @@ gov_sys_pol_attr_rw(sampling_rate);
287gov_sys_pol_attr_rw(sampling_down_factor); 287gov_sys_pol_attr_rw(sampling_down_factor);
288gov_sys_pol_attr_rw(up_threshold); 288gov_sys_pol_attr_rw(up_threshold);
289gov_sys_pol_attr_rw(down_threshold); 289gov_sys_pol_attr_rw(down_threshold);
290gov_sys_pol_attr_rw(ignore_nice); 290gov_sys_pol_attr_rw(ignore_nice_load);
291gov_sys_pol_attr_rw(freq_step); 291gov_sys_pol_attr_rw(freq_step);
292gov_sys_pol_attr_ro(sampling_rate_min); 292gov_sys_pol_attr_ro(sampling_rate_min);
293 293
@@ -297,7 +297,7 @@ static struct attribute *dbs_attributes_gov_sys[] = {
297 &sampling_down_factor_gov_sys.attr, 297 &sampling_down_factor_gov_sys.attr,
298 &up_threshold_gov_sys.attr, 298 &up_threshold_gov_sys.attr,
299 &down_threshold_gov_sys.attr, 299 &down_threshold_gov_sys.attr,
300 &ignore_nice_gov_sys.attr, 300 &ignore_nice_load_gov_sys.attr,
301 &freq_step_gov_sys.attr, 301 &freq_step_gov_sys.attr,
302 NULL 302 NULL
303}; 303};
@@ -313,7 +313,7 @@ static struct attribute *dbs_attributes_gov_pol[] = {
313 &sampling_down_factor_gov_pol.attr, 313 &sampling_down_factor_gov_pol.attr,
314 &up_threshold_gov_pol.attr, 314 &up_threshold_gov_pol.attr,
315 &down_threshold_gov_pol.attr, 315 &down_threshold_gov_pol.attr,
316 &ignore_nice_gov_pol.attr, 316 &ignore_nice_load_gov_pol.attr,
317 &freq_step_gov_pol.attr, 317 &freq_step_gov_pol.attr,
318 NULL 318 NULL
319}; 319};
@@ -338,7 +338,7 @@ static int cs_init(struct dbs_data *dbs_data)
338 tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; 338 tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
339 tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD; 339 tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
340 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; 340 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
341 tuners->ignore_nice = 0; 341 tuners->ignore_nice_load = 0;
342 tuners->freq_step = DEF_FREQUENCY_STEP; 342 tuners->freq_step = DEF_FREQUENCY_STEP;
343 343
344 dbs_data->tuners = tuners; 344 dbs_data->tuners = tuners;
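
The ignore_nice -> ignore_nice_load rename has to land in several coordinated places because the sysfs file name is generated from the field name by macro token-pasting: the tuners struct member, the store_* helper, the show_store_one() instantiation, and the _gov_sys/_gov_pol attribute lists. A stripped-down model of that token-pasting (the macro shape is assumed, not copied from the kernel):

#include <stdio.h>

struct tuners { unsigned int ignore_nice_load; };
static struct tuners t = { 1 };

/* One macro stamps out a show helper whose *name* embeds the
 * field name -- so renaming the field renames the sysfs file. */
#define SHOW_ONE(field)                                        \
	static void show_##field(void)                         \
	{                                                      \
		printf(#field " = %u\n", t.field);             \
	}

SHOW_ONE(ignore_nice_load)

int main(void)
{
	show_ignore_nice_load();   /* would be the sysfs read hook */
	return 0;
}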
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 7b839a8db2a7..e59afaa9da23 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -47,9 +47,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
47 unsigned int j; 47 unsigned int j;
48 48
49 if (dbs_data->cdata->governor == GOV_ONDEMAND) 49 if (dbs_data->cdata->governor == GOV_ONDEMAND)
50 ignore_nice = od_tuners->ignore_nice; 50 ignore_nice = od_tuners->ignore_nice_load;
51 else 51 else
52 ignore_nice = cs_tuners->ignore_nice; 52 ignore_nice = cs_tuners->ignore_nice_load;
53 53
54 policy = cdbs->cur_policy; 54 policy = cdbs->cur_policy;
55 55
@@ -298,12 +298,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
298 cs_tuners = dbs_data->tuners; 298 cs_tuners = dbs_data->tuners;
299 cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); 299 cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
300 sampling_rate = cs_tuners->sampling_rate; 300 sampling_rate = cs_tuners->sampling_rate;
301 ignore_nice = cs_tuners->ignore_nice; 301 ignore_nice = cs_tuners->ignore_nice_load;
302 } else { 302 } else {
303 od_tuners = dbs_data->tuners; 303 od_tuners = dbs_data->tuners;
304 od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); 304 od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
305 sampling_rate = od_tuners->sampling_rate; 305 sampling_rate = od_tuners->sampling_rate;
306 ignore_nice = od_tuners->ignore_nice; 306 ignore_nice = od_tuners->ignore_nice_load;
307 od_ops = dbs_data->cdata->gov_ops; 307 od_ops = dbs_data->cdata->gov_ops;
308 io_busy = od_tuners->io_is_busy; 308 io_busy = od_tuners->io_is_busy;
309 } 309 }
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 6663ec3b3056..d5f12b4b11b8 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -165,7 +165,7 @@ struct cs_cpu_dbs_info_s {
165 165
166/* Per policy Governers sysfs tunables */ 166/* Per policy Governers sysfs tunables */
167struct od_dbs_tuners { 167struct od_dbs_tuners {
168 unsigned int ignore_nice; 168 unsigned int ignore_nice_load;
169 unsigned int sampling_rate; 169 unsigned int sampling_rate;
170 unsigned int sampling_down_factor; 170 unsigned int sampling_down_factor;
171 unsigned int up_threshold; 171 unsigned int up_threshold;
@@ -175,7 +175,7 @@ struct od_dbs_tuners {
175}; 175};
176 176
177struct cs_dbs_tuners { 177struct cs_dbs_tuners {
178 unsigned int ignore_nice; 178 unsigned int ignore_nice_load;
179 unsigned int sampling_rate; 179 unsigned int sampling_rate;
180 unsigned int sampling_down_factor; 180 unsigned int sampling_down_factor;
181 unsigned int up_threshold; 181 unsigned int up_threshold;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 93eb5cbcc1f6..c087347d6688 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -403,8 +403,8 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
403 return count; 403 return count;
404} 404}
405 405
406static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, 406static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
407 size_t count) 407 const char *buf, size_t count)
408{ 408{
409 struct od_dbs_tuners *od_tuners = dbs_data->tuners; 409 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
410 unsigned int input; 410 unsigned int input;
@@ -419,10 +419,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
419 if (input > 1) 419 if (input > 1)
420 input = 1; 420 input = 1;
421 421
422 if (input == od_tuners->ignore_nice) { /* nothing to do */ 422 if (input == od_tuners->ignore_nice_load) { /* nothing to do */
423 return count; 423 return count;
424 } 424 }
425 od_tuners->ignore_nice = input; 425 od_tuners->ignore_nice_load = input;
426 426
427 /* we need to re-evaluate prev_cpu_idle */ 427 /* we need to re-evaluate prev_cpu_idle */
428 for_each_online_cpu(j) { 428 for_each_online_cpu(j) {
@@ -430,7 +430,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
430 dbs_info = &per_cpu(od_cpu_dbs_info, j); 430 dbs_info = &per_cpu(od_cpu_dbs_info, j);
431 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, 431 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
432 &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy); 432 &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
433 if (od_tuners->ignore_nice) 433 if (od_tuners->ignore_nice_load)
434 dbs_info->cdbs.prev_cpu_nice = 434 dbs_info->cdbs.prev_cpu_nice =
435 kcpustat_cpu(j).cpustat[CPUTIME_NICE]; 435 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
436 436
@@ -461,7 +461,7 @@ show_store_one(od, sampling_rate);
461show_store_one(od, io_is_busy); 461show_store_one(od, io_is_busy);
462show_store_one(od, up_threshold); 462show_store_one(od, up_threshold);
463show_store_one(od, sampling_down_factor); 463show_store_one(od, sampling_down_factor);
464show_store_one(od, ignore_nice); 464show_store_one(od, ignore_nice_load);
465show_store_one(od, powersave_bias); 465show_store_one(od, powersave_bias);
466declare_show_sampling_rate_min(od); 466declare_show_sampling_rate_min(od);
467 467
@@ -469,7 +469,7 @@ gov_sys_pol_attr_rw(sampling_rate);
469gov_sys_pol_attr_rw(io_is_busy); 469gov_sys_pol_attr_rw(io_is_busy);
470gov_sys_pol_attr_rw(up_threshold); 470gov_sys_pol_attr_rw(up_threshold);
471gov_sys_pol_attr_rw(sampling_down_factor); 471gov_sys_pol_attr_rw(sampling_down_factor);
472gov_sys_pol_attr_rw(ignore_nice); 472gov_sys_pol_attr_rw(ignore_nice_load);
473gov_sys_pol_attr_rw(powersave_bias); 473gov_sys_pol_attr_rw(powersave_bias);
474gov_sys_pol_attr_ro(sampling_rate_min); 474gov_sys_pol_attr_ro(sampling_rate_min);
475 475
@@ -478,7 +478,7 @@ static struct attribute *dbs_attributes_gov_sys[] = {
478 &sampling_rate_gov_sys.attr, 478 &sampling_rate_gov_sys.attr,
479 &up_threshold_gov_sys.attr, 479 &up_threshold_gov_sys.attr,
480 &sampling_down_factor_gov_sys.attr, 480 &sampling_down_factor_gov_sys.attr,
481 &ignore_nice_gov_sys.attr, 481 &ignore_nice_load_gov_sys.attr,
482 &powersave_bias_gov_sys.attr, 482 &powersave_bias_gov_sys.attr,
483 &io_is_busy_gov_sys.attr, 483 &io_is_busy_gov_sys.attr,
484 NULL 484 NULL
@@ -494,7 +494,7 @@ static struct attribute *dbs_attributes_gov_pol[] = {
494 &sampling_rate_gov_pol.attr, 494 &sampling_rate_gov_pol.attr,
495 &up_threshold_gov_pol.attr, 495 &up_threshold_gov_pol.attr,
496 &sampling_down_factor_gov_pol.attr, 496 &sampling_down_factor_gov_pol.attr,
497 &ignore_nice_gov_pol.attr, 497 &ignore_nice_load_gov_pol.attr,
498 &powersave_bias_gov_pol.attr, 498 &powersave_bias_gov_pol.attr,
499 &io_is_busy_gov_pol.attr, 499 &io_is_busy_gov_pol.attr,
500 NULL 500 NULL
@@ -544,7 +544,7 @@ static int od_init(struct dbs_data *dbs_data)
544 } 544 }
545 545
546 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; 546 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
547 tuners->ignore_nice = 0; 547 tuners->ignore_nice_load = 0;
548 tuners->powersave_bias = default_powersave_bias; 548 tuners->powersave_bias = default_powersave_bias;
549 tuners->io_is_busy = should_io_be_busy(); 549 tuners->io_is_busy = should_io_be_busy();
550 550
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b012d7600e1a..7cde885011ed 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -103,10 +103,10 @@ struct pstate_adjust_policy {
103static struct pstate_adjust_policy default_policy = { 103static struct pstate_adjust_policy default_policy = {
104 .sample_rate_ms = 10, 104 .sample_rate_ms = 10,
105 .deadband = 0, 105 .deadband = 0,
106 .setpoint = 109, 106 .setpoint = 97,
107 .p_gain_pct = 17, 107 .p_gain_pct = 20,
108 .d_gain_pct = 0, 108 .d_gain_pct = 0,
109 .i_gain_pct = 4, 109 .i_gain_pct = 0,
110}; 110};
111 111
112struct perf_limits { 112struct perf_limits {
@@ -468,12 +468,12 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
468static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) 468static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
469{ 469{
470 int32_t busy_scaled; 470 int32_t busy_scaled;
471 int32_t core_busy, turbo_pstate, current_pstate; 471 int32_t core_busy, max_pstate, current_pstate;
472 472
473 core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy); 473 core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
474 turbo_pstate = int_tofp(cpu->pstate.turbo_pstate); 474 max_pstate = int_tofp(cpu->pstate.max_pstate);
475 current_pstate = int_tofp(cpu->pstate.current_pstate); 475 current_pstate = int_tofp(cpu->pstate.current_pstate);
476 busy_scaled = mul_fp(core_busy, div_fp(turbo_pstate, current_pstate)); 476 busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
477 477
478 return fp_toint(busy_scaled); 478 return fp_toint(busy_scaled);
479} 479}
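
Besides retuning the PID gains, the intel_pstate hunk changes the scaling reference from the turbo P-state to the max non-turbo P-state; busy_scaled is computed in the driver's fixed-point format. The arithmetic, modeled standalone (FRAC_BITS = 8 is an assumption matching this era of the driver):

#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS 8
static int32_t int_tofp(int32_t x) { return x << FRAC_BITS; }
static int32_t fp_toint(int32_t x) { return x >> FRAC_BITS; }
static int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * y) >> FRAC_BITS;
}
static int32_t div_fp(int32_t x, int32_t y)
{
	return ((int64_t)x << FRAC_BITS) / y;
}

int main(void)
{
	int32_t core_pct_busy = 50;   /* sampled busy percentage */
	int32_t max_pstate = 24;      /* max non-turbo P-state (assumed) */
	int32_t current_pstate = 16;

	/* Scale busyness up when running below max: 50% busy at
	 * P-state 16 of 24 is 75% of max-P-state capacity. */
	int32_t busy = mul_fp(int_tofp(core_pct_busy),
			      div_fp(int_tofp(max_pstate),
				     int_tofp(current_pstate)));
	printf("busy_scaled = %d\n", fp_toint(busy));   /* 75 */
	return 0;
}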
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index bb838b985077..9536852c504a 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -118,11 +118,6 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
118 clk_put(cpuclk); 118 clk_put(cpuclk);
119 return -EINVAL; 119 return -EINVAL;
120 } 120 }
121 ret = clk_set_rate(cpuclk, rate);
122 if (ret) {
123 clk_put(cpuclk);
124 return ret;
125 }
126 121
127 /* clock table init */ 122 /* clock table init */
128 for (i = 2; 123 for (i = 2;
@@ -130,6 +125,12 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
130 i++) 125 i++)
131 loongson2_clockmod_table[i].frequency = (rate * i) / 8; 126 loongson2_clockmod_table[i].frequency = (rate * i) / 8;
132 127
128 ret = clk_set_rate(cpuclk, rate);
129 if (ret) {
130 clk_put(cpuclk);
131 return ret;
132 }
133
133 policy->cur = loongson2_cpufreq_get(policy->cpu); 134 policy->cur = loongson2_cpufreq_get(policy->cpu);
134 135
135 cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0], 136 cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index fe343a06b7da..bc580b67a652 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -28,13 +28,6 @@
28#define MAX_INTERESTING 50000 28#define MAX_INTERESTING 50000
29#define STDDEV_THRESH 400 29#define STDDEV_THRESH 400
30 30
31/* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */
32#define MAX_DEVIATION 60
33
34static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer);
35static DEFINE_PER_CPU(int, hrtimer_status);
36/* menu hrtimer mode */
37enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
38 31
39/* 32/*
40 * Concepts and ideas behind the menu governor 33 * Concepts and ideas behind the menu governor
@@ -116,13 +109,6 @@ enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
116 * 109 *
117 */ 110 */
118 111
119/*
120 * The C-state residency is so long that is is worthwhile to exit
121 * from the shallow C-state and re-enter into a deeper C-state.
122 */
123static unsigned int perfect_cstate_ms __read_mostly = 30;
124module_param(perfect_cstate_ms, uint, 0000);
125
126struct menu_device { 112struct menu_device {
127 int last_state_idx; 113 int last_state_idx;
128 int needs_update; 114 int needs_update;
@@ -205,52 +191,17 @@ static u64 div_round64(u64 dividend, u32 divisor)
205 return div_u64(dividend + (divisor / 2), divisor); 191 return div_u64(dividend + (divisor / 2), divisor);
206} 192}
207 193
208/* Cancel the hrtimer if it is not triggered yet */
209void menu_hrtimer_cancel(void)
210{
211 int cpu = smp_processor_id();
212 struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
213
214 /* The timer is still not time out*/
215 if (per_cpu(hrtimer_status, cpu)) {
216 hrtimer_cancel(hrtmr);
217 per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
218 }
219}
220EXPORT_SYMBOL_GPL(menu_hrtimer_cancel);
221
222/* Call back for hrtimer is triggered */
223static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer)
224{
225 int cpu = smp_processor_id();
226 struct menu_device *data = &per_cpu(menu_devices, cpu);
227
228 /* In general case, the expected residency is much larger than
229 * deepest C-state target residency, but prediction logic still
230 * predicts a small predicted residency, so the prediction
231 * history is totally broken if the timer is triggered.
232 * So reset the correction factor.
233 */
234 if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL)
235 data->correction_factor[data->bucket] = RESOLUTION * DECAY;
236
237 per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
238
239 return HRTIMER_NORESTART;
240}
241
242/* 194/*
243 * Try detecting repeating patterns by keeping track of the last 8 195 * Try detecting repeating patterns by keeping track of the last 8
244 * intervals, and checking if the standard deviation of that set 196 * intervals, and checking if the standard deviation of that set
245 * of points is below a threshold. If it is... then use the 197 * of points is below a threshold. If it is... then use the
246 * average of these 8 points as the estimated value. 198 * average of these 8 points as the estimated value.
247 */ 199 */
248static u32 get_typical_interval(struct menu_device *data) 200static void get_typical_interval(struct menu_device *data)
249{ 201{
250 int i = 0, divisor = 0; 202 int i = 0, divisor = 0;
251 uint64_t max = 0, avg = 0, stddev = 0; 203 uint64_t max = 0, avg = 0, stddev = 0;
252 int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */ 204 int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */
253 unsigned int ret = 0;
254 205
255again: 206again:
256 207
@@ -291,16 +242,13 @@ again:
291 if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3)) 242 if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
292 || stddev <= 20) { 243 || stddev <= 20) {
293 data->predicted_us = avg; 244 data->predicted_us = avg;
294 ret = 1; 245 return;
295 return ret;
296 246
297 } else if ((divisor * 4) > INTERVALS * 3) { 247 } else if ((divisor * 4) > INTERVALS * 3) {
298 /* Exclude the max interval */ 248 /* Exclude the max interval */
299 thresh = max - 1; 249 thresh = max - 1;
300 goto again; 250 goto again;
301 } 251 }
302
303 return ret;
304} 252}
305 253
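
get_typical_interval() now updates data->predicted_us purely as a side effect, but the detection itself is unchanged: average the last 8 sleep intervals, compute their standard deviation, accept the average if the spread is small, otherwise discard the largest sample and retry. A standalone version of that statistic (thresholds mirror the ones visible in the hunk; the integer square root is a naive stand-in for the kernel's int_sqrt()):

#include <stdio.h>
#include <stdint.h>

#define INTERVALS 8

static uint64_t isqrt(uint64_t x)
{
	uint64_t r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

/* Returns the average of the recent intervals if they look
 * periodic (low relative spread), or 0 if no pattern is found. */
static unsigned int typical_interval(const unsigned int v[INTERVALS])
{
	uint64_t thresh = UINT64_MAX;

again:
	{
		uint64_t avg = 0, stddev = 0, max = 0;
		int divisor = 0;

		for (int i = 0; i < INTERVALS; i++) {
			if (v[i] > thresh)
				continue;       /* outlier, ignore */
			divisor++;
			avg += v[i];
			if (v[i] > max)
				max = v[i];
		}
		avg /= divisor;
		for (int i = 0; i < INTERVALS; i++) {
			int64_t d;

			if (v[i] > thresh)
				continue;
			d = (int64_t)v[i] - (int64_t)avg;
			stddev += (uint64_t)(d * d);
		}
		stddev = isqrt(stddev / divisor);

		if ((avg > stddev * 6 && divisor * 4 >= INTERVALS * 3) ||
		    stddev <= 20)
			return (unsigned int)avg;
		if (divisor * 4 > INTERVALS * 3) {
			thresh = max - 1;       /* drop the max, retry */
			goto again;
		}
	}
	return 0;
}

int main(void)
{
	unsigned int v[INTERVALS] = { 100, 102, 99, 101, 5000, 100, 98, 103 };

	printf("typical interval: %u us\n", typical_interval(v));
	return 0;
}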
306/** 254/**
@@ -315,9 +263,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
315 int i; 263 int i;
316 int multiplier; 264 int multiplier;
317 struct timespec t; 265 struct timespec t;
318 int repeat = 0, low_predicted = 0;
319 int cpu = smp_processor_id();
320 struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
321 266
322 if (data->needs_update) { 267 if (data->needs_update) {
323 menu_update(drv, dev); 268 menu_update(drv, dev);
@@ -352,7 +297,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
352 data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket], 297 data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
353 RESOLUTION * DECAY); 298 RESOLUTION * DECAY);
354 299
355 repeat = get_typical_interval(data); 300 get_typical_interval(data);
356 301
357 /* 302 /*
358 * We want to default to C1 (hlt), not to busy polling 303 * We want to default to C1 (hlt), not to busy polling
@@ -373,10 +318,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
373 318
374 if (s->disabled || su->disable) 319 if (s->disabled || su->disable)
375 continue; 320 continue;
376 if (s->target_residency > data->predicted_us) { 321 if (s->target_residency > data->predicted_us)
377 low_predicted = 1;
378 continue; 322 continue;
379 }
380 if (s->exit_latency > latency_req) 323 if (s->exit_latency > latency_req)
381 continue; 324 continue;
382 if (s->exit_latency * multiplier > data->predicted_us) 325 if (s->exit_latency * multiplier > data->predicted_us)
@@ -386,44 +329,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
386 data->exit_us = s->exit_latency; 329 data->exit_us = s->exit_latency;
387 } 330 }
388 331
389 /* not deepest C-state chosen for low predicted residency */
390 if (low_predicted) {
391 unsigned int timer_us = 0;
392 unsigned int perfect_us = 0;
393
394 /*
395 * Set a timer to detect whether this sleep is much
396 * longer than repeat mode predicted. If the timer
397 * triggers, the code will evaluate whether to put
398 * the CPU into a deeper C-state.
399 * The timer is cancelled on CPU wakeup.
400 */
401 timer_us = 2 * (data->predicted_us + MAX_DEVIATION);
402
403 perfect_us = perfect_cstate_ms * 1000;
404
405 if (repeat && (4 * timer_us < data->expected_us)) {
406 RCU_NONIDLE(hrtimer_start(hrtmr,
407 ns_to_ktime(1000 * timer_us),
408 HRTIMER_MODE_REL_PINNED));
409 /* In repeat case, menu hrtimer is started */
410 per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
411 } else if (perfect_us < data->expected_us) {
412 /*
413 * The next timer is long. This could be because
414 * we did not make a useful prediction.
415 * In that case, it makes sense to re-enter
416 * into a deeper C-state after some time.
417 */
418 RCU_NONIDLE(hrtimer_start(hrtmr,
419 ns_to_ktime(1000 * timer_us),
420 HRTIMER_MODE_REL_PINNED));
421 /* In general case, menu hrtimer is started */
422 per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
423 }
424
425 }
426
427 return data->last_state_idx; 332 return data->last_state_idx;
428} 333}
429 334
@@ -514,9 +419,6 @@ static int menu_enable_device(struct cpuidle_driver *drv,
514 struct cpuidle_device *dev) 419 struct cpuidle_device *dev)
515{ 420{
516 struct menu_device *data = &per_cpu(menu_devices, dev->cpu); 421 struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
517 struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu);
518 hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
519 t->function = menu_hrtimer_notify;
520 422
521 memset(data, 0, sizeof(struct menu_device)); 423 memset(data, 0, sizeof(struct menu_device));
522 424
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 5996521a1caf..84573b4d6f92 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -429,7 +429,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
429 dma_addr_t src_dma, dst_dma; 429 dma_addr_t src_dma, dst_dma;
430 int ret = 0; 430 int ret = 0;
431 431
432 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); 432 desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
433 if (!desc) { 433 if (!desc) {
434 dev_err(jrdev, "unable to allocate key input memory\n"); 434 dev_err(jrdev, "unable to allocate key input memory\n");
435 return -ENOMEM; 435 return -ENOMEM;
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index ce3dc3e9688c..0bbdea5059f3 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -867,6 +867,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
867 867
868 if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { 868 if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
869 dev_err(&pdev->dev, "Cannot find proper base address\n"); 869 dev_err(&pdev->dev, "Cannot find proper base address\n");
870 err = -ENODEV;
870 goto err_disable_pdev; 871 goto err_disable_pdev;
871 } 872 }
872 873
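
The pch_dma hunk is the common missing-errno pattern: without the added assignment, err can still hold 0 from an earlier successful call, so the failed probe would be reported as success. A self-contained sketch of the idiom; the helper names here are made up for illustration.

#include <stdio.h>

static int fake_enable(void)  { return 0; }	/* pretend enable succeeds */
static int fake_has_mem(void) { return 0; }	/* pretend the BAR is absent */

static int demo_probe(void)
{
	int err;

	err = fake_enable();
	if (err)
		return err;

	if (!fake_has_mem()) {
		/* The fix: without this line err is still 0 here, and the
		 * failed probe would look like success to the caller. */
		err = -19;	/* -ENODEV */
		goto err_disable;
	}
	return 0;

err_disable:
	/* undo fake_enable() here */
	return err;
}

int main(void)
{
	printf("probe returned %d\n", demo_probe());
	return 0;
}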
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 593827b3fdd4..fa645d825009 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2505,6 +2505,10 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2505 /* Assign cookies to all nodes */ 2505 /* Assign cookies to all nodes */
2506 while (!list_empty(&last->node)) { 2506 while (!list_empty(&last->node)) {
2507 desc = list_entry(last->node.next, struct dma_pl330_desc, node); 2507 desc = list_entry(last->node.next, struct dma_pl330_desc, node);
2508 if (pch->cyclic) {
2509 desc->txd.callback = last->txd.callback;
2510 desc->txd.callback_param = last->txd.callback_param;
2511 }
2508 2512
2509 dma_cookie_assign(&desc->txd); 2513 dma_cookie_assign(&desc->txd);
2510 2514
@@ -2688,45 +2692,82 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
2688 size_t period_len, enum dma_transfer_direction direction, 2692 size_t period_len, enum dma_transfer_direction direction,
2689 unsigned long flags, void *context) 2693 unsigned long flags, void *context)
2690{ 2694{
2691 struct dma_pl330_desc *desc; 2695 struct dma_pl330_desc *desc = NULL, *first = NULL;
2692 struct dma_pl330_chan *pch = to_pchan(chan); 2696 struct dma_pl330_chan *pch = to_pchan(chan);
2697 struct dma_pl330_dmac *pdmac = pch->dmac;
2698 unsigned int i;
2693 dma_addr_t dst; 2699 dma_addr_t dst;
2694 dma_addr_t src; 2700 dma_addr_t src;
2695 2701
2696 desc = pl330_get_desc(pch); 2702 if (len % period_len != 0)
2697 if (!desc) {
2698 dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
2699 __func__, __LINE__);
2700 return NULL; 2703 return NULL;
2701 }
2702 2704
2703 switch (direction) { 2705 if (!is_slave_direction(direction)) {
2704 case DMA_MEM_TO_DEV:
2705 desc->rqcfg.src_inc = 1;
2706 desc->rqcfg.dst_inc = 0;
2707 desc->req.rqtype = MEMTODEV;
2708 src = dma_addr;
2709 dst = pch->fifo_addr;
2710 break;
2711 case DMA_DEV_TO_MEM:
2712 desc->rqcfg.src_inc = 0;
2713 desc->rqcfg.dst_inc = 1;
2714 desc->req.rqtype = DEVTOMEM;
2715 src = pch->fifo_addr;
2716 dst = dma_addr;
2717 break;
2718 default:
2719 dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n", 2706 dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
2720 __func__, __LINE__); 2707 __func__, __LINE__);
2721 return NULL; 2708 return NULL;
2722 } 2709 }
2723 2710
2724 desc->rqcfg.brst_size = pch->burst_sz; 2711 for (i = 0; i < len / period_len; i++) {
2725 desc->rqcfg.brst_len = 1; 2712 desc = pl330_get_desc(pch);
2713 if (!desc) {
2714 dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
2715 __func__, __LINE__);
2726 2716
2727 pch->cyclic = true; 2717 if (!first)
2718 return NULL;
2719
2720 spin_lock_irqsave(&pdmac->pool_lock, flags);
2721
2722 while (!list_empty(&first->node)) {
2723 desc = list_entry(first->node.next,
2724 struct dma_pl330_desc, node);
2725 list_move_tail(&desc->node, &pdmac->desc_pool);
2726 }
2727
2728 list_move_tail(&first->node, &pdmac->desc_pool);
2728 2729
2729 fill_px(&desc->px, dst, src, period_len); 2730 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
2731
2732 return NULL;
2733 }
2734
2735 switch (direction) {
2736 case DMA_MEM_TO_DEV:
2737 desc->rqcfg.src_inc = 1;
2738 desc->rqcfg.dst_inc = 0;
2739 desc->req.rqtype = MEMTODEV;
2740 src = dma_addr;
2741 dst = pch->fifo_addr;
2742 break;
2743 case DMA_DEV_TO_MEM:
2744 desc->rqcfg.src_inc = 0;
2745 desc->rqcfg.dst_inc = 1;
2746 desc->req.rqtype = DEVTOMEM;
2747 src = pch->fifo_addr;
2748 dst = dma_addr;
2749 break;
2750 default:
2751 break;
2752 }
2753
2754 desc->rqcfg.brst_size = pch->burst_sz;
2755 desc->rqcfg.brst_len = 1;
2756 fill_px(&desc->px, dst, src, period_len);
2757
2758 if (!first)
2759 first = desc;
2760 else
2761 list_add_tail(&desc->node, &first->node);
2762
2763 dma_addr += period_len;
2764 }
2765
2766 if (!desc)
2767 return NULL;
2768
2769 pch->cyclic = true;
2770 desc->txd.flags = flags;
2730 2771
2731 return &desc->txd; 2772 return &desc->txd;
2732} 2773}
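
The rewritten pl330_prep_dma_cyclic() allocates one descriptor per period, rejects buffers that are not a whole number of periods, and chains the descriptors off the first so the tx_submit hunk above can copy the callback to every node. A plain-C model of that construction; this is not the dmaengine API, and the names, FIFO address, and unwind path are illustrative only.

#include <stdio.h>
#include <stdlib.h>

struct desc {
	unsigned long long src, dst;
	size_t len;
	void (*callback)(void);
	struct desc *next;
};

static struct desc *prep_cyclic(unsigned long long buf, size_t len,
				size_t period_len, void (*cb)(void))
{
	struct desc *first = NULL, *prev = NULL;
	size_t i;

	if (period_len == 0 || len % period_len != 0)
		return NULL;	/* must be a whole number of periods */

	for (i = 0; i < len / period_len; i++) {
		struct desc *d = calloc(1, sizeof(*d));

		if (!d)
			goto unwind;	/* free everything built so far */
		d->src = buf + i * period_len;
		d->dst = 0xF1F0;	/* stand-in for the device FIFO */
		d->len = period_len;
		d->callback = cb;	/* every period fires the callback */
		if (!first)
			first = d;
		else
			prev->next = d;
		prev = d;
	}
	prev->next = first;	/* close the ring */
	return first;

unwind:
	while (first) {
		struct desc *n = first->next;

		free(first);
		first = n;
	}
	return NULL;
}

static void tick(void) { }

int main(void)
{
	struct desc *d = prep_cyclic(0x1000, 4096, 1024, tick);

	printf("%s\n", d ? "built 4 chained periods" : "failed");
	if (d) {			/* break the ring, then drain it */
		struct desc *p = d->next;

		d->next = NULL;
		while (p) {
			struct desc *n = p->next;

			free(p);
			p = n;
		}
	}
	return 0;
}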
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index b67f45f5c271..5039fbc88254 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -400,8 +400,8 @@ static size_t sh_dmae_get_partial(struct shdma_chan *schan,
400 shdma_chan); 400 shdma_chan);
401 struct sh_dmae_desc *sh_desc = container_of(sdesc, 401 struct sh_dmae_desc *sh_desc = container_of(sdesc,
402 struct sh_dmae_desc, shdma_desc); 402 struct sh_dmae_desc, shdma_desc);
403 return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << 403 return sh_desc->hw.tcr -
404 sh_chan->xmit_shift; 404 (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
405} 405}
406 406
407/* Called from error IRQ or NMI */ 407/* Called from error IRQ or NMI */
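
The shdma change is purely about where the shift applies: as the new code reads, hw.tcr is byte-based while the TCR register counts (1 << xmit_shift)-byte units, so the register value must be scaled before the subtraction, not the difference after it. A two-printf demonstration with assumed values:

#include <stdio.h>

int main(void)
{
	unsigned int tcr_bytes = 4096;	/* planned total, in bytes */
	unsigned int reg = 256;		/* remaining count, in 4-byte units */
	unsigned int shift = 2;		/* log2 of the transfer unit size */

	/* old (wrong): shifts the difference, so tcr_bytes is scaled twice */
	printf("old: %u\n", (tcr_bytes - reg) << shift);	/* 15360 */
	/* new (right): scale the register value, then subtract */
	printf("new: %u\n", tcr_bytes - (reg << shift));	/* 3072 */
	return 0;
}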
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 27e86d938262..89e109022d78 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -48,6 +48,8 @@ static LIST_HEAD(mc_devices);
48 */ 48 */
49static void const *edac_mc_owner; 49static void const *edac_mc_owner;
50 50
51static struct bus_type mc_bus[EDAC_MAX_MCS];
52
51unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf, 53unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
52 unsigned len) 54 unsigned len)
53{ 55{
@@ -723,6 +725,11 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
723 int ret = -EINVAL; 725 int ret = -EINVAL;
724 edac_dbg(0, "\n"); 726 edac_dbg(0, "\n");
725 727
728 if (mci->mc_idx >= EDAC_MAX_MCS) {
729 pr_warn_once("Too many memory controllers: %d\n", mci->mc_idx);
730 return -ENODEV;
731 }
732
726#ifdef CONFIG_EDAC_DEBUG 733#ifdef CONFIG_EDAC_DEBUG
727 if (edac_debug_level >= 3) 734 if (edac_debug_level >= 3)
728 edac_mc_dump_mci(mci); 735 edac_mc_dump_mci(mci);
@@ -762,6 +769,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
762 /* set load time so that error rate can be tracked */ 769 /* set load time so that error rate can be tracked */
763 mci->start_time = jiffies; 770 mci->start_time = jiffies;
764 771
772 mci->bus = &mc_bus[mci->mc_idx];
773
765 if (edac_create_sysfs_mci_device(mci)) { 774 if (edac_create_sysfs_mci_device(mci)) {
766 edac_mc_printk(mci, KERN_WARNING, 775 edac_mc_printk(mci, KERN_WARNING,
767 "failed to create sysfs device\n"); 776 "failed to create sysfs device\n");
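
The EDAC fix stops embedding struct bus_type in mem_ctl_info, whose storage is tied to the device lifetime, and instead points mci->bus at a slot of a static array, bounds-checked by mc_idx. A compact model of the pattern; MAX_MCS and the struct layouts are stand-ins, not the EDAC types.

#include <stdio.h>

#define MAX_MCS 16		/* assumed stand-in for EDAC_MAX_MCS */

struct bus { char name[16]; };

static struct bus mc_bus[MAX_MCS];	/* static slots outlive any controller */

struct mci {
	int idx;
	struct bus *bus;	/* pointer, no longer embedded */
};

static int add_mc(struct mci *m)
{
	if (m->idx >= MAX_MCS) {
		fprintf(stderr, "too many memory controllers: %d\n", m->idx);
		return -19;	/* -ENODEV */
	}
	m->bus = &mc_bus[m->idx];
	return 0;
}

int main(void)
{
	struct mci m = { .idx = 3 };

	printf("add_mc: %d (bus slot %d)\n", add_mc(&m), m.idx);
	return 0;
}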
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index ef15a7e613bc..e7c32c4f7837 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -370,7 +370,7 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
370 return -ENODEV; 370 return -ENODEV;
371 371
372 csrow->dev.type = &csrow_attr_type; 372 csrow->dev.type = &csrow_attr_type;
373 csrow->dev.bus = &mci->bus; 373 csrow->dev.bus = mci->bus;
374 device_initialize(&csrow->dev); 374 device_initialize(&csrow->dev);
375 csrow->dev.parent = &mci->dev; 375 csrow->dev.parent = &mci->dev;
376 csrow->mci = mci; 376 csrow->mci = mci;
@@ -605,7 +605,7 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
605 dimm->mci = mci; 605 dimm->mci = mci;
606 606
607 dimm->dev.type = &dimm_attr_type; 607 dimm->dev.type = &dimm_attr_type;
608 dimm->dev.bus = &mci->bus; 608 dimm->dev.bus = mci->bus;
609 device_initialize(&dimm->dev); 609 device_initialize(&dimm->dev);
610 610
611 dimm->dev.parent = &mci->dev; 611 dimm->dev.parent = &mci->dev;
@@ -975,11 +975,13 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
975 * The memory controller needs its own bus, in order to avoid 975 * The memory controller needs its own bus, in order to avoid
976 * namespace conflicts at /sys/bus/edac. 976 * namespace conflicts at /sys/bus/edac.
977 */ 977 */
978 mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); 978 mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
979 if (!mci->bus.name) 979 if (!mci->bus->name)
980 return -ENOMEM; 980 return -ENOMEM;
981 edac_dbg(0, "creating bus %s\n", mci->bus.name); 981
982 err = bus_register(&mci->bus); 982 edac_dbg(0, "creating bus %s\n", mci->bus->name);
983
984 err = bus_register(mci->bus);
983 if (err < 0) 985 if (err < 0)
984 return err; 986 return err;
985 987
@@ -988,7 +990,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
988 device_initialize(&mci->dev); 990 device_initialize(&mci->dev);
989 991
990 mci->dev.parent = mci_pdev; 992 mci->dev.parent = mci_pdev;
991 mci->dev.bus = &mci->bus; 993 mci->dev.bus = mci->bus;
992 dev_set_name(&mci->dev, "mc%d", mci->mc_idx); 994 dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
993 dev_set_drvdata(&mci->dev, mci); 995 dev_set_drvdata(&mci->dev, mci);
994 pm_runtime_forbid(&mci->dev); 996 pm_runtime_forbid(&mci->dev);
@@ -997,8 +999,8 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
997 err = device_add(&mci->dev); 999 err = device_add(&mci->dev);
998 if (err < 0) { 1000 if (err < 0) {
999 edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev)); 1001 edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
1000 bus_unregister(&mci->bus); 1002 bus_unregister(mci->bus);
1001 kfree(mci->bus.name); 1003 kfree(mci->bus->name);
1002 return err; 1004 return err;
1003 } 1005 }
1004 1006
@@ -1064,8 +1066,8 @@ fail:
1064 } 1066 }
1065fail2: 1067fail2:
1066 device_unregister(&mci->dev); 1068 device_unregister(&mci->dev);
1067 bus_unregister(&mci->bus); 1069 bus_unregister(mci->bus);
1068 kfree(mci->bus.name); 1070 kfree(mci->bus->name);
1069 return err; 1071 return err;
1070} 1072}
1071 1073
@@ -1098,8 +1100,8 @@ void edac_unregister_sysfs(struct mem_ctl_info *mci)
1098{ 1100{
1099 edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev)); 1101 edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
1100 device_unregister(&mci->dev); 1102 device_unregister(&mci->dev);
1101 bus_unregister(&mci->bus); 1103 bus_unregister(mci->bus);
1102 kfree(mci->bus.name); 1104 kfree(mci->bus->name);
1103} 1105}
1104 1106
1105static void mc_attr_release(struct device *dev) 1107static void mc_attr_release(struct device *dev)
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 1b635178cc44..157b934e8ce3 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -974,7 +974,7 @@ static int i5100_setup_debugfs(struct mem_ctl_info *mci)
974 if (!i5100_debugfs) 974 if (!i5100_debugfs)
975 return -ENODEV; 975 return -ENODEV;
976 976
977 priv->debugfs = debugfs_create_dir(mci->bus.name, i5100_debugfs); 977 priv->debugfs = debugfs_create_dir(mci->bus->name, i5100_debugfs);
978 978
979 if (!priv->debugfs) 979 if (!priv->debugfs)
980 return -ENOMEM; 980 return -ENOMEM;
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 7ef316fdc4d9..ac1b43a04285 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -54,6 +54,7 @@
54#define FW_CDEV_KERNEL_VERSION 5 54#define FW_CDEV_KERNEL_VERSION 5
55#define FW_CDEV_VERSION_EVENT_REQUEST2 4 55#define FW_CDEV_VERSION_EVENT_REQUEST2 4
56#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 56#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
57#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
57 58
58struct client { 59struct client {
59 u32 version; 60 u32 version;
@@ -1005,6 +1006,8 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
1005 a->channel, a->speed, a->header_size, cb, client); 1006 a->channel, a->speed, a->header_size, cb, client);
1006 if (IS_ERR(context)) 1007 if (IS_ERR(context))
1007 return PTR_ERR(context); 1008 return PTR_ERR(context);
1009 if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
1010 context->drop_overflow_headers = true;
1008 1011
1009 /* We only support one context at this time. */ 1012 /* We only support one context at this time. */
1010 spin_lock_irq(&client->lock); 1013 spin_lock_irq(&client->lock);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9e1db6490b9a..afb701ec90ca 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2749,8 +2749,11 @@ static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
2749{ 2749{
2750 u32 *ctx_hdr; 2750 u32 *ctx_hdr;
2751 2751
2752 if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) 2752 if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
2753 if (ctx->base.drop_overflow_headers)
2754 return;
2753 flush_iso_completions(ctx); 2755 flush_iso_completions(ctx);
2756 }
2754 2757
2755 ctx_hdr = ctx->header + ctx->header_length; 2758 ctx_hdr = ctx->header + ctx->header_length;
2756 ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]); 2759 ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
@@ -2910,8 +2913,11 @@ static int handle_it_packet(struct context *context,
2910 2913
2911 sync_it_packet_for_cpu(context, d); 2914 sync_it_packet_for_cpu(context, d);
2912 2915
2913 if (ctx->header_length + 4 > PAGE_SIZE) 2916 if (ctx->header_length + 4 > PAGE_SIZE) {
2917 if (ctx->base.drop_overflow_headers)
2918 return 1;
2914 flush_iso_completions(ctx); 2919 flush_iso_completions(ctx);
2920 }
2915 2921
2916 ctx_hdr = ctx->header + ctx->header_length; 2922 ctx_hdr = ctx->header + ctx->header_length;
2917 ctx->last_timestamp = le16_to_cpu(last->res_count); 2923 ctx->last_timestamp = le16_to_cpu(last->res_count);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index eb760a218da4..232fa8fce26a 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -419,6 +419,13 @@ static void __init dmi_format_ids(char *buf, size_t len)
419 dmi_get_system_info(DMI_BIOS_DATE)); 419 dmi_get_system_info(DMI_BIOS_DATE));
420} 420}
421 421
422/*
423 * Check for DMI/SMBIOS headers in the system firmware image. Any
424 * SMBIOS header must start 16 bytes before the DMI header, so take a
425 * 32 byte buffer and check for DMI at offset 16 and SMBIOS at offset
426 * 0. If the DMI header is present, set dmi_ver accordingly (SMBIOS
427 * takes precedence) and return 0. Otherwise return 1.
428 */
422static int __init dmi_present(const u8 *buf) 429static int __init dmi_present(const u8 *buf)
423{ 430{
424 int smbios_ver; 431 int smbios_ver;
@@ -506,6 +513,13 @@ void __init dmi_scan_machine(void)
506 if (p == NULL) 513 if (p == NULL)
507 goto error; 514 goto error;
508 515
516 /*
517 * Iterate over all possible DMI header addresses q.
518 * Maintain the 32 bytes around q in buf. On the
519 * first iteration, substitute zero for the
520 * out-of-range bytes so there is no chance of falsely
521 * detecting an SMBIOS header.
522 */
509 memset(buf, 0, 16); 523 memset(buf, 0, 16);
510 for (q = p; q < p + 0x10000; q += 16) { 524 for (q = p; q < p + 0x10000; q += 16) {
511 memcpy_fromio(buf + 16, q, 16); 525 memcpy_fromio(buf + 16, q, 16);
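
A userspace model of the sliding 32-byte window the new comments describe: each step keeps the previous 16 bytes in the low half of the buffer, so an SMBIOS anchor 16 bytes before a DMI anchor stays visible, and the first step zeroes that half. This sketch matches anchor strings only; the real checksum and version parsing are omitted.

#include <stdio.h>
#include <string.h>

/* 1 = no DMI header at this window, 0 = found (mirrors dmi_present()). */
static int dmi_check_window(const unsigned char *buf)
{
	if (memcmp(buf + 16, "_DMI_", 5) != 0)
		return 1;
	if (memcmp(buf, "_SM_", 4) == 0)
		printf("  SMBIOS anchor 16 bytes before DMI\n");
	return 0;
}

static void scan(const unsigned char *image, size_t len)
{
	unsigned char buf[32];
	size_t off;

	memset(buf, 0, 16);	/* no stale bytes before the first chunk */
	for (off = 0; off + 16 <= len; off += 16) {
		memcpy(buf + 16, image + off, 16);
		if (!dmi_check_window(buf))
			printf("DMI header at offset %zu\n", off);
		memcpy(buf, buf + 16, 16);	/* slide the window */
	}
}

int main(void)
{
	unsigned char img[64] = { 0 };

	memcpy(img + 16, "_SM_", 4);
	memcpy(img + 32, "_DMI_", 5);
	scan(img, sizeof(img));
	return 0;
}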
diff --git a/drivers/gpio/gpio-msm-v1.c b/drivers/gpio/gpio-msm-v1.c
index e3ceaacde45c..73b73969d361 100644
--- a/drivers/gpio/gpio-msm-v1.c
+++ b/drivers/gpio/gpio-msm-v1.c
@@ -21,6 +21,7 @@
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/device.h> 22#include <linux/device.h>
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/err.h>
24 25
25#include <mach/msm_gpiomux.h> 26#include <mach/msm_gpiomux.h>
26 27
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index f4491a497cc8..c2fa77086eb5 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -378,7 +378,7 @@ static int msm_gpio_probe(struct platform_device *pdev)
378 int ret, ngpio; 378 int ret, ngpio;
379 struct resource *res; 379 struct resource *res;
380 380
381 if (!of_property_read_u32(pdev->dev.of_node, "ngpio", &ngpio)) { 381 if (of_property_read_u32(pdev->dev.of_node, "ngpio", &ngpio)) {
382 dev_err(&pdev->dev, "%s: ngpio property missing\n", __func__); 382 dev_err(&pdev->dev, "%s: ngpio property missing\n", __func__);
383 return -EINVAL; 383 return -EINVAL;
384 } 384 }
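
The gpio-msm-v2 one-character fix matters because of_property_read_u32() returns 0 on success and a negative errno on failure; the old test treated success as the error case. A mock with the same return convention:

#include <stdio.h>

/* Same convention as of_property_read_u32(): 0 on success,
 * negative errno when the property is absent. */
static int read_u32_prop(int present, unsigned int *out)
{
	if (!present)
		return -22;	/* -EINVAL */
	*out = 32;
	return 0;
}

int main(void)
{
	unsigned int ngpio;

	/* Correct form after the fix: a non-zero return means failure. */
	if (read_u32_prop(1, &ngpio)) {
		fprintf(stderr, "ngpio property missing\n");
		return 1;
	}
	printf("ngpio = %u\n", ngpio);
	return 0;
}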
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index a7c54c843291..955555d6ec88 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -6,7 +6,7 @@
6# 6#
7menuconfig DRM 7menuconfig DRM
8 tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" 8 tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
9 depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU 9 depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA
10 select HDMI 10 select HDMI
11 select I2C 11 select I2C
12 select I2C_ALGOBIT 12 select I2C_ALGOBIT
@@ -168,6 +168,17 @@ config DRM_I915_KMS
168 the driver to bind to PCI devices, which precludes loading things 168 the driver to bind to PCI devices, which precludes loading things
169 like intelfb. 169 like intelfb.
170 170
171config DRM_I915_PRELIMINARY_HW_SUPPORT
172 bool "Enable preliminary support for prerelease Intel hardware by default"
173 depends on DRM_I915
174 help
175 Choose this option if you have prerelease Intel hardware and want the
176 i915 driver to support it by default. You can enable such support at
177 runtime with the module option i915.preliminary_hw_support=1; this
178 option changes the default for that module option.
179
180 If in doubt, say "N".
181
171config DRM_MGA 182config DRM_MGA
172 tristate "Matrox g200/g400" 183 tristate "Matrox g200/g400"
173 depends on DRM && PCI 184 depends on DRM && PCI
@@ -223,3 +234,5 @@ source "drivers/gpu/drm/omapdrm/Kconfig"
223source "drivers/gpu/drm/tilcdc/Kconfig" 234source "drivers/gpu/drm/tilcdc/Kconfig"
224 235
225source "drivers/gpu/drm/qxl/Kconfig" 236source "drivers/gpu/drm/qxl/Kconfig"
237
238source "drivers/gpu/drm/msm/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 801bcafa3028..f089adfe70ee 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -7,13 +7,13 @@ ccflags-y := -Iinclude/drm
7drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ 7drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
8 drm_context.o drm_dma.o \ 8 drm_context.o drm_dma.o \
9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ 9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ 10 drm_lock.o drm_memory.o drm_stub.o drm_vm.o \
11 drm_agpsupport.o drm_scatter.o drm_pci.o \ 11 drm_agpsupport.o drm_scatter.o drm_pci.o \
12 drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ 12 drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
13 drm_crtc.o drm_modes.o drm_edid.o \ 13 drm_crtc.o drm_modes.o drm_edid.o \
14 drm_info.o drm_debugfs.o drm_encoder_slave.o \ 14 drm_info.o drm_debugfs.o drm_encoder_slave.o \
15 drm_trace_points.o drm_global.o drm_prime.o \ 15 drm_trace_points.o drm_global.o drm_prime.o \
16 drm_rect.o 16 drm_rect.o drm_vma_manager.o drm_flip_work.o
17 17
18drm-$(CONFIG_COMPAT) += drm_ioc32.o 18drm-$(CONFIG_COMPAT) += drm_ioc32.o
19drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o 19drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -54,4 +54,5 @@ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
54obj-$(CONFIG_DRM_OMAP) += omapdrm/ 54obj-$(CONFIG_DRM_OMAP) += omapdrm/
55obj-$(CONFIG_DRM_TILCDC) += tilcdc/ 55obj-$(CONFIG_DRM_TILCDC) += tilcdc/
56obj-$(CONFIG_DRM_QXL) += qxl/ 56obj-$(CONFIG_DRM_QXL) += qxl/
57obj-$(CONFIG_DRM_MSM) += msm/
57obj-y += i2c/ 58obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index df0d0a08097a..32e270dc714e 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -190,7 +190,6 @@ static const struct file_operations ast_fops = {
190 .unlocked_ioctl = drm_ioctl, 190 .unlocked_ioctl = drm_ioctl,
191 .mmap = ast_mmap, 191 .mmap = ast_mmap,
192 .poll = drm_poll, 192 .poll = drm_poll,
193 .fasync = drm_fasync,
194#ifdef CONFIG_COMPAT 193#ifdef CONFIG_COMPAT
195 .compat_ioctl = drm_compat_ioctl, 194 .compat_ioctl = drm_compat_ioctl,
196#endif 195#endif
@@ -198,7 +197,7 @@ static const struct file_operations ast_fops = {
198}; 197};
199 198
200static struct drm_driver driver = { 199static struct drm_driver driver = {
201 .driver_features = DRIVER_USE_MTRR | DRIVER_MODESET | DRIVER_GEM, 200 .driver_features = DRIVER_MODESET | DRIVER_GEM,
202 .dev_priv_size = 0, 201 .dev_priv_size = 0,
203 202
204 .load = ast_driver_load, 203 .load = ast_driver_load,
@@ -216,7 +215,7 @@ static struct drm_driver driver = {
216 .gem_free_object = ast_gem_free_object, 215 .gem_free_object = ast_gem_free_object,
217 .dumb_create = ast_dumb_create, 216 .dumb_create = ast_dumb_create,
218 .dumb_map_offset = ast_dumb_mmap_offset, 217 .dumb_map_offset = ast_dumb_mmap_offset,
219 .dumb_destroy = ast_dumb_destroy, 218 .dumb_destroy = drm_gem_dumb_destroy,
220 219
221}; 220};
222 221
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 622d4ae7eb9e..796dbb212a41 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -322,9 +322,6 @@ ast_bo(struct ttm_buffer_object *bo)
322extern int ast_dumb_create(struct drm_file *file, 322extern int ast_dumb_create(struct drm_file *file,
323 struct drm_device *dev, 323 struct drm_device *dev,
324 struct drm_mode_create_dumb *args); 324 struct drm_mode_create_dumb *args);
325extern int ast_dumb_destroy(struct drm_file *file,
326 struct drm_device *dev,
327 uint32_t handle);
328 325
329extern int ast_gem_init_object(struct drm_gem_object *obj); 326extern int ast_gem_init_object(struct drm_gem_object *obj);
330extern void ast_gem_free_object(struct drm_gem_object *obj); 327extern void ast_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f60fd7bd1183..7f6152d374ca 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -449,13 +449,6 @@ int ast_dumb_create(struct drm_file *file,
449 return 0; 449 return 0;
450} 450}
451 451
452int ast_dumb_destroy(struct drm_file *file,
453 struct drm_device *dev,
454 uint32_t handle)
455{
456 return drm_gem_handle_delete(file, handle);
457}
458
459int ast_gem_init_object(struct drm_gem_object *obj) 452int ast_gem_init_object(struct drm_gem_object *obj)
460{ 453{
461 BUG(); 454 BUG();
@@ -487,7 +480,7 @@ void ast_gem_free_object(struct drm_gem_object *obj)
487 480
488static inline u64 ast_bo_mmap_offset(struct ast_bo *bo) 481static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
489{ 482{
490 return bo->bo.addr_space_offset; 483 return drm_vma_node_offset_addr(&bo->bo.vma_node);
491} 484}
492int 485int
493ast_dumb_mmap_offset(struct drm_file *file, 486ast_dumb_mmap_offset(struct drm_file *file,
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 98d670825a1a..32aecb34dbce 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -148,7 +148,9 @@ ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
148 148
149static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) 149static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
150{ 150{
151 return 0; 151 struct ast_bo *astbo = ast_bo(bo);
152
153 return drm_vma_node_verify_access(&astbo->gem.vma_node, filp);
152} 154}
153 155
154static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev, 156static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -321,8 +323,8 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
321 return ret; 323 return ret;
322 } 324 }
323 325
324 astbo->gem.driver_private = NULL;
325 astbo->bo.bdev = &ast->ttm.bdev; 326 astbo->bo.bdev = &ast->ttm.bdev;
327 astbo->bo.bdev->dev_mapping = dev->dev_mapping;
326 328
327 ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); 329 ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
328 330
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 8ecb601152ef..138364d91782 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -85,10 +85,9 @@ static const struct file_operations cirrus_driver_fops = {
85#ifdef CONFIG_COMPAT 85#ifdef CONFIG_COMPAT
86 .compat_ioctl = drm_compat_ioctl, 86 .compat_ioctl = drm_compat_ioctl,
87#endif 87#endif
88 .fasync = drm_fasync,
89}; 88};
90static struct drm_driver driver = { 89static struct drm_driver driver = {
91 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_USE_MTRR, 90 .driver_features = DRIVER_MODESET | DRIVER_GEM,
92 .load = cirrus_driver_load, 91 .load = cirrus_driver_load,
93 .unload = cirrus_driver_unload, 92 .unload = cirrus_driver_unload,
94 .fops = &cirrus_driver_fops, 93 .fops = &cirrus_driver_fops,
@@ -102,7 +101,7 @@ static struct drm_driver driver = {
102 .gem_free_object = cirrus_gem_free_object, 101 .gem_free_object = cirrus_gem_free_object,
103 .dumb_create = cirrus_dumb_create, 102 .dumb_create = cirrus_dumb_create,
104 .dumb_map_offset = cirrus_dumb_mmap_offset, 103 .dumb_map_offset = cirrus_dumb_mmap_offset,
105 .dumb_destroy = cirrus_dumb_destroy, 104 .dumb_destroy = drm_gem_dumb_destroy,
106}; 105};
107 106
108static struct pci_driver cirrus_pci_driver = { 107static struct pci_driver cirrus_pci_driver = {
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index bae55609e6c3..9b0bb9184afd 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -203,9 +203,6 @@ int cirrus_gem_create(struct drm_device *dev,
203int cirrus_dumb_create(struct drm_file *file, 203int cirrus_dumb_create(struct drm_file *file,
204 struct drm_device *dev, 204 struct drm_device *dev,
205 struct drm_mode_create_dumb *args); 205 struct drm_mode_create_dumb *args);
206int cirrus_dumb_destroy(struct drm_file *file,
207 struct drm_device *dev,
208 uint32_t handle);
209 206
210int cirrus_framebuffer_init(struct drm_device *dev, 207int cirrus_framebuffer_init(struct drm_device *dev,
211 struct cirrus_framebuffer *gfb, 208 struct cirrus_framebuffer *gfb,
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 35cbae827771..f130a533a512 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -255,13 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
255 return 0; 255 return 0;
256} 256}
257 257
258int cirrus_dumb_destroy(struct drm_file *file,
259 struct drm_device *dev,
260 uint32_t handle)
261{
262 return drm_gem_handle_delete(file, handle);
263}
264
265int cirrus_gem_init_object(struct drm_gem_object *obj) 258int cirrus_gem_init_object(struct drm_gem_object *obj)
266{ 259{
267 BUG(); 260 BUG();
@@ -294,7 +287,7 @@ void cirrus_gem_free_object(struct drm_gem_object *obj)
294 287
295static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo) 288static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
296{ 289{
297 return bo->bo.addr_space_offset; 290 return drm_vma_node_offset_addr(&bo->bo.vma_node);
298} 291}
299 292
300int 293int
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 0047012045c2..75becdeac07d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -148,7 +148,9 @@ cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
148 148
149static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) 149static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
150{ 150{
151 return 0; 151 struct cirrus_bo *cirrusbo = cirrus_bo(bo);
152
153 return drm_vma_node_verify_access(&cirrusbo->gem.vma_node, filp);
152} 154}
153 155
154static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev, 156static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -326,8 +328,8 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
326 return ret; 328 return ret;
327 } 329 }
328 330
329 cirrusbo->gem.driver_private = NULL;
330 cirrusbo->bo.bdev = &cirrus->ttm.bdev; 331 cirrusbo->bo.bdev = &cirrus->ttm.bdev;
332 cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
331 333
332 cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); 334 cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
333 335
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 3d8fed179797..e301d653d97e 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -424,6 +424,57 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
424} 424}
425 425
426/** 426/**
427 * drm_agp_clear - Clear AGP resource list
428 * @dev: DRM device
429 *
430 * Iterate over all AGP resources and remove them. But keep the AGP head
431 * intact so it can still be used. It is safe to call this if AGP is disabled or
432 * was already removed.
433 *
434 * If DRIVER_MODESET is active, nothing is done to protect the modesetting
 435 * resources from getting destroyed. Drivers are responsible for cleaning them up
436 * during device shutdown.
437 */
438void drm_agp_clear(struct drm_device *dev)
439{
440 struct drm_agp_mem *entry, *tempe;
441
442 if (!drm_core_has_AGP(dev) || !dev->agp)
443 return;
444 if (drm_core_check_feature(dev, DRIVER_MODESET))
445 return;
446
447 list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
448 if (entry->bound)
449 drm_unbind_agp(entry->memory);
450 drm_free_agp(entry->memory, entry->pages);
451 kfree(entry);
452 }
453 INIT_LIST_HEAD(&dev->agp->memory);
454
455 if (dev->agp->acquired)
456 drm_agp_release(dev);
457
458 dev->agp->acquired = 0;
459 dev->agp->enabled = 0;
460}
461
462/**
463 * drm_agp_destroy - Destroy AGP head
464 * @dev: DRM device
465 *
 466 * Destroy resources that were previously allocated via drm_agp_init(). Caller
 467 * must ensure that all AGP resources are cleaned up before calling this. See
468 * drm_agp_clear().
469 *
470 * Call this to destroy AGP heads allocated via drm_agp_init().
471 */
472void drm_agp_destroy(struct drm_agp_head *agp)
473{
474 kfree(agp);
475}
476
477/**
427 * Binds a collection of pages into AGP memory at the given offset, returning 478 * Binds a collection of pages into AGP memory at the given offset, returning
428 * the AGP memory structure containing them. 479 * the AGP memory structure containing them.
429 * 480 *
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 5a4dbb410b71..471e051d295e 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -207,12 +207,10 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
207 return 0; 207 return 0;
208 } 208 }
209 209
210 if (drm_core_has_MTRR(dev)) { 210 if (map->type == _DRM_FRAME_BUFFER ||
211 if (map->type == _DRM_FRAME_BUFFER || 211 (map->flags & _DRM_WRITE_COMBINING)) {
212 (map->flags & _DRM_WRITE_COMBINING)) { 212 map->mtrr =
213 map->mtrr = 213 arch_phys_wc_add(map->offset, map->size);
214 arch_phys_wc_add(map->offset, map->size);
215 }
216 } 214 }
217 if (map->type == _DRM_REGISTERS) { 215 if (map->type == _DRM_REGISTERS) {
218 if (map->flags & _DRM_WRITE_COMBINING) 216 if (map->flags & _DRM_WRITE_COMBINING)
@@ -243,7 +241,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
243 } 241 }
244 map->handle = vmalloc_user(map->size); 242 map->handle = vmalloc_user(map->size);
245 DRM_DEBUG("%lu %d %p\n", 243 DRM_DEBUG("%lu %d %p\n",
246 map->size, drm_order(map->size), map->handle); 244 map->size, order_base_2(map->size), map->handle);
247 if (!map->handle) { 245 if (!map->handle) {
248 kfree(map); 246 kfree(map);
249 return -ENOMEM; 247 return -ENOMEM;
@@ -464,8 +462,7 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
464 iounmap(map->handle); 462 iounmap(map->handle);
465 /* FALLTHROUGH */ 463 /* FALLTHROUGH */
466 case _DRM_FRAME_BUFFER: 464 case _DRM_FRAME_BUFFER:
467 if (drm_core_has_MTRR(dev)) 465 arch_phys_wc_del(map->mtrr);
468 arch_phys_wc_del(map->mtrr);
469 break; 466 break;
470 case _DRM_SHM: 467 case _DRM_SHM:
471 vfree(map->handle); 468 vfree(map->handle);
@@ -630,7 +627,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
630 return -EINVAL; 627 return -EINVAL;
631 628
632 count = request->count; 629 count = request->count;
633 order = drm_order(request->size); 630 order = order_base_2(request->size);
634 size = 1 << order; 631 size = 1 << order;
635 632
636 alignment = (request->flags & _DRM_PAGE_ALIGN) 633 alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -800,7 +797,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
800 return -EPERM; 797 return -EPERM;
801 798
802 count = request->count; 799 count = request->count;
803 order = drm_order(request->size); 800 order = order_base_2(request->size);
804 size = 1 << order; 801 size = 1 << order;
805 802
806 DRM_DEBUG("count=%d, size=%d (%d), order=%d\n", 803 DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
@@ -1002,7 +999,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
1002 return -EPERM; 999 return -EPERM;
1003 1000
1004 count = request->count; 1001 count = request->count;
1005 order = drm_order(request->size); 1002 order = order_base_2(request->size);
1006 size = 1 << order; 1003 size = 1 << order;
1007 1004
1008 alignment = (request->flags & _DRM_PAGE_ALIGN) 1005 alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -1130,161 +1127,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
1130 return 0; 1127 return 0;
1131} 1128}
1132 1129
1133static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
1134{
1135 struct drm_device_dma *dma = dev->dma;
1136 struct drm_buf_entry *entry;
1137 struct drm_buf *buf;
1138 unsigned long offset;
1139 unsigned long agp_offset;
1140 int count;
1141 int order;
1142 int size;
1143 int alignment;
1144 int page_order;
1145 int total;
1146 int byte_count;
1147 int i;
1148 struct drm_buf **temp_buflist;
1149
1150 if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1151 return -EINVAL;
1152
1153 if (!dma)
1154 return -EINVAL;
1155
1156 if (!capable(CAP_SYS_ADMIN))
1157 return -EPERM;
1158
1159 count = request->count;
1160 order = drm_order(request->size);
1161 size = 1 << order;
1162
1163 alignment = (request->flags & _DRM_PAGE_ALIGN)
1164 ? PAGE_ALIGN(size) : size;
1165 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1166 total = PAGE_SIZE << page_order;
1167
1168 byte_count = 0;
1169 agp_offset = request->agp_start;
1170
1171 DRM_DEBUG("count: %d\n", count);
1172 DRM_DEBUG("order: %d\n", order);
1173 DRM_DEBUG("size: %d\n", size);
1174 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1175 DRM_DEBUG("alignment: %d\n", alignment);
1176 DRM_DEBUG("page_order: %d\n", page_order);
1177 DRM_DEBUG("total: %d\n", total);
1178
1179 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1180 return -EINVAL;
1181
1182 spin_lock(&dev->count_lock);
1183 if (dev->buf_use) {
1184 spin_unlock(&dev->count_lock);
1185 return -EBUSY;
1186 }
1187 atomic_inc(&dev->buf_alloc);
1188 spin_unlock(&dev->count_lock);
1189
1190 mutex_lock(&dev->struct_mutex);
1191 entry = &dma->bufs[order];
1192 if (entry->buf_count) {
1193 mutex_unlock(&dev->struct_mutex);
1194 atomic_dec(&dev->buf_alloc);
1195 return -ENOMEM; /* May only call once for each order */
1196 }
1197
1198 if (count < 0 || count > 4096) {
1199 mutex_unlock(&dev->struct_mutex);
1200 atomic_dec(&dev->buf_alloc);
1201 return -EINVAL;
1202 }
1203
1204 entry->buflist = kzalloc(count * sizeof(*entry->buflist),
1205 GFP_KERNEL);
1206 if (!entry->buflist) {
1207 mutex_unlock(&dev->struct_mutex);
1208 atomic_dec(&dev->buf_alloc);
1209 return -ENOMEM;
1210 }
1211
1212 entry->buf_size = size;
1213 entry->page_order = page_order;
1214
1215 offset = 0;
1216
1217 while (entry->buf_count < count) {
1218 buf = &entry->buflist[entry->buf_count];
1219 buf->idx = dma->buf_count + entry->buf_count;
1220 buf->total = alignment;
1221 buf->order = order;
1222 buf->used = 0;
1223
1224 buf->offset = (dma->byte_count + offset);
1225 buf->bus_address = agp_offset + offset;
1226 buf->address = (void *)(agp_offset + offset);
1227 buf->next = NULL;
1228 buf->waiting = 0;
1229 buf->pending = 0;
1230 buf->file_priv = NULL;
1231
1232 buf->dev_priv_size = dev->driver->dev_priv_size;
1233 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
1234 if (!buf->dev_private) {
1235 /* Set count correctly so we free the proper amount. */
1236 entry->buf_count = count;
1237 drm_cleanup_buf_error(dev, entry);
1238 mutex_unlock(&dev->struct_mutex);
1239 atomic_dec(&dev->buf_alloc);
1240 return -ENOMEM;
1241 }
1242
1243 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1244
1245 offset += alignment;
1246 entry->buf_count++;
1247 byte_count += PAGE_SIZE << page_order;
1248 }
1249
1250 DRM_DEBUG("byte_count: %d\n", byte_count);
1251
1252 temp_buflist = krealloc(dma->buflist,
1253 (dma->buf_count + entry->buf_count) *
1254 sizeof(*dma->buflist), GFP_KERNEL);
1255 if (!temp_buflist) {
1256 /* Free the entry because it isn't valid */
1257 drm_cleanup_buf_error(dev, entry);
1258 mutex_unlock(&dev->struct_mutex);
1259 atomic_dec(&dev->buf_alloc);
1260 return -ENOMEM;
1261 }
1262 dma->buflist = temp_buflist;
1263
1264 for (i = 0; i < entry->buf_count; i++) {
1265 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1266 }
1267
1268 dma->buf_count += entry->buf_count;
1269 dma->seg_count += entry->seg_count;
1270 dma->page_count += byte_count >> PAGE_SHIFT;
1271 dma->byte_count += byte_count;
1272
1273 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1274 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1275
1276 mutex_unlock(&dev->struct_mutex);
1277
1278 request->count = entry->buf_count;
1279 request->size = size;
1280
1281 dma->flags = _DRM_DMA_USE_FB;
1282
1283 atomic_dec(&dev->buf_alloc);
1284 return 0;
1285}
1286
1287
1288/** 1130/**
1289 * Add buffers for DMA transfers (ioctl). 1131 * Add buffers for DMA transfers (ioctl).
1290 * 1132 *
@@ -1305,6 +1147,9 @@ int drm_addbufs(struct drm_device *dev, void *data,
1305 struct drm_buf_desc *request = data; 1147 struct drm_buf_desc *request = data;
1306 int ret; 1148 int ret;
1307 1149
1150 if (drm_core_check_feature(dev, DRIVER_MODESET))
1151 return -EINVAL;
1152
1308 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1153 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1309 return -EINVAL; 1154 return -EINVAL;
1310 1155
@@ -1316,7 +1161,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
1316 if (request->flags & _DRM_SG_BUFFER) 1161 if (request->flags & _DRM_SG_BUFFER)
1317 ret = drm_addbufs_sg(dev, request); 1162 ret = drm_addbufs_sg(dev, request);
1318 else if (request->flags & _DRM_FB_BUFFER) 1163 else if (request->flags & _DRM_FB_BUFFER)
1319 ret = drm_addbufs_fb(dev, request); 1164 ret = -EINVAL;
1320 else 1165 else
1321 ret = drm_addbufs_pci(dev, request); 1166 ret = drm_addbufs_pci(dev, request);
1322 1167
@@ -1348,6 +1193,9 @@ int drm_infobufs(struct drm_device *dev, void *data,
1348 int i; 1193 int i;
1349 int count; 1194 int count;
1350 1195
1196 if (drm_core_check_feature(dev, DRIVER_MODESET))
1197 return -EINVAL;
1198
1351 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1199 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1352 return -EINVAL; 1200 return -EINVAL;
1353 1201
@@ -1427,6 +1275,9 @@ int drm_markbufs(struct drm_device *dev, void *data,
1427 int order; 1275 int order;
1428 struct drm_buf_entry *entry; 1276 struct drm_buf_entry *entry;
1429 1277
1278 if (drm_core_check_feature(dev, DRIVER_MODESET))
1279 return -EINVAL;
1280
1430 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1281 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1431 return -EINVAL; 1282 return -EINVAL;
1432 1283
@@ -1435,7 +1286,7 @@ int drm_markbufs(struct drm_device *dev, void *data,
1435 1286
1436 DRM_DEBUG("%d, %d, %d\n", 1287 DRM_DEBUG("%d, %d, %d\n",
1437 request->size, request->low_mark, request->high_mark); 1288 request->size, request->low_mark, request->high_mark);
1438 order = drm_order(request->size); 1289 order = order_base_2(request->size);
1439 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) 1290 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1440 return -EINVAL; 1291 return -EINVAL;
1441 entry = &dma->bufs[order]; 1292 entry = &dma->bufs[order];
@@ -1472,6 +1323,9 @@ int drm_freebufs(struct drm_device *dev, void *data,
1472 int idx; 1323 int idx;
1473 struct drm_buf *buf; 1324 struct drm_buf *buf;
1474 1325
1326 if (drm_core_check_feature(dev, DRIVER_MODESET))
1327 return -EINVAL;
1328
1475 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1329 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1476 return -EINVAL; 1330 return -EINVAL;
1477 1331
@@ -1524,6 +1378,9 @@ int drm_mapbufs(struct drm_device *dev, void *data,
1524 struct drm_buf_map *request = data; 1378 struct drm_buf_map *request = data;
1525 int i; 1379 int i;
1526 1380
1381 if (drm_core_check_feature(dev, DRIVER_MODESET))
1382 return -EINVAL;
1383
1527 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1384 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1528 return -EINVAL; 1385 return -EINVAL;
1529 1386
@@ -1541,9 +1398,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
1541 if (request->count >= dma->buf_count) { 1398 if (request->count >= dma->buf_count) {
1542 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) 1399 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1543 || (drm_core_check_feature(dev, DRIVER_SG) 1400 || (drm_core_check_feature(dev, DRIVER_SG)
1544 && (dma->flags & _DRM_DMA_USE_SG)) 1401 && (dma->flags & _DRM_DMA_USE_SG))) {
1545 || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1546 && (dma->flags & _DRM_DMA_USE_FB))) {
1547 struct drm_local_map *map = dev->agp_buffer_map; 1402 struct drm_local_map *map = dev->agp_buffer_map;
1548 unsigned long token = dev->agp_buffer_token; 1403 unsigned long token = dev->agp_buffer_token;
1549 1404
@@ -1600,25 +1455,28 @@ int drm_mapbufs(struct drm_device *dev, void *data,
1600 return retcode; 1455 return retcode;
1601} 1456}
1602 1457
1603/** 1458int drm_dma_ioctl(struct drm_device *dev, void *data,
 1604 * Compute size order. Returns the exponent of the smallest power of two which 1459 struct drm_file *file_priv)
 1605 * is greater than or equal to the given number.
1606 *
1607 * \param size size.
1608 * \return order.
1609 *
1610 * \todo Can be made faster.
1611 */
1612int drm_order(unsigned long size)
1613{ 1460{
1614 int order; 1461 if (drm_core_check_feature(dev, DRIVER_MODESET))
1615 unsigned long tmp; 1462 return -EINVAL;
1616 1463
1617 for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ; 1464 if (dev->driver->dma_ioctl)
1465 return dev->driver->dma_ioctl(dev, data, file_priv);
1466 else
1467 return -EINVAL;
1468}
1618 1469
1619 if (size & (size - 1)) 1470struct drm_local_map *drm_getsarea(struct drm_device *dev)
1620 ++order; 1471{
1472 struct drm_map_list *entry;
1621 1473
1622 return order; 1474 list_for_each_entry(entry, &dev->maplist, head) {
1475 if (entry->map && entry->map->type == _DRM_SHM &&
1476 (entry->map->flags & _DRM_CONTAINS_LOCK)) {
1477 return entry->map;
1478 }
1479 }
1480 return NULL;
1623} 1481}
1624EXPORT_SYMBOL(drm_order); 1482EXPORT_SYMBOL(drm_getsarea);
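
For reference, the drm_order() deleted above computed ceil(log2(size)), which is exactly what the generic order_base_2() that replaces it provides. A standalone version with a few spot checks:

#include <stdio.h>

/* ceil(log2(size)), as drm_order() computed it. */
static int order(unsigned long size)
{
	int ord = 0;
	unsigned long tmp;

	for (tmp = size >> 1; tmp; tmp >>= 1)
		ord++;			/* floor(log2(size)) */
	if (size & (size - 1))
		ord++;			/* not a power of two: round up */
	return ord;
}

int main(void)
{
	unsigned long sizes[] = { 1, 2, 3, 4096, 4097 };
	int i;

	for (i = 0; i < 5; i++)
		printf("order(%lu) = %d\n", sizes[i], order(sizes[i]));
	return 0;	/* prints 0, 1, 2, 12, 13 */
}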
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 725968d38976..b4fb86d89850 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -42,10 +42,6 @@
42 42
43#include <drm/drmP.h> 43#include <drm/drmP.h>
44 44
45/******************************************************************/
46/** \name Context bitmap support */
47/*@{*/
48
49/** 45/**
50 * Free a handle from the context bitmap. 46 * Free a handle from the context bitmap.
51 * 47 *
@@ -56,13 +52,48 @@
56 * in drm_device::ctx_idr, while holding the drm_device::struct_mutex 52 * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
57 * lock. 53 * lock.
58 */ 54 */
59void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle) 55static void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
60{ 56{
57 if (drm_core_check_feature(dev, DRIVER_MODESET))
58 return;
59
61 mutex_lock(&dev->struct_mutex); 60 mutex_lock(&dev->struct_mutex);
62 idr_remove(&dev->ctx_idr, ctx_handle); 61 idr_remove(&dev->ctx_idr, ctx_handle);
63 mutex_unlock(&dev->struct_mutex); 62 mutex_unlock(&dev->struct_mutex);
64} 63}
65 64
65/******************************************************************/
66/** \name Context bitmap support */
67/*@{*/
68
69void drm_legacy_ctxbitmap_release(struct drm_device *dev,
70 struct drm_file *file_priv)
71{
72 if (drm_core_check_feature(dev, DRIVER_MODESET))
73 return;
74
75 mutex_lock(&dev->ctxlist_mutex);
76 if (!list_empty(&dev->ctxlist)) {
77 struct drm_ctx_list *pos, *n;
78
79 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
80 if (pos->tag == file_priv &&
81 pos->handle != DRM_KERNEL_CONTEXT) {
82 if (dev->driver->context_dtor)
83 dev->driver->context_dtor(dev,
84 pos->handle);
85
86 drm_ctxbitmap_free(dev, pos->handle);
87
88 list_del(&pos->head);
89 kfree(pos);
90 --dev->ctx_count;
91 }
92 }
93 }
94 mutex_unlock(&dev->ctxlist_mutex);
95}
96
66/** 97/**
67 * Context bitmap allocation. 98 * Context bitmap allocation.
68 * 99 *
@@ -90,10 +121,12 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
90 * 121 *
91 * Initialise the drm_device::ctx_idr 122 * Initialise the drm_device::ctx_idr
92 */ 123 */
93int drm_ctxbitmap_init(struct drm_device * dev) 124void drm_legacy_ctxbitmap_init(struct drm_device * dev)
94{ 125{
126 if (drm_core_check_feature(dev, DRIVER_MODESET))
127 return;
128
95 idr_init(&dev->ctx_idr); 129 idr_init(&dev->ctx_idr);
96 return 0;
97} 130}
98 131
99/** 132/**
@@ -104,7 +137,7 @@ int drm_ctxbitmap_init(struct drm_device * dev)
104 * Free all idr members using drm_ctx_sarea_free helper function 137 * Free all idr members using drm_ctx_sarea_free helper function
105 * while holding the drm_device::struct_mutex lock. 138 * while holding the drm_device::struct_mutex lock.
106 */ 139 */
107void drm_ctxbitmap_cleanup(struct drm_device * dev) 140void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
108{ 141{
109 mutex_lock(&dev->struct_mutex); 142 mutex_lock(&dev->struct_mutex);
110 idr_destroy(&dev->ctx_idr); 143 idr_destroy(&dev->ctx_idr);
@@ -136,6 +169,9 @@ int drm_getsareactx(struct drm_device *dev, void *data,
136 struct drm_local_map *map; 169 struct drm_local_map *map;
137 struct drm_map_list *_entry; 170 struct drm_map_list *_entry;
138 171
172 if (drm_core_check_feature(dev, DRIVER_MODESET))
173 return -EINVAL;
174
139 mutex_lock(&dev->struct_mutex); 175 mutex_lock(&dev->struct_mutex);
140 176
141 map = idr_find(&dev->ctx_idr, request->ctx_id); 177 map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -180,6 +216,9 @@ int drm_setsareactx(struct drm_device *dev, void *data,
180 struct drm_local_map *map = NULL; 216 struct drm_local_map *map = NULL;
181 struct drm_map_list *r_list = NULL; 217 struct drm_map_list *r_list = NULL;
182 218
219 if (drm_core_check_feature(dev, DRIVER_MODESET))
220 return -EINVAL;
221
183 mutex_lock(&dev->struct_mutex); 222 mutex_lock(&dev->struct_mutex);
184 list_for_each_entry(r_list, &dev->maplist, head) { 223 list_for_each_entry(r_list, &dev->maplist, head) {
185 if (r_list->map 224 if (r_list->map
@@ -251,7 +290,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
251 struct drm_file *file_priv, int new) 290 struct drm_file *file_priv, int new)
252{ 291{
253 dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ 292 dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
254 dev->last_switch = jiffies;
255 293
256 if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) { 294 if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
257 DRM_ERROR("Lock isn't held after context switch\n"); 295 DRM_ERROR("Lock isn't held after context switch\n");
@@ -261,7 +299,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
261 when the kernel holds the lock, release 299 when the kernel holds the lock, release
262 that lock here. */ 300 that lock here. */
263 clear_bit(0, &dev->context_flag); 301 clear_bit(0, &dev->context_flag);
264 wake_up(&dev->context_wait);
265 302
266 return 0; 303 return 0;
267} 304}
@@ -282,6 +319,9 @@ int drm_resctx(struct drm_device *dev, void *data,
282 struct drm_ctx ctx; 319 struct drm_ctx ctx;
283 int i; 320 int i;
284 321
322 if (drm_core_check_feature(dev, DRIVER_MODESET))
323 return -EINVAL;
324
285 if (res->count >= DRM_RESERVED_CONTEXTS) { 325 if (res->count >= DRM_RESERVED_CONTEXTS) {
286 memset(&ctx, 0, sizeof(ctx)); 326 memset(&ctx, 0, sizeof(ctx));
287 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { 327 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -312,6 +352,9 @@ int drm_addctx(struct drm_device *dev, void *data,
312 struct drm_ctx_list *ctx_entry; 352 struct drm_ctx_list *ctx_entry;
313 struct drm_ctx *ctx = data; 353 struct drm_ctx *ctx = data;
314 354
355 if (drm_core_check_feature(dev, DRIVER_MODESET))
356 return -EINVAL;
357
315 ctx->handle = drm_ctxbitmap_next(dev); 358 ctx->handle = drm_ctxbitmap_next(dev);
316 if (ctx->handle == DRM_KERNEL_CONTEXT) { 359 if (ctx->handle == DRM_KERNEL_CONTEXT) {
317 /* Skip kernel's context and get a new one. */ 360 /* Skip kernel's context and get a new one. */
@@ -342,12 +385,6 @@ int drm_addctx(struct drm_device *dev, void *data,
342 return 0; 385 return 0;
343} 386}
344 387
345int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
346{
347 /* This does nothing */
348 return 0;
349}
350
351/** 388/**
352 * Get context. 389 * Get context.
353 * 390 *
@@ -361,6 +398,9 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
361{ 398{
362 struct drm_ctx *ctx = data; 399 struct drm_ctx *ctx = data;
363 400
401 if (drm_core_check_feature(dev, DRIVER_MODESET))
402 return -EINVAL;
403
364 /* This is 0, because we don't handle any context flags */ 404 /* This is 0, because we don't handle any context flags */
365 ctx->flags = 0; 405 ctx->flags = 0;
366 406
@@ -383,6 +423,9 @@ int drm_switchctx(struct drm_device *dev, void *data,
383{ 423{
384 struct drm_ctx *ctx = data; 424 struct drm_ctx *ctx = data;
385 425
426 if (drm_core_check_feature(dev, DRIVER_MODESET))
427 return -EINVAL;
428
386 DRM_DEBUG("%d\n", ctx->handle); 429 DRM_DEBUG("%d\n", ctx->handle);
387 return drm_context_switch(dev, dev->last_context, ctx->handle); 430 return drm_context_switch(dev, dev->last_context, ctx->handle);
388} 431}
@@ -403,6 +446,9 @@ int drm_newctx(struct drm_device *dev, void *data,
403{ 446{
404 struct drm_ctx *ctx = data; 447 struct drm_ctx *ctx = data;
405 448
449 if (drm_core_check_feature(dev, DRIVER_MODESET))
450 return -EINVAL;
451
406 DRM_DEBUG("%d\n", ctx->handle); 452 DRM_DEBUG("%d\n", ctx->handle);
407 drm_context_switch_complete(dev, file_priv, ctx->handle); 453 drm_context_switch_complete(dev, file_priv, ctx->handle);
408 454
@@ -425,6 +471,9 @@ int drm_rmctx(struct drm_device *dev, void *data,
425{ 471{
426 struct drm_ctx *ctx = data; 472 struct drm_ctx *ctx = data;
427 473
474 if (drm_core_check_feature(dev, DRIVER_MODESET))
475 return -EINVAL;
476
428 DRM_DEBUG("%d\n", ctx->handle); 477 DRM_DEBUG("%d\n", ctx->handle);
429 if (ctx->handle != DRM_KERNEL_CONTEXT) { 478 if (ctx->handle != DRM_KERNEL_CONTEXT) {
430 if (dev->driver->context_dtor) 479 if (dev->driver->context_dtor)
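
drm_legacy_ctxbitmap_release() above deletes list entries while walking them, which is why it uses the _safe iterator. A plain-C model of the same remove-while-iterating pass, filtering a singly linked list by owner; this is not the kernel list API, and the names are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct ctx {
	int handle;
	const void *owner;	/* plays the role of the file_priv tag */
	struct ctx *next;
};

static struct ctx *push(struct ctx *head, int h, const void *o)
{
	struct ctx *c = malloc(sizeof(*c));

	c->handle = h;
	c->owner = o;
	c->next = head;
	return c;
}

/* Unlink and free every context owned by "owner", except handle 0,
 * which stands in for DRM_KERNEL_CONTEXT. */
static void release_owner(struct ctx **head, const void *owner)
{
	struct ctx **pp = head;

	while (*pp) {
		struct ctx *c = *pp;

		if (c->owner == owner && c->handle != 0) {
			*pp = c->next;	/* unlink before freeing */
			printf("destroying ctx %d\n", c->handle);
			free(c);
		} else {
			pp = &c->next;
		}
	}
}

int main(void)
{
	int me, you;
	struct ctx *head = NULL;

	head = push(head, 1, &me);
	head = push(head, 2, &you);
	head = push(head, 3, &me);
	release_owner(&head, &me);	/* frees 3 and 1, keeps 2 */
	while (head) {			/* drain what is left */
		struct ctx *n = head->next;

		free(head);
		head = n;
	}
	return 0;
}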
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index fc83bb9eb514..bff2fa941f60 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -125,13 +125,6 @@ static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
125 { DRM_MODE_SCALE_ASPECT, "Full aspect" }, 125 { DRM_MODE_SCALE_ASPECT, "Full aspect" },
126}; 126};
127 127
128static const struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
129{
130 { DRM_MODE_DITHERING_OFF, "Off" },
131 { DRM_MODE_DITHERING_ON, "On" },
132 { DRM_MODE_DITHERING_AUTO, "Automatic" },
133};
134
135/* 128/*
136 * Non-global properties, but "required" for certain connectors. 129 * Non-global properties, but "required" for certain connectors.
137 */ 130 */
@@ -186,29 +179,29 @@ static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
186struct drm_conn_prop_enum_list { 179struct drm_conn_prop_enum_list {
187 int type; 180 int type;
188 const char *name; 181 const char *name;
189 int count; 182 struct ida ida;
190}; 183};
191 184
192/* 185/*
193 * Connector and encoder types. 186 * Connector and encoder types.
194 */ 187 */
195static struct drm_conn_prop_enum_list drm_connector_enum_list[] = 188static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
196{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 }, 189{ { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
197 { DRM_MODE_CONNECTOR_VGA, "VGA", 0 }, 190 { DRM_MODE_CONNECTOR_VGA, "VGA" },
198 { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 }, 191 { DRM_MODE_CONNECTOR_DVII, "DVI-I" },
199 { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 }, 192 { DRM_MODE_CONNECTOR_DVID, "DVI-D" },
200 { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 }, 193 { DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
201 { DRM_MODE_CONNECTOR_Composite, "Composite", 0 }, 194 { DRM_MODE_CONNECTOR_Composite, "Composite" },
202 { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 }, 195 { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
203 { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 }, 196 { DRM_MODE_CONNECTOR_LVDS, "LVDS" },
204 { DRM_MODE_CONNECTOR_Component, "Component", 0 }, 197 { DRM_MODE_CONNECTOR_Component, "Component" },
205 { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 }, 198 { DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
206 { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 }, 199 { DRM_MODE_CONNECTOR_DisplayPort, "DP" },
207 { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 }, 200 { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
208 { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 }, 201 { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
209 { DRM_MODE_CONNECTOR_TV, "TV", 0 }, 202 { DRM_MODE_CONNECTOR_TV, "TV" },
210 { DRM_MODE_CONNECTOR_eDP, "eDP", 0 }, 203 { DRM_MODE_CONNECTOR_eDP, "eDP" },
211 { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0}, 204 { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
212}; 205};
213 206
214static const struct drm_prop_enum_list drm_encoder_enum_list[] = 207static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -220,6 +213,22 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
220 { DRM_MODE_ENCODER_VIRTUAL, "Virtual" }, 213 { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
221}; 214};
222 215
216void drm_connector_ida_init(void)
217{
218 int i;
219
220 for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
221 ida_init(&drm_connector_enum_list[i].ida);
222}
223
224void drm_connector_ida_destroy(void)
225{
226 int i;
227
228 for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
229 ida_destroy(&drm_connector_enum_list[i].ida);
230}
231
223const char *drm_get_encoder_name(const struct drm_encoder *encoder) 232const char *drm_get_encoder_name(const struct drm_encoder *encoder)
224{ 233{
225 static char buf[32]; 234 static char buf[32];
@@ -677,20 +686,19 @@ void drm_mode_probed_add(struct drm_connector *connector,
677} 686}
678EXPORT_SYMBOL(drm_mode_probed_add); 687EXPORT_SYMBOL(drm_mode_probed_add);
679 688
680/** 689/*
681 * drm_mode_remove - remove and free a mode 690 * drm_mode_remove - remove and free a mode
682 * @connector: connector list to modify 691 * @connector: connector list to modify
683 * @mode: mode to remove 692 * @mode: mode to remove
684 * 693 *
685 * Remove @mode from @connector's mode list, then free it. 694 * Remove @mode from @connector's mode list, then free it.
686 */ 695 */
687void drm_mode_remove(struct drm_connector *connector, 696static void drm_mode_remove(struct drm_connector *connector,
688 struct drm_display_mode *mode) 697 struct drm_display_mode *mode)
689{ 698{
690 list_del(&mode->head); 699 list_del(&mode->head);
691 drm_mode_destroy(connector->dev, mode); 700 drm_mode_destroy(connector->dev, mode);
692} 701}
693EXPORT_SYMBOL(drm_mode_remove);
694 702
695/** 703/**
696 * drm_connector_init - Init a preallocated connector 704 * drm_connector_init - Init a preallocated connector
@@ -711,6 +719,8 @@ int drm_connector_init(struct drm_device *dev,
711 int connector_type) 719 int connector_type)
712{ 720{
713 int ret; 721 int ret;
722 struct ida *connector_ida =
723 &drm_connector_enum_list[connector_type].ida;
714 724
715 drm_modeset_lock_all(dev); 725 drm_modeset_lock_all(dev);
716 726
@@ -723,7 +733,12 @@ int drm_connector_init(struct drm_device *dev,
723 connector->funcs = funcs; 733 connector->funcs = funcs;
724 connector->connector_type = connector_type; 734 connector->connector_type = connector_type;
725 connector->connector_type_id = 735 connector->connector_type_id =
726 ++drm_connector_enum_list[connector_type].count; /* TODO */ 736 ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
737 if (connector->connector_type_id < 0) {
738 ret = connector->connector_type_id;
739 drm_mode_object_put(dev, &connector->base);
740 goto out;
741 }
727 INIT_LIST_HEAD(&connector->probed_modes); 742 INIT_LIST_HEAD(&connector->probed_modes);
728 INIT_LIST_HEAD(&connector->modes); 743 INIT_LIST_HEAD(&connector->modes);
729 connector->edid_blob_ptr = NULL; 744 connector->edid_blob_ptr = NULL;
@@ -764,6 +779,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
764 list_for_each_entry_safe(mode, t, &connector->modes, head) 779 list_for_each_entry_safe(mode, t, &connector->modes, head)
765 drm_mode_remove(connector, mode); 780 drm_mode_remove(connector, mode);
766 781
782 ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
783 connector->connector_type_id);
784
767 drm_mode_object_put(dev, &connector->base); 785 drm_mode_object_put(dev, &connector->base);
768 list_del(&connector->head); 786 list_del(&connector->head);
769 dev->mode_config.num_connector--; 787 dev->mode_config.num_connector--;
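The connector hunks above swap the old monotonic count field for an IDA, so connector_type_id values can be reused across hotplug cycles. A minimal sketch of the same pattern, using only the linux/idr.h calls the patch itself uses (the my_* names are illustrative):

    #include <linux/idr.h>
    #include <linux/gfp.h>

    static struct ida my_type_ida;

    static void my_ida_setup(void)
    {
            ida_init(&my_type_ida);           /* mirrors drm_connector_ida_init() */
    }

    static int my_alloc_type_id(void)
    {
            /* start at 1 like the old ++count; end == 0 means no upper bound */
            return ida_simple_get(&my_type_ida, 1, 0, GFP_KERNEL);
    }

    static void my_free_type_id(int id)
    {
            ida_remove(&my_type_ida, id);     /* mirrors drm_connector_cleanup() */
    }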
@@ -781,6 +799,41 @@ void drm_connector_unplug_all(struct drm_device *dev)
781} 799}
782EXPORT_SYMBOL(drm_connector_unplug_all); 800EXPORT_SYMBOL(drm_connector_unplug_all);
783 801
802int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
803 const struct drm_bridge_funcs *funcs)
804{
805 int ret;
806
807 drm_modeset_lock_all(dev);
808
809 ret = drm_mode_object_get(dev, &bridge->base, DRM_MODE_OBJECT_BRIDGE);
810 if (ret)
811 goto out;
812
813 bridge->dev = dev;
814 bridge->funcs = funcs;
815
816 list_add_tail(&bridge->head, &dev->mode_config.bridge_list);
817 dev->mode_config.num_bridge++;
818
819 out:
820 drm_modeset_unlock_all(dev);
821 return ret;
822}
823EXPORT_SYMBOL(drm_bridge_init);
824
825void drm_bridge_cleanup(struct drm_bridge *bridge)
826{
827 struct drm_device *dev = bridge->dev;
828
829 drm_modeset_lock_all(dev);
830 drm_mode_object_put(dev, &bridge->base);
831 list_del(&bridge->head);
832 dev->mode_config.num_bridge--;
833 drm_modeset_unlock_all(dev);
834}
835EXPORT_SYMBOL(drm_bridge_cleanup);
836
784int drm_encoder_init(struct drm_device *dev, 837int drm_encoder_init(struct drm_device *dev,
785 struct drm_encoder *encoder, 838 struct drm_encoder *encoder,
786 const struct drm_encoder_funcs *funcs, 839 const struct drm_encoder_funcs *funcs,
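drm_bridge_init()/drm_bridge_cleanup() above register a third output object: a fixed chip sitting between the encoder and the connector. A hypothetical driver would wire one up as below; only drm_bridge_init(), the drm_bridge_funcs hooks and encoder->bridge come from this patch, everything else is illustrative. Note that the crtc helpers (next file) call pre_enable/enable/disable/post_disable unconditionally once encoder->bridge is set, so all four must be provided; mode_fixup is checked first and is therefore optional:

    #include <drm/drmP.h>
    #include <drm/drm_crtc.h>

    static void my_bridge_pre_enable(struct drm_bridge *bridge)   { /* rails up */ }
    static void my_bridge_enable(struct drm_bridge *bridge)       { /* start output */ }
    static void my_bridge_disable(struct drm_bridge *bridge)      { /* stop output */ }
    static void my_bridge_post_disable(struct drm_bridge *bridge) { /* rails down */ }

    static void my_bridge_mode_set(struct drm_bridge *bridge,
                                   struct drm_display_mode *mode,
                                   struct drm_display_mode *adjusted_mode)
    {
            /* program timings into the external chip */
    }

    static void my_bridge_destroy(struct drm_bridge *bridge)
    {
            drm_bridge_cleanup(bridge);
            kfree(bridge);
    }

    static const struct drm_bridge_funcs my_bridge_funcs = {
            .pre_enable   = my_bridge_pre_enable,
            .enable       = my_bridge_enable,
            .disable      = my_bridge_disable,
            .post_disable = my_bridge_post_disable,
            .mode_set     = my_bridge_mode_set,
            .destroy      = my_bridge_destroy,
    };

    static int my_bridge_attach(struct drm_device *dev, struct drm_encoder *encoder)
    {
            struct drm_bridge *bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
            int ret;

            if (!bridge)
                    return -ENOMEM;

            ret = drm_bridge_init(dev, bridge, &my_bridge_funcs);
            if (ret) {
                    kfree(bridge);
                    return ret;
            }

            encoder->bridge = bridge;
            return 0;
    }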
@@ -1135,30 +1188,6 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
1135EXPORT_SYMBOL(drm_mode_create_scaling_mode_property); 1188EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
1136 1189
1137/** 1190/**
1138 * drm_mode_create_dithering_property - create dithering property
1139 * @dev: DRM device
1140 *
1141 * Called by a driver the first time it's needed, must be attached to desired
1142 * connectors.
1143 */
1144int drm_mode_create_dithering_property(struct drm_device *dev)
1145{
1146 struct drm_property *dithering_mode;
1147
1148 if (dev->mode_config.dithering_mode_property)
1149 return 0;
1150
1151 dithering_mode =
1152 drm_property_create_enum(dev, 0, "dithering",
1153 drm_dithering_mode_enum_list,
1154 ARRAY_SIZE(drm_dithering_mode_enum_list));
1155 dev->mode_config.dithering_mode_property = dithering_mode;
1156
1157 return 0;
1158}
1159EXPORT_SYMBOL(drm_mode_create_dithering_property);
1160
1161/**
1162 * drm_mode_create_dirty_property - create dirty property 1191 * drm_mode_create_dirty_property - create dirty property
1163 * @dev: DRM device 1192 * @dev: DRM device
1164 * 1193 *
@@ -1190,6 +1219,7 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
1190 total_objects += dev->mode_config.num_crtc; 1219 total_objects += dev->mode_config.num_crtc;
1191 total_objects += dev->mode_config.num_connector; 1220 total_objects += dev->mode_config.num_connector;
1192 total_objects += dev->mode_config.num_encoder; 1221 total_objects += dev->mode_config.num_encoder;
1222 total_objects += dev->mode_config.num_bridge;
1193 1223
1194 group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL); 1224 group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
1195 if (!group->id_list) 1225 if (!group->id_list)
@@ -1198,6 +1228,7 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
1198 group->num_crtcs = 0; 1228 group->num_crtcs = 0;
1199 group->num_connectors = 0; 1229 group->num_connectors = 0;
1200 group->num_encoders = 0; 1230 group->num_encoders = 0;
1231 group->num_bridges = 0;
1201 return 0; 1232 return 0;
1202} 1233}
1203 1234
@@ -1207,6 +1238,7 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
1207 struct drm_crtc *crtc; 1238 struct drm_crtc *crtc;
1208 struct drm_encoder *encoder; 1239 struct drm_encoder *encoder;
1209 struct drm_connector *connector; 1240 struct drm_connector *connector;
1241 struct drm_bridge *bridge;
1210 int ret; 1242 int ret;
1211 1243
1212 if ((ret = drm_mode_group_init(dev, group))) 1244 if ((ret = drm_mode_group_init(dev, group)))
@@ -1223,6 +1255,11 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
1223 group->id_list[group->num_crtcs + group->num_encoders + 1255 group->id_list[group->num_crtcs + group->num_encoders +
1224 group->num_connectors++] = connector->base.id; 1256 group->num_connectors++] = connector->base.id;
1225 1257
1258 list_for_each_entry(bridge, &dev->mode_config.bridge_list, head)
1259 group->id_list[group->num_crtcs + group->num_encoders +
1260 group->num_connectors + group->num_bridges++] =
1261 bridge->base.id;
1262
1226 return 0; 1263 return 0;
1227} 1264}
1228EXPORT_SYMBOL(drm_mode_group_init_legacy_group); 1265EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
@@ -2604,10 +2641,22 @@ int drm_mode_getfb(struct drm_device *dev,
2604 r->depth = fb->depth; 2641 r->depth = fb->depth;
2605 r->bpp = fb->bits_per_pixel; 2642 r->bpp = fb->bits_per_pixel;
2606 r->pitch = fb->pitches[0]; 2643 r->pitch = fb->pitches[0];
2607 if (fb->funcs->create_handle) 2644 if (fb->funcs->create_handle) {
2608 ret = fb->funcs->create_handle(fb, file_priv, &r->handle); 2645 if (file_priv->is_master || capable(CAP_SYS_ADMIN)) {
2609 else 2646 ret = fb->funcs->create_handle(fb, file_priv,
2647 &r->handle);
2648 } else {
2649 /* GET_FB() is an unprivileged ioctl so we must not
2650 * return a buffer-handle to non-master processes! For
2651 * backwards-compatibility reasons, we cannot make
2652 * GET_FB() privileged, so just return an invalid handle
2653 * for non-masters. */
2654 r->handle = 0;
2655 ret = 0;
2656 }
2657 } else {
2610 ret = -ENODEV; 2658 ret = -ENODEV;
2659 }
2611 2660
2612 drm_framebuffer_unreference(fb); 2661 drm_framebuffer_unreference(fb);
2613 2662
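The hunk above plugs a handle leak: GET_FB is an unprivileged ioctl, and handing out a GEM handle to the current scanout buffer let any client map the screen. Non-master callers now get a successful return with handle == 0, which keeps old userspace working. Through the stock libdrm wrapper that looks like this (sketch, error handling trimmed):

    #include <stdio.h>
    #include <xf86drmMode.h>

    static void dump_fb(int fd, uint32_t fb_id)
    {
            drmModeFBPtr fb = drmModeGetFB(fd, fb_id);

            if (!fb)
                    return;

            /* after this patch, handle is 0 unless we are DRM master or root */
            printf("fb %u: %ux%u pitch %u handle %u\n",
                   fb->fb_id, fb->width, fb->height, fb->pitch, fb->handle);
            drmModeFreeFB(fb);
    }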
@@ -3514,6 +3563,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3514 page_flip->reserved != 0) 3563 page_flip->reserved != 0)
3515 return -EINVAL; 3564 return -EINVAL;
3516 3565
3566 if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
3567 return -EINVAL;
3568
3517 obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC); 3569 obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
3518 if (!obj) 3570 if (!obj)
3519 return -EINVAL; 3571 return -EINVAL;
@@ -3587,7 +3639,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3587 } 3639 }
3588 3640
3589 old_fb = crtc->fb; 3641 old_fb = crtc->fb;
3590 ret = crtc->funcs->page_flip(crtc, fb, e); 3642 ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
3591 if (ret) { 3643 if (ret) {
3592 if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { 3644 if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
3593 spin_lock_irqsave(&dev->event_lock, flags); 3645 spin_lock_irqsave(&dev->event_lock, flags);
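The new check rejects DRM_MODE_PAGE_FLIP_ASYNC unless the driver opted in via mode_config.async_page_flip, and the flags now travel into the crtc's page_flip hook. From userspace the capability would typically be probed by falling back; drmModePageFlip() is the standard libdrm call, and the retry strategy here is just one plausible approach:

    #include <xf86drm.h>
    #include <xf86drmMode.h>

    static int flip_prefer_async(int fd, uint32_t crtc_id, uint32_t fb_id,
                                 void *user_data)
    {
            int ret;

            /* async: don't wait for vblank, may tear, lowest latency */
            ret = drmModePageFlip(fd, crtc_id, fb_id,
                                  DRM_MODE_PAGE_FLIP_EVENT |
                                  DRM_MODE_PAGE_FLIP_ASYNC, user_data);
            if (ret)        /* kernel or driver without async flip support */
                    ret = drmModePageFlip(fd, crtc_id, fb_id,
                                          DRM_MODE_PAGE_FLIP_EVENT, user_data);
            return ret;
    }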
@@ -3905,6 +3957,7 @@ void drm_mode_config_init(struct drm_device *dev)
3905 INIT_LIST_HEAD(&dev->mode_config.fb_list); 3957 INIT_LIST_HEAD(&dev->mode_config.fb_list);
3906 INIT_LIST_HEAD(&dev->mode_config.crtc_list); 3958 INIT_LIST_HEAD(&dev->mode_config.crtc_list);
3907 INIT_LIST_HEAD(&dev->mode_config.connector_list); 3959 INIT_LIST_HEAD(&dev->mode_config.connector_list);
3960 INIT_LIST_HEAD(&dev->mode_config.bridge_list);
3908 INIT_LIST_HEAD(&dev->mode_config.encoder_list); 3961 INIT_LIST_HEAD(&dev->mode_config.encoder_list);
3909 INIT_LIST_HEAD(&dev->mode_config.property_list); 3962 INIT_LIST_HEAD(&dev->mode_config.property_list);
3910 INIT_LIST_HEAD(&dev->mode_config.property_blob_list); 3963 INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
@@ -3941,6 +3994,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
3941 struct drm_connector *connector, *ot; 3994 struct drm_connector *connector, *ot;
3942 struct drm_crtc *crtc, *ct; 3995 struct drm_crtc *crtc, *ct;
3943 struct drm_encoder *encoder, *enct; 3996 struct drm_encoder *encoder, *enct;
3997 struct drm_bridge *bridge, *brt;
3944 struct drm_framebuffer *fb, *fbt; 3998 struct drm_framebuffer *fb, *fbt;
3945 struct drm_property *property, *pt; 3999 struct drm_property *property, *pt;
3946 struct drm_property_blob *blob, *bt; 4000 struct drm_property_blob *blob, *bt;
@@ -3951,6 +4005,11 @@ void drm_mode_config_cleanup(struct drm_device *dev)
3951 encoder->funcs->destroy(encoder); 4005 encoder->funcs->destroy(encoder);
3952 } 4006 }
3953 4007
4008 list_for_each_entry_safe(bridge, brt,
4009 &dev->mode_config.bridge_list, head) {
4010 bridge->funcs->destroy(bridge);
4011 }
4012
3954 list_for_each_entry_safe(connector, ot, 4013 list_for_each_entry_safe(connector, ot,
3955 &dev->mode_config.connector_list, head) { 4014 &dev->mode_config.connector_list, head) {
3956 connector->funcs->destroy(connector); 4015 connector->funcs->destroy(connector);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 738a4294d820..c722c3b5404d 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -257,10 +257,16 @@ drm_encoder_disable(struct drm_encoder *encoder)
257{ 257{
258 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 258 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
259 259
260 if (encoder->bridge)
261 encoder->bridge->funcs->disable(encoder->bridge);
262
260 if (encoder_funcs->disable) 263 if (encoder_funcs->disable)
261 (*encoder_funcs->disable)(encoder); 264 (*encoder_funcs->disable)(encoder);
262 else 265 else
263 (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); 266 (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
267
268 if (encoder->bridge)
269 encoder->bridge->funcs->post_disable(encoder->bridge);
264} 270}
265 271
266/** 272/**
@@ -424,6 +430,16 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
424 430
425 if (encoder->crtc != crtc) 431 if (encoder->crtc != crtc)
426 continue; 432 continue;
433
434 if (encoder->bridge && encoder->bridge->funcs->mode_fixup) {
435 ret = encoder->bridge->funcs->mode_fixup(
436 encoder->bridge, mode, adjusted_mode);
437 if (!ret) {
438 DRM_DEBUG_KMS("Bridge fixup failed\n");
439 goto done;
440 }
441 }
442
427 encoder_funcs = encoder->helper_private; 443 encoder_funcs = encoder->helper_private;
428 if (!(ret = encoder_funcs->mode_fixup(encoder, mode, 444 if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
429 adjusted_mode))) { 445 adjusted_mode))) {
@@ -443,9 +459,16 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
443 459
444 if (encoder->crtc != crtc) 460 if (encoder->crtc != crtc)
445 continue; 461 continue;
462
463 if (encoder->bridge)
464 encoder->bridge->funcs->disable(encoder->bridge);
465
446 encoder_funcs = encoder->helper_private; 466 encoder_funcs = encoder->helper_private;
447 /* Disable the encoders as the first thing we do. */ 467 /* Disable the encoders as the first thing we do. */
448 encoder_funcs->prepare(encoder); 468 encoder_funcs->prepare(encoder);
469
470 if (encoder->bridge)
471 encoder->bridge->funcs->post_disable(encoder->bridge);
449 } 472 }
450 473
451 drm_crtc_prepare_encoders(dev); 474 drm_crtc_prepare_encoders(dev);
@@ -469,6 +492,10 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
469 mode->base.id, mode->name); 492 mode->base.id, mode->name);
470 encoder_funcs = encoder->helper_private; 493 encoder_funcs = encoder->helper_private;
471 encoder_funcs->mode_set(encoder, mode, adjusted_mode); 494 encoder_funcs->mode_set(encoder, mode, adjusted_mode);
495
496 if (encoder->bridge && encoder->bridge->funcs->mode_set)
497 encoder->bridge->funcs->mode_set(encoder->bridge, mode,
498 adjusted_mode);
472 } 499 }
473 500
474 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 501 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -479,9 +506,14 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
479 if (encoder->crtc != crtc) 506 if (encoder->crtc != crtc)
480 continue; 507 continue;
481 508
509 if (encoder->bridge)
510 encoder->bridge->funcs->pre_enable(encoder->bridge);
511
482 encoder_funcs = encoder->helper_private; 512 encoder_funcs = encoder->helper_private;
483 encoder_funcs->commit(encoder); 513 encoder_funcs->commit(encoder);
484 514
515 if (encoder->bridge)
516 encoder->bridge->funcs->enable(encoder->bridge);
485 } 517 }
486 518
487 /* Store real post-adjustment hardware mode. */ 519 /* Store real post-adjustment hardware mode. */
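Taken together, the hunks above establish the bridge ordering around each encoder: a bridge is quiesced before and fully shut off after its encoder, and made ready before and finally enabled after it. Condensed (a sketch of the contract, not the verbatim helper):

    static void disable_path(struct drm_encoder *encoder)
    {
            struct drm_bridge *bridge = encoder->bridge;
            struct drm_encoder_helper_funcs *funcs = encoder->helper_private;

            if (bridge)
                    bridge->funcs->disable(bridge);       /* 1. bridge stops */
            funcs->prepare(encoder);                      /* 2. encoder off */
            if (bridge)
                    bridge->funcs->post_disable(bridge);  /* 3. bridge fully off */
    }

    static void enable_path(struct drm_encoder *encoder)
    {
            struct drm_bridge *bridge = encoder->bridge;
            struct drm_encoder_helper_funcs *funcs = encoder->helper_private;

            if (bridge)
                    bridge->funcs->pre_enable(bridge);    /* 1. bridge ready */
            funcs->commit(encoder);                       /* 2. encoder on */
            if (bridge)
                    bridge->funcs->enable(bridge);        /* 3. bridge on */
    }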
@@ -677,6 +709,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
677 /* don't break so fail path works correct */ 709 /* don't break so fail path works correct */
678 fail = 1; 710 fail = 1;
679 break; 711 break;
712
713 if (connector->dpms != DRM_MODE_DPMS_ON) {
714 DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
715 mode_changed = true;
716 }
680 } 717 }
681 } 718 }
682 719
@@ -754,6 +791,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
754 ret = -EINVAL; 791 ret = -EINVAL;
755 goto fail; 792 goto fail;
756 } 793 }
794 DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
795 for (i = 0; i < set->num_connectors; i++) {
796 DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
797 drm_get_connector_name(set->connectors[i]));
798 set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
799 }
757 } 800 }
758 drm_helper_disable_unused_functions(dev); 801 drm_helper_disable_unused_functions(dev);
759 } else if (fb_changed) { 802 } else if (fb_changed) {
@@ -771,22 +814,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
771 } 814 }
772 } 815 }
773 816
774 /*
775 * crtc set_config helpers implicit set the crtc and all connected
776 * encoders to DPMS on for a full mode set. But for just an fb update it
777 * doesn't do that. To not confuse userspace, do an explicit DPMS_ON
778 * unconditionally. This will also ensure driver internal dpms state is
779 * consistent again.
780 */
781 if (set->crtc->enabled) {
782 DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
783 for (i = 0; i < set->num_connectors; i++) {
784 DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
785 drm_get_connector_name(set->connectors[i]));
786 set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
787 }
788 }
789
790 kfree(save_connectors); 817 kfree(save_connectors);
791 kfree(save_encoders); 818 kfree(save_encoders);
792 kfree(save_crtcs); 819 kfree(save_crtcs);
@@ -835,6 +862,31 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
835 return dpms; 862 return dpms;
836} 863}
837 864
865/* Helper which handles bridge ordering around encoder dpms */
866static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode)
867{
868 struct drm_bridge *bridge = encoder->bridge;
869 struct drm_encoder_helper_funcs *encoder_funcs;
870
871 if (bridge) {
872 if (mode == DRM_MODE_DPMS_ON)
873 bridge->funcs->pre_enable(bridge);
874 else
875 bridge->funcs->disable(bridge);
876 }
877
878 encoder_funcs = encoder->helper_private;
879 if (encoder_funcs->dpms)
880 encoder_funcs->dpms(encoder, mode);
881
882 if (bridge) {
883 if (mode == DRM_MODE_DPMS_ON)
884 bridge->funcs->enable(bridge);
885 else
886 bridge->funcs->post_disable(bridge);
887 }
888}
889
838static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc) 890static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
839{ 891{
840 int dpms = DRM_MODE_DPMS_OFF; 892 int dpms = DRM_MODE_DPMS_OFF;
@@ -862,7 +914,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
862{ 914{
863 struct drm_encoder *encoder = connector->encoder; 915 struct drm_encoder *encoder = connector->encoder;
864 struct drm_crtc *crtc = encoder ? encoder->crtc : NULL; 916 struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
865 int old_dpms; 917 int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
866 918
867 if (mode == connector->dpms) 919 if (mode == connector->dpms)
868 return; 920 return;
@@ -870,6 +922,9 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
870 old_dpms = connector->dpms; 922 old_dpms = connector->dpms;
871 connector->dpms = mode; 923 connector->dpms = mode;
872 924
925 if (encoder)
926 encoder_dpms = drm_helper_choose_encoder_dpms(encoder);
927
873 /* from off to on, do crtc then encoder */ 928 /* from off to on, do crtc then encoder */
874 if (mode < old_dpms) { 929 if (mode < old_dpms) {
875 if (crtc) { 930 if (crtc) {
@@ -878,22 +933,14 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
878 (*crtc_funcs->dpms) (crtc, 933 (*crtc_funcs->dpms) (crtc,
879 drm_helper_choose_crtc_dpms(crtc)); 934 drm_helper_choose_crtc_dpms(crtc));
880 } 935 }
881 if (encoder) { 936 if (encoder)
882 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 937 drm_helper_encoder_dpms(encoder, encoder_dpms);
883 if (encoder_funcs->dpms)
884 (*encoder_funcs->dpms) (encoder,
885 drm_helper_choose_encoder_dpms(encoder));
886 }
887 } 938 }
888 939
889 /* from on to off, do encoder then crtc */ 940 /* from on to off, do encoder then crtc */
890 if (mode > old_dpms) { 941 if (mode > old_dpms) {
891 if (encoder) { 942 if (encoder)
892 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 943 drm_helper_encoder_dpms(encoder, encoder_dpms);
893 if (encoder_funcs->dpms)
894 (*encoder_funcs->dpms) (encoder,
895 drm_helper_choose_encoder_dpms(encoder));
896 }
897 if (crtc) { 944 if (crtc) {
898 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 945 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
899 if (crtc_funcs->dpms) 946 if (crtc_funcs->dpms)
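The '<' and '>' comparisons above work because the DPMS levels are ordered numerically, lower meaning more awake; for reference, from drm_mode.h:

    #define DRM_MODE_DPMS_ON        0
    #define DRM_MODE_DPMS_STANDBY   1
    #define DRM_MODE_DPMS_SUSPEND   2
    #define DRM_MODE_DPMS_OFF       3

So mode < old_dpms is the wake-up direction (crtc first, then encoder) and mode > old_dpms the power-down direction (encoder first, then crtc).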
@@ -929,9 +976,8 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
929{ 976{
930 struct drm_crtc *crtc; 977 struct drm_crtc *crtc;
931 struct drm_encoder *encoder; 978 struct drm_encoder *encoder;
932 struct drm_encoder_helper_funcs *encoder_funcs;
933 struct drm_crtc_helper_funcs *crtc_funcs; 979 struct drm_crtc_helper_funcs *crtc_funcs;
934 int ret; 980 int ret, encoder_dpms;
935 981
936 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 982 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
937 983
@@ -951,10 +997,10 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
951 if(encoder->crtc != crtc) 997 if(encoder->crtc != crtc)
952 continue; 998 continue;
953 999
954 encoder_funcs = encoder->helper_private; 1000 encoder_dpms = drm_helper_choose_encoder_dpms(
955 if (encoder_funcs->dpms) 1001 encoder);
956 (*encoder_funcs->dpms) (encoder, 1002
957 drm_helper_choose_encoder_dpms(encoder)); 1003 drm_helper_encoder_dpms(encoder, encoder_dpms);
958 } 1004 }
959 1005
960 crtc_funcs = crtc->helper_private; 1006 crtc_funcs = crtc->helper_private;
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 495b5fd2787c..8a140a953754 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -44,10 +44,18 @@
44 * 44 *
45 * Allocate and initialize a drm_device_dma structure. 45 * Allocate and initialize a drm_device_dma structure.
46 */ 46 */
47int drm_dma_setup(struct drm_device *dev) 47int drm_legacy_dma_setup(struct drm_device *dev)
48{ 48{
49 int i; 49 int i;
50 50
51 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
52 drm_core_check_feature(dev, DRIVER_MODESET)) {
53 return 0;
54 }
55
56 dev->buf_use = 0;
57 atomic_set(&dev->buf_alloc, 0);
58
51 dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL); 59 dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
52 if (!dev->dma) 60 if (!dev->dma)
53 return -ENOMEM; 61 return -ENOMEM;
@@ -66,11 +74,16 @@ int drm_dma_setup(struct drm_device *dev)
66 * Free all pages associated with DMA buffers, the buffers and pages lists, and 74 * Free all pages associated with DMA buffers, the buffers and pages lists, and
67 * finally the drm_device::dma structure itself. 75 * finally the drm_device::dma structure itself.
68 */ 76 */
69void drm_dma_takedown(struct drm_device *dev) 77void drm_legacy_dma_takedown(struct drm_device *dev)
70{ 78{
71 struct drm_device_dma *dma = dev->dma; 79 struct drm_device_dma *dma = dev->dma;
72 int i, j; 80 int i, j;
73 81
82 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
83 drm_core_check_feature(dev, DRIVER_MODESET)) {
84 return;
85 }
86
74 if (!dma) 87 if (!dma)
75 return; 88 return;
76 89
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 99fcd7c32ea2..e572dd20bdee 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -68,7 +68,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
68 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED), 68 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
69 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), 69 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
70 DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), 70 DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
71 DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED), 71 DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
72 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), 72 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
73 73
74 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 74 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -87,7 +87,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
87 87
88 DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), 88 DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
89 DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 89 DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
90 DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 90 DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
91 DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH), 91 DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
92 DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 92 DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
93 DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 93 DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -106,8 +106,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
106 DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH), 106 DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
107 DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH), 107 DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
108 DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH), 108 DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
109 /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */ 109 DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
110 DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
111 110
112 DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 111 DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
113 112
@@ -122,7 +121,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
122 DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 121 DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
123#endif 122#endif
124 123
125 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 124 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
126 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 125 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
127 126
128 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED), 127 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
@@ -131,14 +130,14 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
131 130
132 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 131 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
133 132
134 DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED), 133 DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
135 DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), 134 DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
136 DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), 135 DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
137 136
138 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 137 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
139 138
140 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED), 139 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
141 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED), 140 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
142 141
143 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 142 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
144 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 143 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
@@ -172,6 +171,31 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
172#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) 171#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
173 172
174/** 173/**
174 * drm_legacy_dev_reinit
175 *
176 * Reinitializes a legacy/ums drm device in its lastclose function.
177 */
178static void drm_legacy_dev_reinit(struct drm_device *dev)
179{
180 int i;
181
182 if (drm_core_check_feature(dev, DRIVER_MODESET))
183 return;
184
185 atomic_set(&dev->ioctl_count, 0);
186 atomic_set(&dev->vma_count, 0);
187
188 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
189 atomic_set(&dev->counts[i], 0);
190
191 dev->sigdata.lock = NULL;
192
193 dev->context_flag = 0;
194 dev->last_context = 0;
195 dev->if_version = 0;
196}
197
198/**
175 * Take down the DRM device. 199 * Take down the DRM device.
176 * 200 *
177 * \param dev DRM device structure. 201 * \param dev DRM device structure.
@@ -195,32 +219,9 @@ int drm_lastclose(struct drm_device * dev)
195 219
196 mutex_lock(&dev->struct_mutex); 220 mutex_lock(&dev->struct_mutex);
197 221
198 /* Clear AGP information */ 222 drm_agp_clear(dev);
199 if (drm_core_has_AGP(dev) && dev->agp &&
200 !drm_core_check_feature(dev, DRIVER_MODESET)) {
201 struct drm_agp_mem *entry, *tempe;
202
203 /* Remove AGP resources, but leave dev->agp
204 intact until drv_cleanup is called. */
205 list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
206 if (entry->bound)
207 drm_unbind_agp(entry->memory);
208 drm_free_agp(entry->memory, entry->pages);
209 kfree(entry);
210 }
211 INIT_LIST_HEAD(&dev->agp->memory);
212 223
213 if (dev->agp->acquired) 224 drm_legacy_sg_cleanup(dev);
214 drm_agp_release(dev);
215
216 dev->agp->acquired = 0;
217 dev->agp->enabled = 0;
218 }
219 if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
220 !drm_core_check_feature(dev, DRIVER_MODESET)) {
221 drm_sg_cleanup(dev->sg);
222 dev->sg = NULL;
223 }
224 225
225 /* Clear vma list (only built for debugging) */ 226 /* Clear vma list (only built for debugging) */
226 list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { 227 list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
@@ -228,13 +229,13 @@ int drm_lastclose(struct drm_device * dev)
228 kfree(vma); 229 kfree(vma);
229 } 230 }
230 231
231 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && 232 drm_legacy_dma_takedown(dev);
232 !drm_core_check_feature(dev, DRIVER_MODESET))
233 drm_dma_takedown(dev);
234 233
235 dev->dev_mapping = NULL; 234 dev->dev_mapping = NULL;
236 mutex_unlock(&dev->struct_mutex); 235 mutex_unlock(&dev->struct_mutex);
237 236
237 drm_legacy_dev_reinit(dev);
238
238 DRM_DEBUG("lastclose completed\n"); 239 DRM_DEBUG("lastclose completed\n");
239 return 0; 240 return 0;
240} 241}
@@ -251,6 +252,7 @@ static int __init drm_core_init(void)
251 int ret = -ENOMEM; 252 int ret = -ENOMEM;
252 253
253 drm_global_init(); 254 drm_global_init();
255 drm_connector_ida_init();
254 idr_init(&drm_minors_idr); 256 idr_init(&drm_minors_idr);
255 257
256 if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops)) 258 if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
@@ -263,13 +265,6 @@ static int __init drm_core_init(void)
263 goto err_p2; 265 goto err_p2;
264 } 266 }
265 267
266 drm_proc_root = proc_mkdir("dri", NULL);
267 if (!drm_proc_root) {
268 DRM_ERROR("Cannot create /proc/dri\n");
269 ret = -1;
270 goto err_p3;
271 }
272
273 drm_debugfs_root = debugfs_create_dir("dri", NULL); 268 drm_debugfs_root = debugfs_create_dir("dri", NULL);
274 if (!drm_debugfs_root) { 269 if (!drm_debugfs_root) {
275 DRM_ERROR("Cannot create /sys/kernel/debug/dri\n"); 270 DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
@@ -292,12 +287,12 @@ err_p1:
292 287
293static void __exit drm_core_exit(void) 288static void __exit drm_core_exit(void)
294{ 289{
295 remove_proc_entry("dri", NULL);
296 debugfs_remove(drm_debugfs_root); 290 debugfs_remove(drm_debugfs_root);
297 drm_sysfs_destroy(); 291 drm_sysfs_destroy();
298 292
299 unregister_chrdev(DRM_MAJOR, "drm"); 293 unregister_chrdev(DRM_MAJOR, "drm");
300 294
295 drm_connector_ida_destroy();
301 idr_destroy(&drm_minors_idr); 296 idr_destroy(&drm_minors_idr);
302} 297}
303 298
@@ -420,17 +415,15 @@ long drm_ioctl(struct file *filp,
420 415
421 /* Do not trust userspace, use our own definition */ 416 /* Do not trust userspace, use our own definition */
422 func = ioctl->func; 417 func = ioctl->func;
423 /* is there a local override? */
424 if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
425 func = dev->driver->dma_ioctl;
426 418
427 if (!func) { 419 if (!func) {
428 DRM_DEBUG("no function\n"); 420 DRM_DEBUG("no function\n");
429 retcode = -EINVAL; 421 retcode = -EINVAL;
430 } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || 422 } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
431 ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) || 423 ((ioctl->flags & DRM_AUTH) && !drm_is_render_client(file_priv) && !file_priv->authenticated) ||
432 ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) || 424 ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
433 (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) { 425 (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL)) ||
426 (!(ioctl->flags & DRM_RENDER_ALLOW) && drm_is_render_client(file_priv))) {
434 retcode = -EACCES; 427 retcode = -EACCES;
435 } else { 428 } else {
436 if (cmd & (IOC_IN | IOC_OUT)) { 429 if (cmd & (IOC_IN | IOC_OUT)) {
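The widened condition above is the heart of the render-node permission model: render clients skip the DRM_AUTH magic-cookie handshake entirely but are confined to ioctls explicitly marked DRM_RENDER_ALLOW (drm_is_render_client() is introduced elsewhere in this series). The same test, unrolled for readability (a sketch, not the kernel's code):

    static bool drm_ioctl_permitted(u32 flags, struct drm_file *file_priv)
    {
            bool render = drm_is_render_client(file_priv);

            if ((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN))
                    return false;
            if ((flags & DRM_AUTH) && !render && !file_priv->authenticated)
                    return false;
            if ((flags & DRM_MASTER) && !file_priv->is_master)
                    return false;
            if (!(flags & DRM_CONTROL_ALLOW) &&
                file_priv->minor->type == DRM_MINOR_CONTROL)
                    return false;
            if (!(flags & DRM_RENDER_ALLOW) && render)
                    return false;
            return true;
    }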
@@ -485,19 +478,4 @@ long drm_ioctl(struct file *filp,
485 DRM_DEBUG("ret = %d\n", retcode); 478 DRM_DEBUG("ret = %d\n", retcode);
486 return retcode; 479 return retcode;
487} 480}
488
489EXPORT_SYMBOL(drm_ioctl); 481EXPORT_SYMBOL(drm_ioctl);
490
491struct drm_local_map *drm_getsarea(struct drm_device *dev)
492{
493 struct drm_map_list *entry;
494
495 list_for_each_entry(entry, &dev->maplist, head) {
496 if (entry->map && entry->map->type == _DRM_SHM &&
497 (entry->map->flags & _DRM_CONTAINS_LOCK)) {
498 return entry->map;
499 }
500 }
501 return NULL;
502}
503EXPORT_SYMBOL(drm_getsarea);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 95d6f4b6967c..1688ff500513 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -125,6 +125,9 @@ static struct edid_quirk {
125 125
126 /* ViewSonic VA2026w */ 126 /* ViewSonic VA2026w */
127 { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING }, 127 { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
128
129 /* Medion MD 30217 PG */
130 { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
128}; 131};
129 132
130/* 133/*
@@ -931,6 +934,36 @@ static const struct drm_display_mode edid_cea_modes[] = {
931 .vrefresh = 100, }, 934 .vrefresh = 100, },
932}; 935};
933 936
937/*
938 * HDMI 1.4 4k modes.
939 */
940static const struct drm_display_mode edid_4k_modes[] = {
941 /* 1 - 3840x2160@30Hz */
942 { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
943 3840, 4016, 4104, 4400, 0,
944 2160, 2168, 2178, 2250, 0,
945 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
946 .vrefresh = 30, },
947 /* 2 - 3840x2160@25Hz */
948 { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
949 3840, 4896, 4984, 5280, 0,
950 2160, 2168, 2178, 2250, 0,
951 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
952 .vrefresh = 25, },
953 /* 3 - 3840x2160@24Hz */
954 { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
955 3840, 5116, 5204, 5500, 0,
956 2160, 2168, 2178, 2250, 0,
957 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
958 .vrefresh = 24, },
959 /* 4 - 4096x2160@24Hz (SMPTE) */
960 { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000,
961 4096, 5116, 5204, 5500, 0,
962 2160, 2168, 2178, 2250, 0,
963 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
964 .vrefresh = 24, },
965};
966
934/*** DDC fetch and block validation ***/ 967/*** DDC fetch and block validation ***/
935 968
936static const u8 edid_header[] = { 969static const u8 edid_header[] = {
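All four modes above run the same 297 MHz pixel clock and a 2250-line vertical total; only the horizontal blanking differs to hit 30/25/24 Hz. A standalone sanity check of those timings (the mode's clock field is in kHz):

    #include <assert.h>

    int main(void)
    {
            /* htotal * vtotal * vrefresh gives Hz; divide by 1000 for kHz */
            assert(4400 * 2250 * 30 / 1000 == 297000);  /* VIC 1 */
            assert(5280 * 2250 * 25 / 1000 == 297000);  /* VIC 2 */
            assert(5500 * 2250 * 24 / 1000 == 297000);  /* VIC 3 and 4 */
            return 0;
    }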
@@ -2287,7 +2320,6 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
2287 return closure.modes; 2320 return closure.modes;
2288} 2321}
2289 2322
2290#define HDMI_IDENTIFIER 0x000C03
2291#define AUDIO_BLOCK 0x01 2323#define AUDIO_BLOCK 0x01
2292#define VIDEO_BLOCK 0x02 2324#define VIDEO_BLOCK 0x02
2293#define VENDOR_BLOCK 0x03 2325#define VENDOR_BLOCK 0x03
@@ -2298,10 +2330,10 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
2298#define EDID_CEA_YCRCB422 (1 << 4) 2330#define EDID_CEA_YCRCB422 (1 << 4)
2299#define EDID_CEA_VCDB_QS (1 << 6) 2331#define EDID_CEA_VCDB_QS (1 << 6)
2300 2332
2301/** 2333/*
2302 * Search EDID for CEA extension block. 2334 * Search EDID for CEA extension block.
2303 */ 2335 */
2304u8 *drm_find_cea_extension(struct edid *edid) 2336static u8 *drm_find_cea_extension(struct edid *edid)
2305{ 2337{
2306 u8 *edid_ext = NULL; 2338 u8 *edid_ext = NULL;
2307 int i; 2339 int i;
@@ -2322,7 +2354,6 @@ u8 *drm_find_cea_extension(struct edid *edid)
2322 2354
2323 return edid_ext; 2355 return edid_ext;
2324} 2356}
2325EXPORT_SYMBOL(drm_find_cea_extension);
2326 2357
2327/* 2358/*
2328 * Calculate the alternate clock for the CEA mode 2359 * Calculate the alternate clock for the CEA mode
@@ -2380,6 +2411,54 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
2380} 2411}
2381EXPORT_SYMBOL(drm_match_cea_mode); 2412EXPORT_SYMBOL(drm_match_cea_mode);
2382 2413
2414/*
2415 * Calculate the alternate clock for HDMI modes (those from the HDMI vendor
2416 * specific block).
2417 *
2418 * It's almost like cea_mode_alternate_clock(), we just need to add an
2419 * exception for the VIC 4 mode (4096x2160@24Hz): no alternate clock for this
2420 * one.
2421 */
2422static unsigned int
2423hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
2424{
2425 if (hdmi_mode->vdisplay == 4096 && hdmi_mode->hdisplay == 2160)
2426 return hdmi_mode->clock;
2427
2428 return cea_mode_alternate_clock(hdmi_mode);
2429}
2430
2431/*
2432 * drm_match_hdmi_mode - look for a HDMI mode matching given mode
2433 * @to_match: display mode
2434 *
2435 * An HDMI mode is one defined in the HDMI vendor specific block.
2436 *
2437 * Returns the HDMI Video ID (VIC) of the mode or 0 if it isn't one.
2438 */
2439static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
2440{
2441 u8 mode;
2442
2443 if (!to_match->clock)
2444 return 0;
2445
2446 for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) {
2447 const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode];
2448 unsigned int clock1, clock2;
2449
2450 /* Make sure to also match alternate clocks */
2451 clock1 = hdmi_mode->clock;
2452 clock2 = hdmi_mode_alternate_clock(hdmi_mode);
2453
2454 if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
2455 KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
2456 drm_mode_equal_no_clocks(to_match, hdmi_mode))
2457 return mode + 1;
2458 }
2459 return 0;
2460}
2461
2383static int 2462static int
2384add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid) 2463add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
2385{ 2464{
@@ -2397,18 +2476,26 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
2397 * with the alternate clock for certain CEA modes. 2476 * with the alternate clock for certain CEA modes.
2398 */ 2477 */
2399 list_for_each_entry(mode, &connector->probed_modes, head) { 2478 list_for_each_entry(mode, &connector->probed_modes, head) {
2400 const struct drm_display_mode *cea_mode; 2479 const struct drm_display_mode *cea_mode = NULL;
2401 struct drm_display_mode *newmode; 2480 struct drm_display_mode *newmode;
2402 u8 cea_mode_idx = drm_match_cea_mode(mode) - 1; 2481 u8 mode_idx = drm_match_cea_mode(mode) - 1;
2403 unsigned int clock1, clock2; 2482 unsigned int clock1, clock2;
2404 2483
2405 if (cea_mode_idx >= ARRAY_SIZE(edid_cea_modes)) 2484 if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
2406 continue; 2485 cea_mode = &edid_cea_modes[mode_idx];
2486 clock2 = cea_mode_alternate_clock(cea_mode);
2487 } else {
2488 mode_idx = drm_match_hdmi_mode(mode) - 1;
2489 if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
2490 cea_mode = &edid_4k_modes[mode_idx];
2491 clock2 = hdmi_mode_alternate_clock(cea_mode);
2492 }
2493 }
2407 2494
2408 cea_mode = &edid_cea_modes[cea_mode_idx]; 2495 if (!cea_mode)
2496 continue;
2409 2497
2410 clock1 = cea_mode->clock; 2498 clock1 = cea_mode->clock;
2411 clock2 = cea_mode_alternate_clock(cea_mode);
2412 2499
2413 if (clock1 == clock2) 2500 if (clock1 == clock2)
2414 continue; 2501 continue;
@@ -2442,10 +2529,11 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
2442} 2529}
2443 2530
2444static int 2531static int
2445do_cea_modes (struct drm_connector *connector, u8 *db, u8 len) 2532do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
2446{ 2533{
2447 struct drm_device *dev = connector->dev; 2534 struct drm_device *dev = connector->dev;
2448 u8 * mode, cea_mode; 2535 const u8 *mode;
2536 u8 cea_mode;
2449 int modes = 0; 2537 int modes = 0;
2450 2538
2451 for (mode = db; mode < db + len; mode++) { 2539 for (mode = db; mode < db + len; mode++) {
@@ -2465,6 +2553,68 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
2465 return modes; 2553 return modes;
2466} 2554}
2467 2555
2556/*
2557 * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
2558 * @connector: connector corresponding to the HDMI sink
2559 * @db: start of the CEA vendor specific block
2560 * @len: length of the CEA block payload, ie. one can access up to db[len]
2561 *
2562 * Parses the HDMI VSDB looking for modes to add to @connector.
2563 */
2564static int
2565do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
2566{
2567 struct drm_device *dev = connector->dev;
2568 int modes = 0, offset = 0, i;
2569 u8 vic_len;
2570
2571 if (len < 8)
2572 goto out;
2573
2574 /* no HDMI_Video_Present */
2575 if (!(db[8] & (1 << 5)))
2576 goto out;
2577
2578 /* Latency_Fields_Present */
2579 if (db[8] & (1 << 7))
2580 offset += 2;
2581
2582 /* I_Latency_Fields_Present */
2583 if (db[8] & (1 << 6))
2584 offset += 2;
2585
2586 /* the declared length is not long enough for the first 2 bytes
2587 * of additional video format capabilities */
2588 offset += 2;
2589 if (len < (8 + offset))
2590 goto out;
2591
2592 vic_len = db[8 + offset] >> 5;
2593
2594 for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
2595 struct drm_display_mode *newmode;
2596 u8 vic;
2597
2598 vic = db[9 + offset + i];
2599
2600 vic--; /* VICs start at 1 */
2601 if (vic >= ARRAY_SIZE(edid_4k_modes)) {
2602 DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
2603 continue;
2604 }
2605
2606 newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
2607 if (!newmode)
2608 continue;
2609
2610 drm_mode_probed_add(connector, newmode);
2611 modes++;
2612 }
2613
2614out:
2615 return modes;
2616}
2617
2468static int 2618static int
2469cea_db_payload_len(const u8 *db) 2619cea_db_payload_len(const u8 *db)
2470{ 2620{
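do_hdmi_vsdb_modes() above relies on the CEA-861 data block framing that cea_db_tag() and cea_db_payload_len() decode: every block opens with a single byte carrying the tag in bits 7:5 and the payload length in bits 4:0, which is also what lets for_each_cea_db() hop from block to block. In C:

    /* CEA-861 data block header byte (what the helpers above decode) */
    static int db_tag(const unsigned char *db)     { return db[0] >> 5;   }
    static int db_payload(const unsigned char *db) { return db[0] & 0x1f; }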
@@ -2496,14 +2646,30 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
2496 return 0; 2646 return 0;
2497} 2647}
2498 2648
2649static bool cea_db_is_hdmi_vsdb(const u8 *db)
2650{
2651 int hdmi_id;
2652
2653 if (cea_db_tag(db) != VENDOR_BLOCK)
2654 return false;
2655
2656 if (cea_db_payload_len(db) < 5)
2657 return false;
2658
2659 hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
2660
2661 return hdmi_id == HDMI_IEEE_OUI;
2662}
2663
2499#define for_each_cea_db(cea, i, start, end) \ 2664#define for_each_cea_db(cea, i, start, end) \
2500 for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1) 2665 for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
2501 2666
2502static int 2667static int
2503add_cea_modes(struct drm_connector *connector, struct edid *edid) 2668add_cea_modes(struct drm_connector *connector, struct edid *edid)
2504{ 2669{
2505 u8 * cea = drm_find_cea_extension(edid); 2670 const u8 *cea = drm_find_cea_extension(edid);
2506 u8 * db, dbl; 2671 const u8 *db;
2672 u8 dbl;
2507 int modes = 0; 2673 int modes = 0;
2508 2674
2509 if (cea && cea_revision(cea) >= 3) { 2675 if (cea && cea_revision(cea) >= 3) {
@@ -2517,7 +2683,9 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
2517 dbl = cea_db_payload_len(db); 2683 dbl = cea_db_payload_len(db);
2518 2684
2519 if (cea_db_tag(db) == VIDEO_BLOCK) 2685 if (cea_db_tag(db) == VIDEO_BLOCK)
2520 modes += do_cea_modes (connector, db+1, dbl); 2686 modes += do_cea_modes(connector, db + 1, dbl);
2687 else if (cea_db_is_hdmi_vsdb(db))
2688 modes += do_hdmi_vsdb_modes(connector, db, dbl);
2521 } 2689 }
2522 } 2690 }
2523 2691
@@ -2570,21 +2738,6 @@ monitor_name(struct detailed_timing *t, void *data)
2570 *(u8 **)data = t->data.other_data.data.str.str; 2738 *(u8 **)data = t->data.other_data.data.str.str;
2571} 2739}
2572 2740
2573static bool cea_db_is_hdmi_vsdb(const u8 *db)
2574{
2575 int hdmi_id;
2576
2577 if (cea_db_tag(db) != VENDOR_BLOCK)
2578 return false;
2579
2580 if (cea_db_payload_len(db) < 5)
2581 return false;
2582
2583 hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
2584
2585 return hdmi_id == HDMI_IDENTIFIER;
2586}
2587
2588/** 2741/**
2589 * drm_edid_to_eld - build ELD from EDID 2742 * drm_edid_to_eld - build ELD from EDID
2590 * @connector: connector corresponding to the HDMI/DP sink 2743 * @connector: connector corresponding to the HDMI/DP sink
@@ -2732,6 +2885,58 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
2732EXPORT_SYMBOL(drm_edid_to_sad); 2885EXPORT_SYMBOL(drm_edid_to_sad);
2733 2886
2734/** 2887/**
2888 * drm_edid_to_speaker_allocation - extracts Speaker Allocation Data Blocks from EDID
2889 * @edid: EDID to parse
2890 * @sadb: pointer to the speaker block
2891 *
2892 * Looks for CEA EDID block and extracts the Speaker Allocation Data Block from it.
2893 * Note: returned pointer needs to be kfreed
2894 *
2895 * Return number of found Speaker Allocation Blocks or negative number on error.
2896 */
2897int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
2898{
2899 int count = 0;
2900 int i, start, end, dbl;
2901 const u8 *cea;
2902
2903 cea = drm_find_cea_extension(edid);
2904 if (!cea) {
2905 DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
2906 return -ENOENT;
2907 }
2908
2909 if (cea_revision(cea) < 3) {
2910 DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
2911 return -ENOTSUPP;
2912 }
2913
2914 if (cea_db_offsets(cea, &start, &end)) {
2915 DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
2916 return -EPROTO;
2917 }
2918
2919 for_each_cea_db(cea, i, start, end) {
2920 const u8 *db = &cea[i];
2921
2922 if (cea_db_tag(db) == SPEAKER_BLOCK) {
2923 dbl = cea_db_payload_len(db);
2924
2925 /* Speaker Allocation Data Block */
2926 if (dbl == 3) {
2927 *sadb = kmalloc(dbl, GFP_KERNEL);
2928 memcpy(*sadb, &db[1], dbl);
2929 count = dbl;
2930 break;
2931 }
2932 }
2933 }
2934
2935 return count;
2936}
2937EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
2938
2939/**
2735 * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond 2940 * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
2736 * @connector: connector associated with the HDMI/DP sink 2941 * @connector: connector associated with the HDMI/DP sink
2737 * @mode: the display mode 2942 * @mode: the display mode
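A caller of the new speaker-allocation helper might look like the sketch below; per the kerneldoc the returned buffer must be kfree()d, and sadb[0] carries the CEA-861 speaker-presence bits (FL/FR in bit 0, LFE in bit 1, and so on). The function name here is made up:

    #include <drm/drmP.h>
    #include <drm/drm_edid.h>
    #include <linux/slab.h>

    static void log_speaker_allocation(struct edid *edid)
    {
            u8 *sadb = NULL;
            int count = drm_edid_to_speaker_allocation(edid, &sadb);

            if (count <= 0)
                    return;  /* no CEA block, old CEA revision, or bad offsets */

            DRM_DEBUG_KMS("speaker allocation: %*ph\n", count, sadb);
            kfree(sadb);
    }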
@@ -3102,9 +3307,10 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
3102 if (err < 0) 3307 if (err < 0)
3103 return err; 3308 return err;
3104 3309
3310 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
3311 frame->pixel_repeat = 1;
3312
3105 frame->video_code = drm_match_cea_mode(mode); 3313 frame->video_code = drm_match_cea_mode(mode);
3106 if (!frame->video_code)
3107 return 0;
3108 3314
3109 frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; 3315 frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
3110 frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; 3316 frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
@@ -3112,3 +3318,39 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
3112 return 0; 3318 return 0;
3113} 3319}
3114EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode); 3320EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
3321
3322/**
3323 * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
3324 * data from a DRM display mode
3325 * @frame: HDMI vendor infoframe
3326 * @mode: DRM display mode
3327 *
3328 * Note that there is a need to send HDMI vendor infoframes only when using a
3329 * 4k or stereoscopic 3D mode. So when giving any other mode as input this
3330 * function will return -EINVAL, an error that can be safely ignored.
3331 *
3332 * Returns 0 on success or a negative error code on failure.
3333 */
3334int
3335drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
3336 const struct drm_display_mode *mode)
3337{
3338 int err;
3339 u8 vic;
3340
3341 if (!frame || !mode)
3342 return -EINVAL;
3343
3344 vic = drm_match_hdmi_mode(mode);
3345 if (!vic)
3346 return -EINVAL;
3347
3348 err = hdmi_vendor_infoframe_init(frame);
3349 if (err < 0)
3350 return err;
3351
3352 frame->vic = vic;
3353
3354 return 0;
3355}
3356EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
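A typical caller pairs the new helper with the vendor-infoframe packing helper from the companion video/hdmi series; the sketch below assumes hdmi_vendor_infoframe_pack() from linux/hdmi.h, and the register-write step is a placeholder:

    #include <linux/hdmi.h>
    #include <drm/drm_edid.h>

    static void send_hdmi_vendor_infoframe(const struct drm_display_mode *mode)
    {
            struct hdmi_vendor_infoframe frame;
            u8 buf[32];
            ssize_t len;

            /* -EINVAL just means "not a 4k mode", per the comment above */
            if (drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode) < 0)
                    return;

            len = hdmi_vendor_infoframe_pack(&frame, buf, sizeof(buf));
            if (len > 0) {
                    /* write buf[0..len) into the encoder's infoframe registers */
            }
    }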
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index c385cc5e730e..61b5a47ad239 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -181,11 +181,11 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
181EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj); 181EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
182 182
183#ifdef CONFIG_DEBUG_FS 183#ifdef CONFIG_DEBUG_FS
184/** 184/*
185 * drm_fb_cma_describe() - Helper to dump information about a single 185 * drm_fb_cma_describe() - Helper to dump information about a single
186 * CMA framebuffer object 186 * CMA framebuffer object
187 */ 187 */
188void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m) 188static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
189{ 189{
190 struct drm_fb_cma *fb_cma = to_fb_cma(fb); 190 struct drm_fb_cma *fb_cma = to_fb_cma(fb);
191 int i, n = drm_format_num_planes(fb->pixel_format); 191 int i, n = drm_format_num_planes(fb->pixel_format);
@@ -199,7 +199,6 @@ void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
199 drm_gem_cma_describe(fb_cma->obj[i], m); 199 drm_gem_cma_describe(fb_cma->obj[i], m);
200 } 200 }
201} 201}
202EXPORT_SYMBOL_GPL(drm_fb_cma_describe);
203 202
204/** 203/**
205 * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects 204 * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
new file mode 100644
index 000000000000..e788882d9021
--- /dev/null
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -0,0 +1,124 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#include "drmP.h"
25#include "drm_flip_work.h"
26
27/**
28 * drm_flip_work_queue - queue work
29 * @work: the flip-work
30 * @val: the value to queue
31 *
32 * Queues work that will later be run (passed back to drm_flip_func_t
33 * func) on a work queue after drm_flip_work_commit() is called.
34 */
35void drm_flip_work_queue(struct drm_flip_work *work, void *val)
36{
37 if (kfifo_put(&work->fifo, (const void **)&val)) {
38 atomic_inc(&work->pending);
39 } else {
40 DRM_ERROR("%s fifo full!\n", work->name);
41 work->func(work, val);
42 }
43}
44EXPORT_SYMBOL(drm_flip_work_queue);
45
46/**
47 * drm_flip_work_commit - commit queued work
48 * @work: the flip-work
49 * @wq: the work-queue to run the queued work on
50 *
51 * Trigger work previously queued by drm_flip_work_queue() to run
52 * on a workqueue. The typical usage would be to queue work (via
53 * drm_flip_work_queue()) at any point (from vblank irq and/or
54 * prior), and then from vblank irq commit the queued work.
55 */
56void drm_flip_work_commit(struct drm_flip_work *work,
57 struct workqueue_struct *wq)
58{
59 uint32_t pending = atomic_read(&work->pending);
60 atomic_add(pending, &work->count);
61 atomic_sub(pending, &work->pending);
62 queue_work(wq, &work->worker);
63}
64EXPORT_SYMBOL(drm_flip_work_commit);
65
66static void flip_worker(struct work_struct *w)
67{
68 struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
69 uint32_t count = atomic_read(&work->count);
70 void *val = NULL;
71
72 atomic_sub(count, &work->count);
73
74 while(count--)
75 if (!WARN_ON(!kfifo_get(&work->fifo, &val)))
76 work->func(work, val);
77}
78
79/**
80 * drm_flip_work_init - initialize flip-work
81 * @work: the flip-work to initialize
82 * @size: the max queue depth
83 * @name: debug name
84 * @func: the callback work function
85 *
86 * Initializes/allocates resources for the flip-work
87 *
88 * RETURNS:
89 * Zero on success, error code on failure.
90 */
91int drm_flip_work_init(struct drm_flip_work *work, int size,
92 const char *name, drm_flip_func_t func)
93{
94 int ret;
95
96 work->name = name;
97 atomic_set(&work->count, 0);
98 atomic_set(&work->pending, 0);
99 work->func = func;
100
101 ret = kfifo_alloc(&work->fifo, size, GFP_KERNEL);
102 if (ret) {
103 DRM_ERROR("could not allocate %s fifo\n", name);
104 return ret;
105 }
106
107 INIT_WORK(&work->worker, flip_worker);
108
109 return 0;
110}
111EXPORT_SYMBOL(drm_flip_work_init);
112
113/**
114 * drm_flip_work_cleanup - cleans up flip-work
115 * @work: the flip-work to cleanup
116 *
117 * Destroy resources allocated for the flip-work
118 */
119void drm_flip_work_cleanup(struct drm_flip_work *work)
120{
121 WARN_ON(!kfifo_is_empty(&work->fifo));
122 kfifo_free(&work->fifo);
123}
124EXPORT_SYMBOL(drm_flip_work_cleanup);
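The classic consumer of this new helper is deferring the drm_framebuffer_unreference() of the old scanout buffer out of the vblank interrupt. A hypothetical driver would use the API roughly as follows (only the drm_flip_work_* calls come from this file; the rest is illustrative):

    #include <linux/workqueue.h>
    #include <drm/drmP.h>
    #include "drm_flip_work.h"

    struct my_crtc {
            struct drm_flip_work unref_work;
    };

    static void unref_fb_worker(struct drm_flip_work *work, void *val)
    {
            drm_framebuffer_unreference(val);  /* runs in workqueue context */
    }

    static int my_crtc_init_flip_work(struct my_crtc *c)
    {
            /* room for 16 outstanding buffers before the fifo-full fallback */
            return drm_flip_work_init(&c->unref_work, 16, "fb unref",
                                      unref_fb_worker);
    }

    static void my_page_flip(struct my_crtc *c, struct drm_framebuffer *old_fb)
    {
            drm_flip_work_queue(&c->unref_work, old_fb);
    }

    static void my_vblank_handler(struct my_crtc *c)
    {
            /* flip done: let a workqueue drop the old buffer's reference */
            drm_flip_work_commit(&c->unref_work, system_wq);
    }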
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3a24385e0368..4be8e09a32ef 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -48,59 +48,21 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
48 48
49static int drm_setup(struct drm_device * dev) 49static int drm_setup(struct drm_device * dev)
50{ 50{
51 int i;
52 int ret; 51 int ret;
53 52
54 if (dev->driver->firstopen) { 53 if (dev->driver->firstopen &&
54 !drm_core_check_feature(dev, DRIVER_MODESET)) {
55 ret = dev->driver->firstopen(dev); 55 ret = dev->driver->firstopen(dev);
56 if (ret != 0) 56 if (ret != 0)
57 return ret; 57 return ret;
58 } 58 }
59 59
60 atomic_set(&dev->ioctl_count, 0); 60 ret = drm_legacy_dma_setup(dev);
61 atomic_set(&dev->vma_count, 0); 61 if (ret < 0)
62 62 return ret;
63 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
64 !drm_core_check_feature(dev, DRIVER_MODESET)) {
65 dev->buf_use = 0;
66 atomic_set(&dev->buf_alloc, 0);
67
68 i = drm_dma_setup(dev);
69 if (i < 0)
70 return i;
71 }
72
73 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
74 atomic_set(&dev->counts[i], 0);
75
76 dev->sigdata.lock = NULL;
77
78 dev->context_flag = 0;
79 dev->interrupt_flag = 0;
80 dev->dma_flag = 0;
81 dev->last_context = 0;
82 dev->last_switch = 0;
83 dev->last_checked = 0;
84 init_waitqueue_head(&dev->context_wait);
85 dev->if_version = 0;
86
87 dev->ctx_start = 0;
88 dev->lck_start = 0;
89 63
90 dev->buf_async = NULL;
91 init_waitqueue_head(&dev->buf_readers);
92 init_waitqueue_head(&dev->buf_writers);
93 64
94 DRM_DEBUG("\n"); 65 DRM_DEBUG("\n");
95
96 /*
97 * The kernel's context could be created here, but is now created
98 * in drm_dma_enqueue. This is more resource-efficient for
99 * hardware that does not do DMA, but may mean that
100 * drm_select_queue fails between the time the interrupt is
101 * initialized and the time the queues are initialized.
102 */
103
104 return 0; 66 return 0;
105} 67}
106 68
@@ -257,7 +219,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
257 return -EBUSY; /* No exclusive opens */ 219 return -EBUSY; /* No exclusive opens */
258 if (!drm_cpu_valid()) 220 if (!drm_cpu_valid())
259 return -EINVAL; 221 return -EINVAL;
260 if (dev->switch_power_state != DRM_SWITCH_POWER_ON) 222 if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
261 return -EINVAL; 223 return -EINVAL;
262 224
263 DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); 225 DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
@@ -300,10 +262,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
300 goto out_prime_destroy; 262 goto out_prime_destroy;
301 } 263 }
302 264
303 265 /* if there is no current master make this fd it, but do not create
304 /* if there is no current master make this fd it */ 266 * any master object for render clients */
305 mutex_lock(&dev->struct_mutex); 267 mutex_lock(&dev->struct_mutex);
306 if (!priv->minor->master) { 268 if (!priv->minor->master && !drm_is_render_client(priv)) {
307 /* create a new master */ 269 /* create a new master */
308 priv->minor->master = drm_master_create(priv->minor); 270 priv->minor->master = drm_master_create(priv->minor);
309 if (!priv->minor->master) { 271 if (!priv->minor->master) {
@@ -341,12 +303,11 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
341 goto out_close; 303 goto out_close;
342 } 304 }
343 } 305 }
344 mutex_unlock(&dev->struct_mutex); 306 } else if (!drm_is_render_client(priv)) {
345 } else {
346 /* get a reference to the master */ 307 /* get a reference to the master */
347 priv->master = drm_master_get(priv->minor->master); 308 priv->master = drm_master_get(priv->minor->master);
348 mutex_unlock(&dev->struct_mutex);
349 } 309 }
310 mutex_unlock(&dev->struct_mutex);
350 311
351 mutex_lock(&dev->struct_mutex); 312 mutex_lock(&dev->struct_mutex);
352 list_add(&priv->lhead, &dev->filelist); 313 list_add(&priv->lhead, &dev->filelist);
@@ -388,18 +349,6 @@ out_put_pid:
388 return ret; 349 return ret;
389} 350}
390 351
391/** No-op. */
392int drm_fasync(int fd, struct file *filp, int on)
393{
394 struct drm_file *priv = filp->private_data;
395 struct drm_device *dev = priv->minor->dev;
396
397 DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
398 (long)old_encode_dev(priv->minor->device));
399 return fasync_helper(fd, filp, on, &dev->buf_async);
400}
401EXPORT_SYMBOL(drm_fasync);
402
403static void drm_master_release(struct drm_device *dev, struct file *filp) 352static void drm_master_release(struct drm_device *dev, struct file *filp)
404{ 353{
405 struct drm_file *file_priv = filp->private_data; 354 struct drm_file *file_priv = filp->private_data;
@@ -490,26 +439,7 @@ int drm_release(struct inode *inode, struct file *filp)
490 if (dev->driver->driver_features & DRIVER_GEM) 439 if (dev->driver->driver_features & DRIVER_GEM)
491 drm_gem_release(dev, file_priv); 440 drm_gem_release(dev, file_priv);
492 441
493 mutex_lock(&dev->ctxlist_mutex); 442 drm_legacy_ctxbitmap_release(dev, file_priv);
494 if (!list_empty(&dev->ctxlist)) {
495 struct drm_ctx_list *pos, *n;
496
497 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
498 if (pos->tag == file_priv &&
499 pos->handle != DRM_KERNEL_CONTEXT) {
500 if (dev->driver->context_dtor)
501 dev->driver->context_dtor(dev,
502 pos->handle);
503
504 drm_ctxbitmap_free(dev, pos->handle);
505
506 list_del(&pos->head);
507 kfree(pos);
508 --dev->ctx_count;
509 }
510 }
511 }
512 mutex_unlock(&dev->ctxlist_mutex);
513 443
514 mutex_lock(&dev->struct_mutex); 444 mutex_lock(&dev->struct_mutex);
515 445
@@ -547,7 +477,8 @@ int drm_release(struct inode *inode, struct file *filp)
547 iput(container_of(dev->dev_mapping, struct inode, i_data)); 477 iput(container_of(dev->dev_mapping, struct inode, i_data));
548 478
549 	/* drop the reference held by the file priv */ 479 	/* drop the reference held by the file priv */
550 drm_master_put(&file_priv->master); 480 if (file_priv->master)
481 drm_master_put(&file_priv->master);
551 file_priv->is_master = 0; 482 file_priv->is_master = 0;
552 list_del(&file_priv->lhead); 483 list_del(&file_priv->lhead);
553 mutex_unlock(&dev->struct_mutex); 484 mutex_unlock(&dev->struct_mutex);
@@ -555,6 +486,7 @@ int drm_release(struct inode *inode, struct file *filp)
555 if (dev->driver->postclose) 486 if (dev->driver->postclose)
556 dev->driver->postclose(dev, file_priv); 487 dev->driver->postclose(dev, file_priv);
557 488
489
558 if (drm_core_check_feature(dev, DRIVER_PRIME)) 490 if (drm_core_check_feature(dev, DRIVER_PRIME))
559 drm_prime_destroy_file_private(&file_priv->prime); 491 drm_prime_destroy_file_private(&file_priv->prime);
560 492
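The render-client special-casing in the hunks above hinges on drm_is_render_client(); a sketch of what that helper checks, shown here only for orientation (the actual definition lives in the DRM headers):

/* Render nodes (/dev/dri/renderD*) never carry master state, so the
 * open and release paths skip all master handling for them. */
static inline bool drm_is_render_client(struct drm_file *file_priv)
{
	return file_priv->minor->type == DRM_MINOR_RENDER;
}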
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 603f256152ef..49293bdc972a 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -37,6 +37,7 @@
37#include <linux/shmem_fs.h> 37#include <linux/shmem_fs.h>
38#include <linux/dma-buf.h> 38#include <linux/dma-buf.h>
39#include <drm/drmP.h> 39#include <drm/drmP.h>
40#include <drm/drm_vma_manager.h>
40 41
41/** @file drm_gem.c 42/** @file drm_gem.c
42 * 43 *
@@ -92,7 +93,7 @@ drm_gem_init(struct drm_device *dev)
92{ 93{
93 struct drm_gem_mm *mm; 94 struct drm_gem_mm *mm;
94 95
95 spin_lock_init(&dev->object_name_lock); 96 mutex_init(&dev->object_name_lock);
96 idr_init(&dev->object_name_idr); 97 idr_init(&dev->object_name_idr);
97 98
98 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL); 99 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
@@ -102,14 +103,9 @@ drm_gem_init(struct drm_device *dev)
102 } 103 }
103 104
104 dev->mm_private = mm; 105 dev->mm_private = mm;
105 106 drm_vma_offset_manager_init(&mm->vma_manager,
106 if (drm_ht_create(&mm->offset_hash, 12)) { 107 DRM_FILE_PAGE_OFFSET_START,
107 kfree(mm); 108 DRM_FILE_PAGE_OFFSET_SIZE);
108 return -ENOMEM;
109 }
110
111 drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
112 DRM_FILE_PAGE_OFFSET_SIZE);
113 109
114 return 0; 110 return 0;
115} 111}
@@ -119,8 +115,7 @@ drm_gem_destroy(struct drm_device *dev)
119{ 115{
120 struct drm_gem_mm *mm = dev->mm_private; 116 struct drm_gem_mm *mm = dev->mm_private;
121 117
122 drm_mm_takedown(&mm->offset_manager); 118 drm_vma_offset_manager_destroy(&mm->vma_manager);
123 drm_ht_remove(&mm->offset_hash);
124 kfree(mm); 119 kfree(mm);
125 dev->mm_private = NULL; 120 dev->mm_private = NULL;
126} 121}
@@ -132,16 +127,14 @@ drm_gem_destroy(struct drm_device *dev)
132int drm_gem_object_init(struct drm_device *dev, 127int drm_gem_object_init(struct drm_device *dev,
133 struct drm_gem_object *obj, size_t size) 128 struct drm_gem_object *obj, size_t size)
134{ 129{
135 BUG_ON((size & (PAGE_SIZE - 1)) != 0); 130 struct file *filp;
136 131
137 obj->dev = dev; 132 filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
138 obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); 133 if (IS_ERR(filp))
139 if (IS_ERR(obj->filp)) 134 return PTR_ERR(filp);
140 return PTR_ERR(obj->filp);
141 135
142 kref_init(&obj->refcount); 136 drm_gem_private_object_init(dev, obj, size);
143 atomic_set(&obj->handle_count, 0); 137 obj->filp = filp;
144 obj->size = size;
145 138
146 return 0; 139 return 0;
147} 140}
@@ -152,8 +145,8 @@ EXPORT_SYMBOL(drm_gem_object_init);
152 * no GEM provided backing store. Instead the caller is responsible for 145 * no GEM provided backing store. Instead the caller is responsible for
153 * backing the object and handling it. 146 * backing the object and handling it.
154 */ 147 */
155int drm_gem_private_object_init(struct drm_device *dev, 148void drm_gem_private_object_init(struct drm_device *dev,
156 struct drm_gem_object *obj, size_t size) 149 struct drm_gem_object *obj, size_t size)
157{ 150{
158 BUG_ON((size & (PAGE_SIZE - 1)) != 0); 151 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
159 152
@@ -161,10 +154,9 @@ int drm_gem_private_object_init(struct drm_device *dev,
161 obj->filp = NULL; 154 obj->filp = NULL;
162 155
163 kref_init(&obj->refcount); 156 kref_init(&obj->refcount);
164 atomic_set(&obj->handle_count, 0); 157 obj->handle_count = 0;
165 obj->size = size; 158 obj->size = size;
166 159 drm_vma_node_reset(&obj->vma_node);
167 return 0;
168} 160}
169EXPORT_SYMBOL(drm_gem_private_object_init); 161EXPORT_SYMBOL(drm_gem_private_object_init);
170 162
@@ -200,16 +192,79 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
200static void 192static void
201drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) 193drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
202{ 194{
203 if (obj->import_attach) { 195 /*
204 drm_prime_remove_buf_handle(&filp->prime, 196 * Note: obj->dma_buf can't disappear as long as we still hold a
205 obj->import_attach->dmabuf); 197 * handle reference in obj->handle_count.
198 */
199 mutex_lock(&filp->prime.lock);
200 if (obj->dma_buf) {
201 drm_prime_remove_buf_handle_locked(&filp->prime,
202 obj->dma_buf);
206 } 203 }
207 if (obj->export_dma_buf) { 204 mutex_unlock(&filp->prime.lock);
208 drm_prime_remove_buf_handle(&filp->prime, 205}
209 obj->export_dma_buf); 206
207static void drm_gem_object_ref_bug(struct kref *list_kref)
208{
209 BUG();
210}
211
212/**
213 * Called after the last handle to the object has been closed
214 *
215 * Removes any name for the object. Note that this must be
216 * called before drm_gem_object_free or we'll be touching
217 * freed memory
218 */
219static void drm_gem_object_handle_free(struct drm_gem_object *obj)
220{
221 struct drm_device *dev = obj->dev;
222
223 /* Remove any name for this object */
224 if (obj->name) {
225 idr_remove(&dev->object_name_idr, obj->name);
226 obj->name = 0;
227 /*
228 * The object name held a reference to this object, drop
229 * that now.
230 *
231 * This cannot be the last reference, since the handle holds one too.
232 */
233 kref_put(&obj->refcount, drm_gem_object_ref_bug);
210 } 234 }
211} 235}
212 236
237static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
238{
239 /* Unbreak the reference cycle if we have an exported dma_buf. */
240 if (obj->dma_buf) {
241 dma_buf_put(obj->dma_buf);
242 obj->dma_buf = NULL;
243 }
244}
245
246static void
247drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
248{
249 if (WARN_ON(obj->handle_count == 0))
250 return;
251
252 /*
253 * Must bump handle count first as this may be the last
254 * ref, in which case the object would disappear before we
255 * checked for a name
256 */
257
258 mutex_lock(&obj->dev->object_name_lock);
259 if (--obj->handle_count == 0) {
260 drm_gem_object_handle_free(obj);
261 drm_gem_object_exported_dma_buf_free(obj);
262 }
263 mutex_unlock(&obj->dev->object_name_lock);
264
265 drm_gem_object_unreference_unlocked(obj);
266}
267
213/** 268/**
214 * Removes the mapping from handle to filp for this object. 269 * Removes the mapping from handle to filp for this object.
215 */ 270 */
@@ -242,7 +297,9 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
242 idr_remove(&filp->object_idr, handle); 297 idr_remove(&filp->object_idr, handle);
243 spin_unlock(&filp->table_lock); 298 spin_unlock(&filp->table_lock);
244 299
245 drm_gem_remove_prime_handles(obj, filp); 300 if (drm_core_check_feature(dev, DRIVER_PRIME))
301 drm_gem_remove_prime_handles(obj, filp);
302 drm_vma_node_revoke(&obj->vma_node, filp->filp);
246 303
247 if (dev->driver->gem_close_object) 304 if (dev->driver->gem_close_object)
248 dev->driver->gem_close_object(obj, filp); 305 dev->driver->gem_close_object(obj, filp);
@@ -253,18 +310,36 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
253EXPORT_SYMBOL(drm_gem_handle_delete); 310EXPORT_SYMBOL(drm_gem_handle_delete);
254 311
255/** 312/**
256 * Create a handle for this object. This adds a handle reference 313 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
257 * to the object, which includes a regular reference count. Callers 314 *
258 * will likely want to dereference the object afterwards. 315 * This implements the ->dumb_destroy kms driver callback for drivers which use
316 * gem to manage their backing storage.
317 */
318int drm_gem_dumb_destroy(struct drm_file *file,
319 struct drm_device *dev,
320 uint32_t handle)
321{
322 return drm_gem_handle_delete(file, handle);
323}
324EXPORT_SYMBOL(drm_gem_dumb_destroy);
325
326/**
327 * drm_gem_handle_create_tail - internal function to create a handle
328 *
329 * This expects the dev->object_name_lock to be held already and will drop it
330 * before returning. Used to avoid races in establishing new handles when
331 * importing an object from either a flink name or a dma-buf.
259 */ 332 */
260int 333int
261drm_gem_handle_create(struct drm_file *file_priv, 334drm_gem_handle_create_tail(struct drm_file *file_priv,
262 struct drm_gem_object *obj, 335 struct drm_gem_object *obj,
263 u32 *handlep) 336 u32 *handlep)
264{ 337{
265 struct drm_device *dev = obj->dev; 338 struct drm_device *dev = obj->dev;
266 int ret; 339 int ret;
267 340
341 WARN_ON(!mutex_is_locked(&dev->object_name_lock));
342
268 /* 343 /*
269 * Get the user-visible handle using idr. Preload and perform 344 * Get the user-visible handle using idr. Preload and perform
270 * allocation under our spinlock. 345 * allocation under our spinlock.
@@ -273,14 +348,22 @@ drm_gem_handle_create(struct drm_file *file_priv,
273 spin_lock(&file_priv->table_lock); 348 spin_lock(&file_priv->table_lock);
274 349
275 ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); 350 ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
276 351 drm_gem_object_reference(obj);
352 obj->handle_count++;
277 spin_unlock(&file_priv->table_lock); 353 spin_unlock(&file_priv->table_lock);
278 idr_preload_end(); 354 idr_preload_end();
279 if (ret < 0) 355 mutex_unlock(&dev->object_name_lock);
356 if (ret < 0) {
357 drm_gem_object_handle_unreference_unlocked(obj);
280 return ret; 358 return ret;
359 }
281 *handlep = ret; 360 *handlep = ret;
282 361
283 drm_gem_object_handle_reference(obj); 362 ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
363 if (ret) {
364 drm_gem_handle_delete(file_priv, *handlep);
365 return ret;
366 }
284 367
285 if (dev->driver->gem_open_object) { 368 if (dev->driver->gem_open_object) {
286 ret = dev->driver->gem_open_object(obj, file_priv); 369 ret = dev->driver->gem_open_object(obj, file_priv);
@@ -292,6 +375,21 @@ drm_gem_handle_create(struct drm_file *file_priv,
292 375
293 return 0; 376 return 0;
294} 377}
378
379/**
380 * Create a handle for this object. This adds a handle reference
381 * to the object, which includes a regular reference count. Callers
382 * will likely want to dereference the object afterwards.
383 */
384int
385drm_gem_handle_create(struct drm_file *file_priv,
386 struct drm_gem_object *obj,
387 u32 *handlep)
388{
389 mutex_lock(&obj->dev->object_name_lock);
390
391 return drm_gem_handle_create_tail(file_priv, obj, handlep);
392}
295EXPORT_SYMBOL(drm_gem_handle_create); 393EXPORT_SYMBOL(drm_gem_handle_create);
296 394
297 395
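The locking contract of drm_gem_handle_create_tail() is the subtle part of this hunk: the caller must already hold dev->object_name_lock, and the function drops it before returning. A condensed, hypothetical import path shows why; this mirrors the drm_gem_open_ioctl() rework further down:

/* Sketch: look up a flink name and create a handle for it under a
 * single hold of object_name_lock, so a concurrent gem_close cannot
 * drop the last handle between the lookup and the handle creation. */
static int example_open_by_name(struct drm_device *dev,
				struct drm_file *file_priv,
				u32 name, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) name);
	if (!obj) {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}
	drm_gem_object_reference(obj);

	/* Unlocks dev->object_name_lock before returning. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);

	/* The handle now holds its own reference; drop the lookup one. */
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}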
@@ -306,81 +404,155 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
306{ 404{
307 struct drm_device *dev = obj->dev; 405 struct drm_device *dev = obj->dev;
308 struct drm_gem_mm *mm = dev->mm_private; 406 struct drm_gem_mm *mm = dev->mm_private;
309 struct drm_map_list *list = &obj->map_list;
310 407
311 drm_ht_remove_item(&mm->offset_hash, &list->hash); 408 drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
312 drm_mm_put_block(list->file_offset_node);
313 kfree(list->map);
314 list->map = NULL;
315} 409}
316EXPORT_SYMBOL(drm_gem_free_mmap_offset); 410EXPORT_SYMBOL(drm_gem_free_mmap_offset);
317 411
318/** 412/**
319 * drm_gem_create_mmap_offset - create a fake mmap offset for an object 413 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
320 * @obj: obj in question 414 * @obj: obj in question
415 * @size: the virtual size
321 * 416 *
322 * GEM memory mapping works by handing back to userspace a fake mmap offset 417 * GEM memory mapping works by handing back to userspace a fake mmap offset
323 * it can use in a subsequent mmap(2) call. The DRM core code then looks 418 * it can use in a subsequent mmap(2) call. The DRM core code then looks
324 * up the object based on the offset and sets up the various memory mapping 419 * up the object based on the offset and sets up the various memory mapping
325 * structures. 420 * structures.
326 * 421 *
327 * This routine allocates and attaches a fake offset for @obj. 422 * This routine allocates and attaches a fake offset for @obj, in cases where
423 * the virtual size differs from the physical size (ie. obj->size). Otherwise
424 * just use drm_gem_create_mmap_offset().
328 */ 425 */
329int 426int
330drm_gem_create_mmap_offset(struct drm_gem_object *obj) 427drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
331{ 428{
332 struct drm_device *dev = obj->dev; 429 struct drm_device *dev = obj->dev;
333 struct drm_gem_mm *mm = dev->mm_private; 430 struct drm_gem_mm *mm = dev->mm_private;
334 struct drm_map_list *list;
335 struct drm_local_map *map;
336 int ret;
337 431
338 /* Set the object up for mmap'ing */ 432 return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
339 list = &obj->map_list; 433 size / PAGE_SIZE);
340 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); 434}
341 if (!list->map) 435EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
342 return -ENOMEM;
343
344 map = list->map;
345 map->type = _DRM_GEM;
346 map->size = obj->size;
347 map->handle = obj;
348 436
349 /* Get a DRM GEM mmap offset allocated... */ 437/**
350 list->file_offset_node = drm_mm_search_free(&mm->offset_manager, 438 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
351 obj->size / PAGE_SIZE, 0, false); 439 * @obj: obj in question
440 *
441 * GEM memory mapping works by handing back to userspace a fake mmap offset
442 * it can use in a subsequent mmap(2) call. The DRM core code then looks
443 * up the object based on the offset and sets up the various memory mapping
444 * structures.
445 *
446 * This routine allocates and attaches a fake offset for @obj.
447 */
448int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
449{
450 return drm_gem_create_mmap_offset_size(obj, obj->size);
451}
452EXPORT_SYMBOL(drm_gem_create_mmap_offset);
352 453
353 if (!list->file_offset_node) { 454/**
354 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); 455 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
355 ret = -ENOSPC; 456 * from shmem
356 goto out_free_list; 457 * @obj: obj in question
458 * @gfpmask: gfp mask of requested pages
459 */
460struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
461{
462 struct inode *inode;
463 struct address_space *mapping;
464 struct page *p, **pages;
465 int i, npages;
466
467 /* This is the shared memory object that backs the GEM resource */
468 inode = file_inode(obj->filp);
469 mapping = inode->i_mapping;
470
471 /* We already BUG_ON() for non-page-aligned sizes in
472 * drm_gem_object_init(), so we should never hit this unless
473 	 * a driver author is doing something really wrong:
474 */
475 WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
476
477 npages = obj->size >> PAGE_SHIFT;
478
479 pages = drm_malloc_ab(npages, sizeof(struct page *));
480 if (pages == NULL)
481 return ERR_PTR(-ENOMEM);
482
483 gfpmask |= mapping_gfp_mask(mapping);
484
485 for (i = 0; i < npages; i++) {
486 p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
487 if (IS_ERR(p))
488 goto fail;
489 pages[i] = p;
490
491 	/* There is a hypothetical issue with drivers that require
492 	 * buffer memory in the low 4GB. If the pages are un-
493 * pinned, and swapped out, they can end up swapped back
494 * in above 4GB. If pages are already in memory, then
495 * shmem_read_mapping_page_gfp will ignore the gfpmask,
496 * even if the already in-memory page disobeys the mask.
497 *
498 * It is only a theoretical issue today, because none of
499 * the devices with this limitation can be populated with
500 * enough memory to trigger the issue. But this BUG_ON()
501 * is here as a reminder in case the problem with
502 * shmem_read_mapping_page_gfp() isn't solved by the time
503 * it does become a real issue.
504 *
505 * See this thread: http://lkml.org/lkml/2011/7/11/238
506 */
507 BUG_ON((gfpmask & __GFP_DMA32) &&
508 (page_to_pfn(p) >= 0x00100000UL));
357 } 509 }
358 510
359 list->file_offset_node = drm_mm_get_block(list->file_offset_node, 511 return pages;
360 obj->size / PAGE_SIZE, 0);
361 if (!list->file_offset_node) {
362 ret = -ENOMEM;
363 goto out_free_list;
364 }
365 512
366 list->hash.key = list->file_offset_node->start; 513fail:
367 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash); 514 while (i--)
368 if (ret) { 515 page_cache_release(pages[i]);
369 DRM_ERROR("failed to add to map hash\n");
370 goto out_free_mm;
371 }
372 516
373 return 0; 517 drm_free_large(pages);
518 return ERR_CAST(p);
519}
520EXPORT_SYMBOL(drm_gem_get_pages);
374 521
375out_free_mm: 522/**
376 drm_mm_put_block(list->file_offset_node); 523 * drm_gem_put_pages - helper to free backing pages for a GEM object
377out_free_list: 524 * @obj: obj in question
378 kfree(list->map); 525 * @pages: pages to free
379 list->map = NULL; 526 * @dirty: if true, pages will be marked as dirty
527 * @accessed: if true, the pages will be marked as accessed
528 */
529void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
530 bool dirty, bool accessed)
531{
532 int i, npages;
380 533
381 return ret; 534 /* We already BUG_ON() for non-page-aligned sizes in
535 * drm_gem_object_init(), so we should never hit this unless
536 	 * a driver author is doing something really wrong:
537 */
538 WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
539
540 npages = obj->size >> PAGE_SHIFT;
541
542 for (i = 0; i < npages; i++) {
543 if (dirty)
544 set_page_dirty(pages[i]);
545
546 if (accessed)
547 mark_page_accessed(pages[i]);
548
549 /* Undo the reference we took when populating the table */
550 page_cache_release(pages[i]);
551 }
552
553 drm_free_large(pages);
382} 554}
383EXPORT_SYMBOL(drm_gem_create_mmap_offset); 555EXPORT_SYMBOL(drm_gem_put_pages);
384 556
385/** Returns a reference to the object named by the handle. */ 557/** Returns a reference to the object named by the handle. */
386struct drm_gem_object * 558struct drm_gem_object *
@@ -445,8 +617,14 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
445 if (obj == NULL) 617 if (obj == NULL)
446 return -ENOENT; 618 return -ENOENT;
447 619
620 mutex_lock(&dev->object_name_lock);
448 idr_preload(GFP_KERNEL); 621 idr_preload(GFP_KERNEL);
449 spin_lock(&dev->object_name_lock); 622 /* prevent races with concurrent gem_close. */
623 if (obj->handle_count == 0) {
624 ret = -ENOENT;
625 goto err;
626 }
627
450 if (!obj->name) { 628 if (!obj->name) {
451 ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT); 629 ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
452 if (ret < 0) 630 if (ret < 0)
@@ -462,8 +640,8 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
462 ret = 0; 640 ret = 0;
463 641
464err: 642err:
465 spin_unlock(&dev->object_name_lock);
466 idr_preload_end(); 643 idr_preload_end();
644 mutex_unlock(&dev->object_name_lock);
467 drm_gem_object_unreference_unlocked(obj); 645 drm_gem_object_unreference_unlocked(obj);
468 return ret; 646 return ret;
469} 647}
@@ -486,15 +664,17 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
486 if (!(dev->driver->driver_features & DRIVER_GEM)) 664 if (!(dev->driver->driver_features & DRIVER_GEM))
487 return -ENODEV; 665 return -ENODEV;
488 666
489 spin_lock(&dev->object_name_lock); 667 mutex_lock(&dev->object_name_lock);
490 obj = idr_find(&dev->object_name_idr, (int) args->name); 668 obj = idr_find(&dev->object_name_idr, (int) args->name);
491 if (obj) 669 if (obj) {
492 drm_gem_object_reference(obj); 670 drm_gem_object_reference(obj);
493 spin_unlock(&dev->object_name_lock); 671 } else {
494 if (!obj) 672 mutex_unlock(&dev->object_name_lock);
495 return -ENOENT; 673 return -ENOENT;
674 }
496 675
497 ret = drm_gem_handle_create(file_priv, obj, &handle); 676 /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
677 ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
498 drm_gem_object_unreference_unlocked(obj); 678 drm_gem_object_unreference_unlocked(obj);
499 if (ret) 679 if (ret)
500 return ret; 680 return ret;
@@ -527,7 +707,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
527 struct drm_gem_object *obj = ptr; 707 struct drm_gem_object *obj = ptr;
528 struct drm_device *dev = obj->dev; 708 struct drm_device *dev = obj->dev;
529 709
530 drm_gem_remove_prime_handles(obj, file_priv); 710 if (drm_core_check_feature(dev, DRIVER_PRIME))
711 drm_gem_remove_prime_handles(obj, file_priv);
712 drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
531 713
532 if (dev->driver->gem_close_object) 714 if (dev->driver->gem_close_object)
533 dev->driver->gem_close_object(obj, file_priv); 715 dev->driver->gem_close_object(obj, file_priv);
@@ -553,6 +735,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
553void 735void
554drm_gem_object_release(struct drm_gem_object *obj) 736drm_gem_object_release(struct drm_gem_object *obj)
555{ 737{
738 WARN_ON(obj->dma_buf);
739
556 if (obj->filp) 740 if (obj->filp)
557 fput(obj->filp); 741 fput(obj->filp);
558} 742}
@@ -577,41 +761,6 @@ drm_gem_object_free(struct kref *kref)
577} 761}
578EXPORT_SYMBOL(drm_gem_object_free); 762EXPORT_SYMBOL(drm_gem_object_free);
579 763
580static void drm_gem_object_ref_bug(struct kref *list_kref)
581{
582 BUG();
583}
584
585/**
586 * Called after the last handle to the object has been closed
587 *
588 * Removes any name for the object. Note that this must be
589 * called before drm_gem_object_free or we'll be touching
590 * freed memory
591 */
592void drm_gem_object_handle_free(struct drm_gem_object *obj)
593{
594 struct drm_device *dev = obj->dev;
595
596 /* Remove any name for this object */
597 spin_lock(&dev->object_name_lock);
598 if (obj->name) {
599 idr_remove(&dev->object_name_idr, obj->name);
600 obj->name = 0;
601 spin_unlock(&dev->object_name_lock);
602 /*
603 * The object name held a reference to this object, drop
604 * that now.
605 *
606 * This cannot be the last reference, since the handle holds one too.
607 */
608 kref_put(&obj->refcount, drm_gem_object_ref_bug);
609 } else
610 spin_unlock(&dev->object_name_lock);
611
612}
613EXPORT_SYMBOL(drm_gem_object_handle_free);
614
615void drm_gem_vm_open(struct vm_area_struct *vma) 764void drm_gem_vm_open(struct vm_area_struct *vma)
616{ 765{
617 struct drm_gem_object *obj = vma->vm_private_data; 766 struct drm_gem_object *obj = vma->vm_private_data;
@@ -653,6 +802,10 @@ EXPORT_SYMBOL(drm_gem_vm_close);
653 * the GEM object is not looked up based on its fake offset. To implement the 802 * the GEM object is not looked up based on its fake offset. To implement the
654 * DRM mmap operation, drivers should use the drm_gem_mmap() function. 803 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
655 * 804 *
805 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
806 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
807 * callers must verify access restrictions before calling this helper.
808 *
656 * NOTE: This function has to be protected with dev->struct_mutex 809 * NOTE: This function has to be protected with dev->struct_mutex
657 * 810 *
658 * Return 0 on success or -EINVAL if the object size is smaller than the VMA 811
@@ -701,14 +854,17 @@ EXPORT_SYMBOL(drm_gem_mmap_obj);
701 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will 854 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
702 * contain the fake offset we created when the GTT map ioctl was called on 855 * contain the fake offset we created when the GTT map ioctl was called on
703 * the object) and map it with a call to drm_gem_mmap_obj(). 856 * the object) and map it with a call to drm_gem_mmap_obj().
857 *
858 * If the caller is not granted access to the buffer object, the mmap will fail
859 * with EACCES. Please see the vma manager for more information.
704 */ 860 */
705int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 861int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
706{ 862{
707 struct drm_file *priv = filp->private_data; 863 struct drm_file *priv = filp->private_data;
708 struct drm_device *dev = priv->minor->dev; 864 struct drm_device *dev = priv->minor->dev;
709 struct drm_gem_mm *mm = dev->mm_private; 865 struct drm_gem_mm *mm = dev->mm_private;
710 struct drm_local_map *map = NULL; 866 struct drm_gem_object *obj;
711 struct drm_hash_item *hash; 867 struct drm_vma_offset_node *node;
712 int ret = 0; 868 int ret = 0;
713 869
714 if (drm_device_is_unplugged(dev)) 870 if (drm_device_is_unplugged(dev))
@@ -716,21 +872,19 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
716 872
717 mutex_lock(&dev->struct_mutex); 873 mutex_lock(&dev->struct_mutex);
718 874
719 if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) { 875 node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
876 vma_pages(vma));
877 if (!node) {
720 mutex_unlock(&dev->struct_mutex); 878 mutex_unlock(&dev->struct_mutex);
721 return drm_mmap(filp, vma); 879 return drm_mmap(filp, vma);
880 } else if (!drm_vma_node_is_allowed(node, filp)) {
881 mutex_unlock(&dev->struct_mutex);
882 return -EACCES;
722 } 883 }
723 884
724 map = drm_hash_entry(hash, struct drm_map_list, hash)->map; 885 obj = container_of(node, struct drm_gem_object, vma_node);
725 if (!map || 886 ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
726 ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
727 ret = -EPERM;
728 goto out_unlock;
729 }
730
731 ret = drm_gem_mmap_obj(map->handle, map->size, vma);
732 887
733out_unlock:
734 mutex_unlock(&dev->struct_mutex); 888 mutex_unlock(&dev->struct_mutex);
735 889
736 return ret; 890 return ret;
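The drm_gem_get_pages()/drm_gem_put_pages() pair added above is meant for drivers that pin shmem-backed objects; a minimal sketch of the expected pairing follows (the pin/unpin naming is an illustrative assumption):

/* Hypothetical pin path: populate the object's shmem mapping and
 * keep a reference on every page until unpin. */
static int example_pin_pages(struct drm_gem_object *obj,
			     struct page ***pages)
{
	struct page **p;

	p = drm_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(p))
		return PTR_ERR(p);

	*pages = p;
	return 0;
}

static void example_unpin_pages(struct drm_gem_object *obj,
				struct page **pages)
{
	/* dirty so shmem writes the pages back, accessed so reclaim
	 * treats them as recently used. */
	drm_gem_put_pages(obj, pages, true, true);
}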
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index ece72a8ac245..0a4f80574eb4 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -27,11 +27,7 @@
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include <drm/drm.h> 28#include <drm/drm.h>
29#include <drm/drm_gem_cma_helper.h> 29#include <drm/drm_gem_cma_helper.h>
30 30#include <drm/drm_vma_manager.h>
31static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
32{
33 return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
34}
35 31
36/* 32/*
37 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory 33 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
@@ -172,8 +168,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
172{ 168{
173 struct drm_gem_cma_object *cma_obj; 169 struct drm_gem_cma_object *cma_obj;
174 170
175 if (gem_obj->map_list.map) 171 drm_gem_free_mmap_offset(gem_obj);
176 drm_gem_free_mmap_offset(gem_obj);
177 172
178 cma_obj = to_drm_gem_cma_obj(gem_obj); 173 cma_obj = to_drm_gem_cma_obj(gem_obj);
179 174
@@ -237,7 +232,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
237 return -EINVAL; 232 return -EINVAL;
238 } 233 }
239 234
240 *offset = get_gem_mmap_offset(gem_obj); 235 *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
241 236
242 drm_gem_object_unreference(gem_obj); 237 drm_gem_object_unreference(gem_obj);
243 238
@@ -286,27 +281,16 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
286} 281}
287EXPORT_SYMBOL_GPL(drm_gem_cma_mmap); 282EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
288 283
289/*
290 * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
291 */
292int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
293 struct drm_device *drm, unsigned int handle)
294{
295 return drm_gem_handle_delete(file_priv, handle);
296}
297EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
298
299#ifdef CONFIG_DEBUG_FS 284#ifdef CONFIG_DEBUG_FS
300void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m) 285void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
301{ 286{
302 struct drm_gem_object *obj = &cma_obj->base; 287 struct drm_gem_object *obj = &cma_obj->base;
303 struct drm_device *dev = obj->dev; 288 struct drm_device *dev = obj->dev;
304 uint64_t off = 0; 289 uint64_t off;
305 290
306 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 291 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
307 292
308 if (obj->map_list.map) 293 off = drm_vma_node_start(&obj->vma_node);
309 off = (uint64_t)obj->map_list.hash.key;
310 294
311 seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d", 295 seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d",
312 obj->name, obj->refcount.refcount.counter, 296 obj->name, obj->refcount.refcount.counter,
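Both the GEM core and the CMA helper above implement the same userspace contract: the kernel mints a fake offset via the vma manager, and userspace passes it straight to mmap(2). A hypothetical userspace sequence using the standard dumb-buffer ioctl (fd, handle and size are assumed to exist already):

/* Userspace view of the fake mmap offset. */
struct drm_mode_map_dumb map = { .handle = handle };
void *ptr;

if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
	return -errno;

/* map.offset is the fake offset handed out by the kernel. */
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
	   fd, map.offset);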
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index d4b20ceda3fb..53298320080b 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -207,7 +207,7 @@ static int drm_gem_one_name_info(int id, void *ptr, void *data)
207 207
208 seq_printf(m, "%6d %8zd %7d %8d\n", 208 seq_printf(m, "%6d %8zd %7d %8d\n",
209 obj->name, obj->size, 209 obj->name, obj->size,
210 atomic_read(&obj->handle_count), 210 obj->handle_count,
211 atomic_read(&obj->refcount.refcount)); 211 atomic_read(&obj->refcount.refcount));
212 return 0; 212 return 0;
213} 213}
@@ -218,7 +218,11 @@ int drm_gem_name_info(struct seq_file *m, void *data)
218 struct drm_device *dev = node->minor->dev; 218 struct drm_device *dev = node->minor->dev;
219 219
220 seq_printf(m, " name size handles refcount\n"); 220 seq_printf(m, " name size handles refcount\n");
221
222 mutex_lock(&dev->object_name_lock);
221 idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m); 223 idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
224 mutex_unlock(&dev->object_name_lock);
225
222 return 0; 226 return 0;
223} 227}
224 228
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index ffd7a7ba70d4..07247e2855a2 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -217,29 +217,30 @@ int drm_getclient(struct drm_device *dev, void *data,
217 struct drm_file *file_priv) 217 struct drm_file *file_priv)
218{ 218{
219 struct drm_client *client = data; 219 struct drm_client *client = data;
220 struct drm_file *pt;
221 int idx;
222 int i;
223 220
224 idx = client->idx; 221 /*
225 i = 0; 222 * Hollowed-out getclient ioctl to keep some dead old drm tests/tools
226  223 	 * not breaking completely. Userspace tools stop enumerating once they
227 	mutex_lock(&dev->struct_mutex); 224 	 * get -EINVAL, hence this is the return value we need to hand back
228 	list_for_each_entry(pt, &dev->filelist, lhead) { 225 	 * when no clients are tracked.
229 if (i++ >= idx) { 226 *
230 client->auth = pt->authenticated; 227 * Unfortunately some clients (*cough* libva *cough*) use this in a fun
231 client->pid = pid_vnr(pt->pid); 228 * attempt to figure out whether they're authenticated or not. Since
232 			client->uid = from_kuid_munged(current_user_ns(), pt->uid); 229 	 * that's the only thing they care about, give it to them directly
233 client->magic = pt->magic; 230 * instead of walking one giant list.
234 client->iocs = pt->ioctl_count; 231 */
235 mutex_unlock(&dev->struct_mutex); 232 if (client->idx == 0) {
236 233 client->auth = file_priv->authenticated;
237 return 0; 234 client->pid = pid_vnr(file_priv->pid);
238 } 235 client->uid = from_kuid_munged(current_user_ns(),
236 file_priv->uid);
237 client->magic = 0;
238 client->iocs = 0;
239
240 return 0;
241 } else {
242 return -EINVAL;
239 } 243 }
240 mutex_unlock(&dev->struct_mutex);
241
242 return -EINVAL;
243} 244}
244 245
245/** 246/**
@@ -256,21 +257,10 @@ int drm_getstats(struct drm_device *dev, void *data,
256 struct drm_file *file_priv) 257 struct drm_file *file_priv)
257{ 258{
258 struct drm_stats *stats = data; 259 struct drm_stats *stats = data;
259 int i;
260 260
261 /* Clear stats to prevent userspace from eating its stack garbage. */
261 memset(stats, 0, sizeof(*stats)); 262 memset(stats, 0, sizeof(*stats));
262 263
263 for (i = 0; i < dev->counters; i++) {
264 if (dev->types[i] == _DRM_STAT_LOCK)
265 stats->data[i].value =
266 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
267 else
268 stats->data[i].value = atomic_read(&dev->counts[i]);
269 stats->data[i].type = dev->types[i];
270 }
271
272 stats->count = dev->counters;
273
274 return 0; 264 return 0;
275} 265}
276 266
@@ -303,6 +293,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
303 case DRM_CAP_TIMESTAMP_MONOTONIC: 293 case DRM_CAP_TIMESTAMP_MONOTONIC:
304 req->value = drm_timestamp_monotonic; 294 req->value = drm_timestamp_monotonic;
305 break; 295 break;
296 case DRM_CAP_ASYNC_PAGE_FLIP:
297 req->value = dev->mode_config.async_page_flip;
298 break;
306 default: 299 default:
307 return -EINVAL; 300 return -EINVAL;
308 } 301 }
@@ -352,9 +345,6 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri
352 retcode = -EINVAL; 345 retcode = -EINVAL;
353 goto done; 346 goto done;
354 } 347 }
355
356 if (dev->driver->set_version)
357 dev->driver->set_version(dev, sv);
358 } 348 }
359 349
360done: 350done:
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 8bcce7866d36..f92da0a32f0d 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -708,7 +708,10 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
708 /* Subtract time delta from raw timestamp to get final 708 /* Subtract time delta from raw timestamp to get final
709 * vblank_time timestamp for end of vblank. 709 * vblank_time timestamp for end of vblank.
710 */ 710 */
711 etime = ktime_sub_ns(etime, delta_ns); 711 if (delta_ns < 0)
712 etime = ktime_add_ns(etime, -delta_ns);
713 else
714 etime = ktime_sub_ns(etime, delta_ns);
712 *vblank_time = ktime_to_timeval(etime); 715 *vblank_time = ktime_to_timeval(etime);
713 716
714 DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", 717 DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
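The branch added above exists because delta_ns is a signed quantity while ktime_sub_ns() takes an unsigned nanosecond count: a negative delta (scanout already past the vblank) would be converted to a huge u64 and corrupt the timestamp. The rewritten logic, annotated:

/* delta_ns < 0 means the correction points forward in time, so it
 * must become an addition of the magnitude; passing a negative s64
 * to ktime_sub_ns() would wrap on the implicit u64 conversion. */
if (delta_ns < 0)
	etime = ktime_add_ns(etime, -delta_ns);
else
	etime = ktime_sub_ns(etime, delta_ns);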
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 126d50ea181f..64e44fad8ae8 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -86,7 +86,6 @@ void drm_free_agp(DRM_AGP_MEM * handle, int pages)
86{ 86{
87 agp_free_memory(handle); 87 agp_free_memory(handle);
88} 88}
89EXPORT_SYMBOL(drm_free_agp);
90 89
91/** Wrapper around agp_bind_memory() */ 90/** Wrapper around agp_bind_memory() */
92int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) 91int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
@@ -99,7 +98,6 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
99{ 98{
100 return agp_unbind_memory(handle); 99 return agp_unbind_memory(handle);
101} 100}
102EXPORT_SYMBOL(drm_unbind_agp);
103 101
104#else /* __OS_HAS_AGP */ 102#else /* __OS_HAS_AGP */
105static inline void *agp_remap(unsigned long offset, unsigned long size, 103static inline void *agp_remap(unsigned long offset, unsigned long size,
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 543b9b3171d3..af93cc55259f 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -49,58 +49,18 @@
49 49
50#define MM_UNUSED_TARGET 4 50#define MM_UNUSED_TARGET 4
51 51
52static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic) 52static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
53{ 53 unsigned long size,
54 struct drm_mm_node *child; 54 unsigned alignment,
55 55 unsigned long color,
56 if (atomic) 56 enum drm_mm_search_flags flags);
57 child = kzalloc(sizeof(*child), GFP_ATOMIC); 57static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
58 else 58 unsigned long size,
59 child = kzalloc(sizeof(*child), GFP_KERNEL); 59 unsigned alignment,
60 60 unsigned long color,
61 if (unlikely(child == NULL)) { 61 unsigned long start,
62 spin_lock(&mm->unused_lock); 62 unsigned long end,
63 if (list_empty(&mm->unused_nodes)) 63 enum drm_mm_search_flags flags);
64 child = NULL;
65 else {
66 child =
67 list_entry(mm->unused_nodes.next,
68 struct drm_mm_node, node_list);
69 list_del(&child->node_list);
70 --mm->num_unused;
71 }
72 spin_unlock(&mm->unused_lock);
73 }
74 return child;
75}
76
77/* drm_mm_pre_get() - pre allocate drm_mm_node structure
78 * drm_mm: memory manager struct we are pre-allocating for
79 *
80 * Returns 0 on success or -ENOMEM if allocation fails.
81 */
82int drm_mm_pre_get(struct drm_mm *mm)
83{
84 struct drm_mm_node *node;
85
86 spin_lock(&mm->unused_lock);
87 while (mm->num_unused < MM_UNUSED_TARGET) {
88 spin_unlock(&mm->unused_lock);
89 node = kzalloc(sizeof(*node), GFP_KERNEL);
90 spin_lock(&mm->unused_lock);
91
92 if (unlikely(node == NULL)) {
93 int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
94 spin_unlock(&mm->unused_lock);
95 return ret;
96 }
97 ++mm->num_unused;
98 list_add_tail(&node->node_list, &mm->unused_nodes);
99 }
100 spin_unlock(&mm->unused_lock);
101 return 0;
102}
103EXPORT_SYMBOL(drm_mm_pre_get);
104 64
105static void drm_mm_insert_helper(struct drm_mm_node *hole_node, 65static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
106 struct drm_mm_node *node, 66 struct drm_mm_node *node,
@@ -147,33 +107,27 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
147 } 107 }
148} 108}
149 109
150struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm, 110int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
151 unsigned long start,
152 unsigned long size,
153 bool atomic)
154{ 111{
155 struct drm_mm_node *hole, *node; 112 struct drm_mm_node *hole;
156 unsigned long end = start + size; 113 unsigned long end = node->start + node->size;
157 unsigned long hole_start; 114 unsigned long hole_start;
158 unsigned long hole_end; 115 unsigned long hole_end;
159 116
117 BUG_ON(node == NULL);
118
119 /* Find the relevant hole to add our node to */
160 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { 120 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
161 if (hole_start > start || hole_end < end) 121 if (hole_start > node->start || hole_end < end)
162 continue; 122 continue;
163 123
164 node = drm_mm_kmalloc(mm, atomic);
165 if (unlikely(node == NULL))
166 return NULL;
167
168 node->start = start;
169 node->size = size;
170 node->mm = mm; 124 node->mm = mm;
171 node->allocated = 1; 125 node->allocated = 1;
172 126
173 INIT_LIST_HEAD(&node->hole_stack); 127 INIT_LIST_HEAD(&node->hole_stack);
174 list_add(&node->node_list, &hole->node_list); 128 list_add(&node->node_list, &hole->node_list);
175 129
176 if (start == hole_start) { 130 if (node->start == hole_start) {
177 hole->hole_follows = 0; 131 hole->hole_follows = 0;
178 list_del_init(&hole->hole_stack); 132 list_del_init(&hole->hole_stack);
179 } 133 }
@@ -184,31 +138,14 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
184 node->hole_follows = 1; 138 node->hole_follows = 1;
185 } 139 }
186 140
187 return node; 141 return 0;
188 } 142 }
189 143
190 WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size); 144 WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
191 return NULL; 145 node->start, node->size);
192} 146 return -ENOSPC;
193EXPORT_SYMBOL(drm_mm_create_block);
194
195struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
196 unsigned long size,
197 unsigned alignment,
198 unsigned long color,
199 int atomic)
200{
201 struct drm_mm_node *node;
202
203 node = drm_mm_kmalloc(hole_node->mm, atomic);
204 if (unlikely(node == NULL))
205 return NULL;
206
207 drm_mm_insert_helper(hole_node, node, size, alignment, color);
208
209 return node;
210} 147}
211EXPORT_SYMBOL(drm_mm_get_block_generic); 148EXPORT_SYMBOL(drm_mm_reserve_node);
212 149
213/** 150/**
214 * Search for free space and insert a preallocated memory node. Returns 151 * Search for free space and insert a preallocated memory node. Returns
@@ -217,12 +154,13 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
217 */ 154 */
218int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, 155int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
219 unsigned long size, unsigned alignment, 156 unsigned long size, unsigned alignment,
220 unsigned long color) 157 unsigned long color,
158 enum drm_mm_search_flags flags)
221{ 159{
222 struct drm_mm_node *hole_node; 160 struct drm_mm_node *hole_node;
223 161
224 hole_node = drm_mm_search_free_generic(mm, size, alignment, 162 hole_node = drm_mm_search_free_generic(mm, size, alignment,
225 color, 0); 163 color, flags);
226 if (!hole_node) 164 if (!hole_node)
227 return -ENOSPC; 165 return -ENOSPC;
228 166
@@ -231,13 +169,6 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
231} 169}
232EXPORT_SYMBOL(drm_mm_insert_node_generic); 170EXPORT_SYMBOL(drm_mm_insert_node_generic);
233 171
234int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
235 unsigned long size, unsigned alignment)
236{
237 return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
238}
239EXPORT_SYMBOL(drm_mm_insert_node);
240
241static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, 172static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
242 struct drm_mm_node *node, 173 struct drm_mm_node *node,
243 unsigned long size, unsigned alignment, 174 unsigned long size, unsigned alignment,
@@ -290,27 +221,6 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
290 } 221 }
291} 222}
292 223
293struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
294 unsigned long size,
295 unsigned alignment,
296 unsigned long color,
297 unsigned long start,
298 unsigned long end,
299 int atomic)
300{
301 struct drm_mm_node *node;
302
303 node = drm_mm_kmalloc(hole_node->mm, atomic);
304 if (unlikely(node == NULL))
305 return NULL;
306
307 drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
308 start, end);
309
310 return node;
311}
312EXPORT_SYMBOL(drm_mm_get_block_range_generic);
313
314/** 224/**
315 * Search for free space and insert a preallocated memory node. Returns 225 * Search for free space and insert a preallocated memory node. Returns
316 * -ENOSPC if no suitable free area is available. This is for range 226 * -ENOSPC if no suitable free area is available. This is for range
@@ -318,13 +228,14 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
318 */ 228 */
319int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, 229int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
320 unsigned long size, unsigned alignment, unsigned long color, 230 unsigned long size, unsigned alignment, unsigned long color,
321 unsigned long start, unsigned long end) 231 unsigned long start, unsigned long end,
232 enum drm_mm_search_flags flags)
322{ 233{
323 struct drm_mm_node *hole_node; 234 struct drm_mm_node *hole_node;
324 235
325 hole_node = drm_mm_search_free_in_range_generic(mm, 236 hole_node = drm_mm_search_free_in_range_generic(mm,
326 size, alignment, color, 237 size, alignment, color,
327 start, end, 0); 238 start, end, flags);
328 if (!hole_node) 239 if (!hole_node)
329 return -ENOSPC; 240 return -ENOSPC;
330 241
@@ -335,14 +246,6 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n
335} 246}
336EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic); 247EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
337 248
338int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
339 unsigned long size, unsigned alignment,
340 unsigned long start, unsigned long end)
341{
342 return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
343}
344EXPORT_SYMBOL(drm_mm_insert_node_in_range);
345
346/** 249/**
347 * Remove a memory node from the allocator. 250 * Remove a memory node from the allocator.
348 */ 251 */
@@ -351,6 +254,9 @@ void drm_mm_remove_node(struct drm_mm_node *node)
351 struct drm_mm *mm = node->mm; 254 struct drm_mm *mm = node->mm;
352 struct drm_mm_node *prev_node; 255 struct drm_mm_node *prev_node;
353 256
257 if (WARN_ON(!node->allocated))
258 return;
259
354 BUG_ON(node->scanned_block || node->scanned_prev_free 260 BUG_ON(node->scanned_block || node->scanned_prev_free
355 || node->scanned_next_free); 261 || node->scanned_next_free);
356 262
@@ -377,28 +283,6 @@ void drm_mm_remove_node(struct drm_mm_node *node)
377} 283}
378EXPORT_SYMBOL(drm_mm_remove_node); 284EXPORT_SYMBOL(drm_mm_remove_node);
379 285
380/*
381 * Remove a memory node from the allocator and free the allocated struct
382 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
383 * drm_mm_get_block functions.
384 */
385void drm_mm_put_block(struct drm_mm_node *node)
386{
387
388 struct drm_mm *mm = node->mm;
389
390 drm_mm_remove_node(node);
391
392 spin_lock(&mm->unused_lock);
393 if (mm->num_unused < MM_UNUSED_TARGET) {
394 list_add(&node->node_list, &mm->unused_nodes);
395 ++mm->num_unused;
396 } else
397 kfree(node);
398 spin_unlock(&mm->unused_lock);
399}
400EXPORT_SYMBOL(drm_mm_put_block);
401
402static int check_free_hole(unsigned long start, unsigned long end, 286static int check_free_hole(unsigned long start, unsigned long end,
403 unsigned long size, unsigned alignment) 287 unsigned long size, unsigned alignment)
404{ 288{
@@ -414,11 +298,11 @@ static int check_free_hole(unsigned long start, unsigned long end,
414 return end >= start + size; 298 return end >= start + size;
415} 299}
416 300
417struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, 301static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
418 unsigned long size, 302 unsigned long size,
419 unsigned alignment, 303 unsigned alignment,
420 unsigned long color, 304 unsigned long color,
421 bool best_match) 305 enum drm_mm_search_flags flags)
422{ 306{
423 struct drm_mm_node *entry; 307 struct drm_mm_node *entry;
424 struct drm_mm_node *best; 308 struct drm_mm_node *best;
@@ -441,7 +325,7 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
441 if (!check_free_hole(adj_start, adj_end, size, alignment)) 325 if (!check_free_hole(adj_start, adj_end, size, alignment))
442 continue; 326 continue;
443 327
444 if (!best_match) 328 if (!(flags & DRM_MM_SEARCH_BEST))
445 return entry; 329 return entry;
446 330
447 if (entry->size < best_size) { 331 if (entry->size < best_size) {
@@ -452,15 +336,14 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
452 336
453 return best; 337 return best;
454} 338}
455EXPORT_SYMBOL(drm_mm_search_free_generic);
456 339
457struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, 340static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
458 unsigned long size, 341 unsigned long size,
459 unsigned alignment, 342 unsigned alignment,
460 unsigned long color, 343 unsigned long color,
461 unsigned long start, 344 unsigned long start,
462 unsigned long end, 345 unsigned long end,
463 bool best_match) 346 enum drm_mm_search_flags flags)
464{ 347{
465 struct drm_mm_node *entry; 348 struct drm_mm_node *entry;
466 struct drm_mm_node *best; 349 struct drm_mm_node *best;
@@ -488,7 +371,7 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
488 if (!check_free_hole(adj_start, adj_end, size, alignment)) 371 if (!check_free_hole(adj_start, adj_end, size, alignment))
489 continue; 372 continue;
490 373
491 if (!best_match) 374 if (!(flags & DRM_MM_SEARCH_BEST))
492 return entry; 375 return entry;
493 376
494 if (entry->size < best_size) { 377 if (entry->size < best_size) {
@@ -499,7 +382,6 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
499 382
500 return best; 383 return best;
501} 384}
502EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
503 385
504/** 386/**
505 * Moves an allocation. To be used with embedded struct drm_mm_node. 387 * Moves an allocation. To be used with embedded struct drm_mm_node.
@@ -634,8 +516,8 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
634 * corrupted. 516 * corrupted.
635 * 517 *
636 * When the scan list is empty, the selected memory nodes can be freed. An 518 * When the scan list is empty, the selected memory nodes can be freed. An
637 * immediately following drm_mm_search_free with best_match = 0 will then return 519 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
638 the just freed block (because it's at the top of the free_stack list). 520 return the just freed block (because it's at the top of the free_stack list).
639 * 521 *
640 * Returns one if this block should be evicted, zero otherwise. Will always 522 * Returns one if this block should be evicted, zero otherwise. Will always
641 * return zero when no hole has been found. 523 * return zero when no hole has been found.
@@ -672,10 +554,7 @@ EXPORT_SYMBOL(drm_mm_clean);
672void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) 554void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
673{ 555{
674 INIT_LIST_HEAD(&mm->hole_stack); 556 INIT_LIST_HEAD(&mm->hole_stack);
675 INIT_LIST_HEAD(&mm->unused_nodes);
676 mm->num_unused = 0;
677 mm->scanned_blocks = 0; 557 mm->scanned_blocks = 0;
678 spin_lock_init(&mm->unused_lock);
679 558
680 /* Clever trick to avoid a special case in the free hole tracking. */ 559 /* Clever trick to avoid a special case in the free hole tracking. */
681 INIT_LIST_HEAD(&mm->head_node.node_list); 560 INIT_LIST_HEAD(&mm->head_node.node_list);
@@ -695,22 +574,8 @@ EXPORT_SYMBOL(drm_mm_init);
695 574
696void drm_mm_takedown(struct drm_mm * mm) 575void drm_mm_takedown(struct drm_mm * mm)
697{ 576{
698 struct drm_mm_node *entry, *next; 577 WARN(!list_empty(&mm->head_node.node_list),
699 578 "Memory manager not clean during takedown.\n");
700 if (WARN(!list_empty(&mm->head_node.node_list),
701 "Memory manager not clean. Delaying takedown\n")) {
702 return;
703 }
704
705 spin_lock(&mm->unused_lock);
706 list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
707 list_del(&entry->node_list);
708 kfree(entry);
709 --mm->num_unused;
710 }
711 spin_unlock(&mm->unused_lock);
712
713 BUG_ON(mm->num_unused != 0);
714} 579}
715EXPORT_SYMBOL(drm_mm_takedown); 580EXPORT_SYMBOL(drm_mm_takedown);
716 581
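The drm_mm rework above removes the internally-allocating get_block family: nodes are now embedded in a driver structure and preallocated by the caller, with search behavior selected by flags. A hypothetical caller under the new interface (the example_vma wrapper is illustrative; DRM_MM_SEARCH_DEFAULT is assumed to be the zero flag value from the updated header):

/* Nodes are embedded, never allocated by drm_mm itself. */
struct example_vma {
	struct drm_mm_node node;
};

static int example_alloc(struct drm_mm *mm, struct example_vma *vma,
			 unsigned long size)
{
	/* First-fit search; pass DRM_MM_SEARCH_BEST for best-fit. */
	return drm_mm_insert_node_generic(mm, &vma->node, size,
					  0, 0, DRM_MM_SEARCH_DEFAULT);
}

static int example_reserve(struct drm_mm *mm, struct example_vma *vma,
			   unsigned long start, unsigned long size)
{
	/* drm_mm_reserve_node() carves out an exact caller-chosen
	 * range, e.g. for a firmware-initialized scanout buffer. */
	vma->node.start = start;
	vma->node.size = size;
	return drm_mm_reserve_node(mm, &vma->node);
}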
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index a6729bfe6860..fc2adb62b757 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -596,27 +596,6 @@ void drm_mode_set_name(struct drm_display_mode *mode)
596EXPORT_SYMBOL(drm_mode_set_name); 596EXPORT_SYMBOL(drm_mode_set_name);
597 597
598/** 598/**
599 * drm_mode_list_concat - move modes from one list to another
600 * @head: source list
601 * @new: dst list
602 *
603 * LOCKING:
604 * Caller must ensure both lists are locked.
605 *
606 * Move all the modes from @head to @new.
607 */
608void drm_mode_list_concat(struct list_head *head, struct list_head *new)
609{
610
611 struct list_head *entry, *tmp;
612
613 list_for_each_safe(entry, tmp, head) {
614 list_move_tail(entry, new);
615 }
616}
617EXPORT_SYMBOL(drm_mode_list_concat);
618
619/**
620 * drm_mode_width - get the width of a mode 599 * drm_mode_width - get the width of a mode
621 * @mode: mode 600 * @mode: mode
622 * 601 *
@@ -923,43 +902,6 @@ void drm_mode_validate_size(struct drm_device *dev,
923EXPORT_SYMBOL(drm_mode_validate_size); 902EXPORT_SYMBOL(drm_mode_validate_size);
924 903
925/** 904/**
926 * drm_mode_validate_clocks - validate modes against clock limits
927 * @dev: DRM device
928 * @mode_list: list of modes to check
929 * @min: minimum clock rate array
930 * @max: maximum clock rate array
931 * @n_ranges: number of clock ranges (size of arrays)
932 *
933 * LOCKING:
934 * Caller must hold a lock protecting @mode_list.
935 *
936 * Some code may need to check a mode list against the clock limits of the
937 * device in question. This function walks the mode list, testing to make
938 * sure each mode falls within a given range (defined by @min and @max
939 * arrays) and sets @mode->status as needed.
940 */
941void drm_mode_validate_clocks(struct drm_device *dev,
942 struct list_head *mode_list,
943 int *min, int *max, int n_ranges)
944{
945 struct drm_display_mode *mode;
946 int i;
947
948 list_for_each_entry(mode, mode_list, head) {
949 bool good = false;
950 for (i = 0; i < n_ranges; i++) {
951 if (mode->clock >= min[i] && mode->clock <= max[i]) {
952 good = true;
953 break;
954 }
955 }
956 if (!good)
957 mode->status = MODE_CLOCK_RANGE;
958 }
959}
960EXPORT_SYMBOL(drm_mode_validate_clocks);
961
962/**
963 * drm_mode_prune_invalid - remove invalid modes from mode list 905 * drm_mode_prune_invalid - remove invalid modes from mode list
964 * @dev: DRM device 906 * @dev: DRM device
965 * @mode_list: list of modes to check 907 * @mode_list: list of modes to check
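
drm_mode_validate_clocks() is removed with no in-tree users left. A driver
that still needs the check can open-code it against its probed modes; a
hypothetical single-range version (min_khz and max_khz are invented limits):

#include <drm/drmP.h>

static void example_validate_clock(struct list_head *mode_list,
				   int min_khz, int max_khz)
{
	struct drm_display_mode *mode;

	list_for_each_entry(mode, mode_list, head) {
		if (mode->clock < min_khz || mode->clock > max_khz)
			mode->status = MODE_CLOCK_RANGE;
	}
}
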
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 80c0b2b29801..1f96cee6eee8 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -52,10 +52,8 @@
52drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align) 52drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
53{ 53{
54 drm_dma_handle_t *dmah; 54 drm_dma_handle_t *dmah;
55#if 1
56 unsigned long addr; 55 unsigned long addr;
57 size_t sz; 56 size_t sz;
58#endif
59 57
60 /* pci_alloc_consistent only guarantees alignment to the smallest 58 /* pci_alloc_consistent only guarantees alignment to the smallest
61 * PAGE_SIZE order which is greater than or equal to the requested size. 59 * PAGE_SIZE order which is greater than or equal to the requested size.
@@ -97,10 +95,8 @@ EXPORT_SYMBOL(drm_pci_alloc);
97 */ 95 */
98void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) 96void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
99{ 97{
100#if 1
101 unsigned long addr; 98 unsigned long addr;
102 size_t sz; 99 size_t sz;
103#endif
104 100
105 if (dmah->vaddr) { 101 if (dmah->vaddr) {
106 /* XXX - Is virt_to_page() legal for consistent mem? */ 102 /* XXX - Is virt_to_page() legal for consistent mem? */
@@ -276,17 +272,26 @@ static int drm_pci_agp_init(struct drm_device *dev)
276 DRM_ERROR("Cannot initialize the agpgart module.\n"); 272 DRM_ERROR("Cannot initialize the agpgart module.\n");
277 return -EINVAL; 273 return -EINVAL;
278 } 274 }
279 if (drm_core_has_MTRR(dev)) { 275 if (dev->agp) {
280 if (dev->agp) 276 dev->agp->agp_mtrr = arch_phys_wc_add(
281 dev->agp->agp_mtrr = arch_phys_wc_add( 277 dev->agp->agp_info.aper_base,
282 dev->agp->agp_info.aper_base, 278 dev->agp->agp_info.aper_size *
283 dev->agp->agp_info.aper_size * 279 1024 * 1024);
284 1024 * 1024);
285 } 280 }
286 } 281 }
287 return 0; 282 return 0;
288} 283}
289 284
285static void drm_pci_agp_destroy(struct drm_device *dev)
286{
287 if (drm_core_has_AGP(dev) && dev->agp) {
288 arch_phys_wc_del(dev->agp->agp_mtrr);
289 drm_agp_clear(dev);
290 drm_agp_destroy(dev->agp);
291 dev->agp = NULL;
292 }
293}
294
290static struct drm_bus drm_pci_bus = { 295static struct drm_bus drm_pci_bus = {
291 .bus_type = DRIVER_BUS_PCI, 296 .bus_type = DRIVER_BUS_PCI,
292 .get_irq = drm_pci_get_irq, 297 .get_irq = drm_pci_get_irq,
@@ -295,6 +300,7 @@ static struct drm_bus drm_pci_bus = {
295 .set_unique = drm_pci_set_unique, 300 .set_unique = drm_pci_set_unique,
296 .irq_by_busid = drm_pci_irq_by_busid, 301 .irq_by_busid = drm_pci_irq_by_busid,
297 .agp_init = drm_pci_agp_init, 302 .agp_init = drm_pci_agp_init,
303 .agp_destroy = drm_pci_agp_destroy,
298}; 304};
299 305
300/** 306/**
@@ -348,6 +354,12 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
348 goto err_g2; 354 goto err_g2;
349 } 355 }
350 356
357 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
358 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
359 if (ret)
360 goto err_g21;
361 }
362
351 if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY))) 363 if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
352 goto err_g3; 364 goto err_g3;
353 365
@@ -377,6 +389,9 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
377err_g4: 389err_g4:
378 drm_put_minor(&dev->primary); 390 drm_put_minor(&dev->primary);
379err_g3: 391err_g3:
392 if (dev->render)
393 drm_put_minor(&dev->render);
394err_g21:
380 if (drm_core_check_feature(dev, DRIVER_MODESET)) 395 if (drm_core_check_feature(dev, DRIVER_MODESET))
381 drm_put_minor(&dev->control); 396 drm_put_minor(&dev->control);
382err_g2: 397err_g2:
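
From a driver's point of view, opting into the experimental render node is a
one-flag change; the bus code above handles registration and unwind whenever
drm.rnodes is enabled. Sketch with all other callbacks elided:

static struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_RENDER,
	/* fops, ioctls and the remaining hooks go here */
};
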
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index b8a282ea8751..f7a18c6ba4c4 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -28,7 +28,7 @@
28#include <linux/export.h> 28#include <linux/export.h>
29#include <drm/drmP.h> 29#include <drm/drmP.h>
30 30
31/** 31/*
32 * Register. 32 * Register.
33 * 33 *
34 * \param platdev - Platform device structure 34 * \param platdev - Platform device structure
@@ -39,8 +39,8 @@
39 * Try and register, if we fail to register, backout previous work. 39 * Try and register, if we fail to register, backout previous work.
40 */ 40 */
41 41
42int drm_get_platform_dev(struct platform_device *platdev, 42static int drm_get_platform_dev(struct platform_device *platdev,
43 struct drm_driver *driver) 43 struct drm_driver *driver)
44{ 44{
45 struct drm_device *dev; 45 struct drm_device *dev;
46 int ret; 46 int ret;
@@ -69,6 +69,12 @@ int drm_get_platform_dev(struct platform_device *platdev,
69 goto err_g1; 69 goto err_g1;
70 } 70 }
71 71
72 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
73 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
74 if (ret)
75 goto err_g11;
76 }
77
72 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); 78 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
73 if (ret) 79 if (ret)
74 goto err_g2; 80 goto err_g2;
@@ -100,6 +106,9 @@ int drm_get_platform_dev(struct platform_device *platdev,
100err_g3: 106err_g3:
101 drm_put_minor(&dev->primary); 107 drm_put_minor(&dev->primary);
102err_g2: 108err_g2:
109 if (dev->render)
110 drm_put_minor(&dev->render);
111err_g11:
103 if (drm_core_check_feature(dev, DRIVER_MODESET)) 112 if (drm_core_check_feature(dev, DRIVER_MODESET))
104 drm_put_minor(&dev->control); 113 drm_put_minor(&dev->control);
105err_g1: 114err_g1:
@@ -107,7 +116,6 @@ err_g1:
107 mutex_unlock(&drm_global_mutex); 116 mutex_unlock(&drm_global_mutex);
108 return ret; 117 return ret;
109} 118}
110EXPORT_SYMBOL(drm_get_platform_dev);
111 119
112static int drm_platform_get_irq(struct drm_device *dev) 120static int drm_platform_get_irq(struct drm_device *dev)
113{ 121{
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 85e450e3241c..276d470f7b3e 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -83,6 +83,34 @@ static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
83 return 0; 83 return 0;
84} 84}
85 85
86static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
87 uint32_t handle)
88{
89 struct drm_prime_member *member;
90
91 list_for_each_entry(member, &prime_fpriv->head, entry) {
92 if (member->handle == handle)
93 return member->dma_buf;
94 }
95
96 return NULL;
97}
98
99static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
100 struct dma_buf *dma_buf,
101 uint32_t *handle)
102{
103 struct drm_prime_member *member;
104
105 list_for_each_entry(member, &prime_fpriv->head, entry) {
106 if (member->dma_buf == dma_buf) {
107 *handle = member->handle;
108 return 0;
109 }
110 }
111 return -ENOENT;
112}
113
86static int drm_gem_map_attach(struct dma_buf *dma_buf, 114static int drm_gem_map_attach(struct dma_buf *dma_buf,
87 struct device *target_dev, 115 struct device *target_dev,
88 struct dma_buf_attachment *attach) 116 struct dma_buf_attachment *attach)
@@ -131,9 +159,8 @@ static void drm_gem_map_detach(struct dma_buf *dma_buf,
131 attach->priv = NULL; 159 attach->priv = NULL;
132} 160}
133 161
134static void drm_prime_remove_buf_handle_locked( 162void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
135 struct drm_prime_file_private *prime_fpriv, 163 struct dma_buf *dma_buf)
136 struct dma_buf *dma_buf)
137{ 164{
138 struct drm_prime_member *member, *safe; 165 struct drm_prime_member *member, *safe;
139 166
@@ -167,8 +194,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
167 if (WARN_ON(prime_attach->dir != DMA_NONE)) 194 if (WARN_ON(prime_attach->dir != DMA_NONE))
168 return ERR_PTR(-EBUSY); 195 return ERR_PTR(-EBUSY);
169 196
170 mutex_lock(&obj->dev->struct_mutex);
171
172 sgt = obj->dev->driver->gem_prime_get_sg_table(obj); 197 sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
173 198
174 if (!IS_ERR(sgt)) { 199 if (!IS_ERR(sgt)) {
@@ -182,7 +207,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
182 } 207 }
183 } 208 }
184 209
185 mutex_unlock(&obj->dev->struct_mutex);
186 return sgt; 210 return sgt;
187} 211}
188 212
@@ -192,16 +216,14 @@ static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
192 /* nothing to be done here */ 216 /* nothing to be done here */
193} 217}
194 218
195static void drm_gem_dmabuf_release(struct dma_buf *dma_buf) 219void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
196{ 220{
197 struct drm_gem_object *obj = dma_buf->priv; 221 struct drm_gem_object *obj = dma_buf->priv;
198 222
199 if (obj->export_dma_buf == dma_buf) { 223 /* drop the reference the export fd holds */
200 /* drop the reference on the export fd holds */ 224 drm_gem_object_unreference_unlocked(obj);
201 obj->export_dma_buf = NULL;
202 drm_gem_object_unreference_unlocked(obj);
203 }
204} 225}
226EXPORT_SYMBOL(drm_gem_dmabuf_release);
205 227
206static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf) 228static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
207{ 229{
@@ -300,62 +322,107 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
300} 322}
301EXPORT_SYMBOL(drm_gem_prime_export); 323EXPORT_SYMBOL(drm_gem_prime_export);
302 324
325static struct dma_buf *export_and_register_object(struct drm_device *dev,
326 struct drm_gem_object *obj,
327 uint32_t flags)
328{
329 struct dma_buf *dmabuf;
330
331 /* prevent races with concurrent gem_close. */
332 if (obj->handle_count == 0) {
333 dmabuf = ERR_PTR(-ENOENT);
334 return dmabuf;
335 }
336
337 dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
338 if (IS_ERR(dmabuf)) {
339 /* normally the created dma-buf takes ownership of the ref,
340 * but if that fails then drop the ref
341 */
342 return dmabuf;
343 }
344
345 /*
346 * Note that callers do not need to clean up the export cache
347 * since the check for obj->handle_count guarantees that someone
348 * will clean it up.
349 */
350 obj->dma_buf = dmabuf;
351 get_dma_buf(obj->dma_buf);
352 /* Grab a new ref since the object is now used by the dma-buf */
353 drm_gem_object_reference(obj);
354
355 return dmabuf;
356}
357
303int drm_gem_prime_handle_to_fd(struct drm_device *dev, 358int drm_gem_prime_handle_to_fd(struct drm_device *dev,
304 struct drm_file *file_priv, uint32_t handle, uint32_t flags, 359 struct drm_file *file_priv, uint32_t handle, uint32_t flags,
305 int *prime_fd) 360 int *prime_fd)
306{ 361{
307 struct drm_gem_object *obj; 362 struct drm_gem_object *obj;
308 void *buf;
309 int ret = 0; 363 int ret = 0;
310 struct dma_buf *dmabuf; 364 struct dma_buf *dmabuf;
311 365
366 mutex_lock(&file_priv->prime.lock);
312 obj = drm_gem_object_lookup(dev, file_priv, handle); 367 obj = drm_gem_object_lookup(dev, file_priv, handle);
313 if (!obj) 368 if (!obj) {
314 return -ENOENT; 369 ret = -ENOENT;
370 goto out_unlock;
371 }
315 372
316 mutex_lock(&file_priv->prime.lock); 373 dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
374 if (dmabuf) {
375 get_dma_buf(dmabuf);
376 goto out_have_handle;
377 }
378
379 mutex_lock(&dev->object_name_lock);
317 /* re-export the original imported object */ 380 /* re-export the original imported object */
318 if (obj->import_attach) { 381 if (obj->import_attach) {
319 dmabuf = obj->import_attach->dmabuf; 382 dmabuf = obj->import_attach->dmabuf;
383 get_dma_buf(dmabuf);
320 goto out_have_obj; 384 goto out_have_obj;
321 } 385 }
322 386
323 if (obj->export_dma_buf) { 387 if (obj->dma_buf) {
324 dmabuf = obj->export_dma_buf; 388 get_dma_buf(obj->dma_buf);
389 dmabuf = obj->dma_buf;
325 goto out_have_obj; 390 goto out_have_obj;
326 } 391 }
327 392
328 buf = dev->driver->gem_prime_export(dev, obj, flags); 393 dmabuf = export_and_register_object(dev, obj, flags);
329 if (IS_ERR(buf)) { 394 if (IS_ERR(dmabuf)) {
330 /* normally the created dma-buf takes ownership of the ref, 395 /* normally the created dma-buf takes ownership of the ref,
331 * but if that fails then drop the ref 396 * but if that fails then drop the ref
332 */ 397 */
333 ret = PTR_ERR(buf); 398 ret = PTR_ERR(dmabuf);
399 mutex_unlock(&dev->object_name_lock);
334 goto out; 400 goto out;
335 } 401 }
336 obj->export_dma_buf = buf;
337 402
338 /* if we've exported this buffer the cheat and add it to the import list 403out_have_obj:
339 * so we get the correct handle back 404 /*
405 * If we've exported this buffer then cheat and add it to the import list
406 * so we get the correct handle back. We must do this under the
407 * protection of dev->object_name_lock to ensure that a racing gem close
408 * ioctl doesn't fail to remove this buffer handle from the cache.
340 */ 409 */
341 ret = drm_prime_add_buf_handle(&file_priv->prime, 410 ret = drm_prime_add_buf_handle(&file_priv->prime,
342 obj->export_dma_buf, handle); 411 dmabuf, handle);
412 mutex_unlock(&dev->object_name_lock);
343 if (ret) 413 if (ret)
344 goto fail_put_dmabuf; 414 goto fail_put_dmabuf;
345 415
346 ret = dma_buf_fd(buf, flags); 416out_have_handle:
347 if (ret < 0)
348 goto fail_rm_handle;
349
350 *prime_fd = ret;
351 mutex_unlock(&file_priv->prime.lock);
352 return 0;
353
354out_have_obj:
355 get_dma_buf(dmabuf);
356 ret = dma_buf_fd(dmabuf, flags); 417 ret = dma_buf_fd(dmabuf, flags);
418 /*
419 * We must _not_ remove the buffer from the handle cache since the newly
420 * created dma buf is already linked in the global obj->dma_buf pointer,
421 * and that is invariant as long as a userspace gem handle exists.
422 * Closing the handle will clean out the cache anyway, so we don't leak.
423 */
357 if (ret < 0) { 424 if (ret < 0) {
358 dma_buf_put(dmabuf); 425 goto fail_put_dmabuf;
359 } else { 426 } else {
360 *prime_fd = ret; 427 *prime_fd = ret;
361 ret = 0; 428 ret = 0;
@@ -363,15 +430,13 @@ out_have_obj:
363 430
364 goto out; 431 goto out;
365 432
366fail_rm_handle:
367 drm_prime_remove_buf_handle_locked(&file_priv->prime, buf);
368fail_put_dmabuf: 433fail_put_dmabuf:
369 /* clear NOT to be checked when releasing dma_buf */ 434 dma_buf_put(dmabuf);
370 obj->export_dma_buf = NULL;
371 dma_buf_put(buf);
372out: 435out:
373 drm_gem_object_unreference_unlocked(obj); 436 drm_gem_object_unreference_unlocked(obj);
437out_unlock:
374 mutex_unlock(&file_priv->prime.lock); 438 mutex_unlock(&file_priv->prime.lock);
439
375 return ret; 440 return ret;
376} 441}
377EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); 442EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
@@ -446,19 +511,26 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
446 511
447 ret = drm_prime_lookup_buf_handle(&file_priv->prime, 512 ret = drm_prime_lookup_buf_handle(&file_priv->prime,
448 dma_buf, handle); 513 dma_buf, handle);
449 if (!ret) { 514 if (ret == 0)
450 ret = 0;
451 goto out_put; 515 goto out_put;
452 }
453 516
454 /* never seen this one, need to import */ 517 /* never seen this one, need to import */
518 mutex_lock(&dev->object_name_lock);
455 obj = dev->driver->gem_prime_import(dev, dma_buf); 519 obj = dev->driver->gem_prime_import(dev, dma_buf);
456 if (IS_ERR(obj)) { 520 if (IS_ERR(obj)) {
457 ret = PTR_ERR(obj); 521 ret = PTR_ERR(obj);
458 goto out_put; 522 goto out_unlock;
459 } 523 }
460 524
461 ret = drm_gem_handle_create(file_priv, obj, handle); 525 if (obj->dma_buf) {
526 WARN_ON(obj->dma_buf != dma_buf);
527 } else {
528 obj->dma_buf = dma_buf;
529 get_dma_buf(dma_buf);
530 }
531
532 /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
533 ret = drm_gem_handle_create_tail(file_priv, obj, handle);
462 drm_gem_object_unreference_unlocked(obj); 534 drm_gem_object_unreference_unlocked(obj);
463 if (ret) 535 if (ret)
464 goto out_put; 536 goto out_put;
@@ -478,7 +550,9 @@ fail:
478 /* hmm, if driver attached, we are relying on the free-object path 550 /* hmm, if driver attached, we are relying on the free-object path
479 * to detach.. which seems ok.. 551 * to detach.. which seems ok..
480 */ 552 */
481 drm_gem_object_handle_unreference_unlocked(obj); 553 drm_gem_handle_delete(file_priv, *handle);
554out_unlock:
555 mutex_unlock(&dev->object_name_lock);
482out_put: 556out_put:
483 dma_buf_put(dma_buf); 557 dma_buf_put(dma_buf);
484 mutex_unlock(&file_priv->prime.lock); 558 mutex_unlock(&file_priv->prime.lock);
@@ -618,25 +692,3 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
618 WARN_ON(!list_empty(&prime_fpriv->head)); 692 WARN_ON(!list_empty(&prime_fpriv->head));
619} 693}
620EXPORT_SYMBOL(drm_prime_destroy_file_private); 694EXPORT_SYMBOL(drm_prime_destroy_file_private);
621
622int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
623{
624 struct drm_prime_member *member;
625
626 list_for_each_entry(member, &prime_fpriv->head, entry) {
627 if (member->dma_buf == dma_buf) {
628 *handle = member->handle;
629 return 0;
630 }
631 }
632 return -ENOENT;
633}
634EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
635
636void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
637{
638 mutex_lock(&prime_fpriv->lock);
639 drm_prime_remove_buf_handle_locked(prime_fpriv, dma_buf);
640 mutex_unlock(&prime_fpriv->lock);
641}
642EXPORT_SYMBOL(drm_prime_remove_buf_handle);
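
The net effect of the new handle cache: repeated exports of one handle now
resolve to the same dma_buf instead of creating a fresh export each time.
Illustrative call sequence (dev, file_priv and handle are placeholders, and
error handling is omitted):

int fd1, fd2;

drm_gem_prime_handle_to_fd(dev, file_priv, handle, DRM_CLOEXEC, &fd1);
drm_gem_prime_handle_to_fd(dev, file_priv, handle, DRM_CLOEXEC, &fd2);
/* fd1 and fd2 wrap the single dma_buf cached in obj->dma_buf and in the
 * per-file handle cache, so fd_to_handle on either fd also resolves back
 * to the original handle rather than creating a duplicate. */
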
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
deleted file mode 100644
index d7f2324b4fb1..000000000000
--- a/drivers/gpu/drm/drm_proc.c
+++ /dev/null
@@ -1,209 +0,0 @@
1/**
2 * \file drm_proc.c
3 * /proc support for DRM
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 *
8 * \par Acknowledgements:
9 * Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
10 * the problem with the proc files not outputting all their information.
11 */
12
13/*
14 * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
15 *
16 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
17 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
18 * All Rights Reserved.
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining a
21 * copy of this software and associated documentation files (the "Software"),
22 * to deal in the Software without restriction, including without limitation
23 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
24 * and/or sell copies of the Software, and to permit persons to whom the
25 * Software is furnished to do so, subject to the following conditions:
26 *
27 * The above copyright notice and this permission notice (including the next
28 * paragraph) shall be included in all copies or substantial portions of the
29 * Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
32 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
34 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
35 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
36 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
37 * OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#include <linux/seq_file.h>
41#include <linux/slab.h>
42#include <linux/export.h>
43#include <drm/drmP.h>
44
45/***************************************************
46 * Initialization, etc.
47 **************************************************/
48
49/**
50 * Proc file list.
51 */
52static const struct drm_info_list drm_proc_list[] = {
53 {"name", drm_name_info, 0},
54 {"vm", drm_vm_info, 0},
55 {"clients", drm_clients_info, 0},
56 {"bufs", drm_bufs_info, 0},
57 {"gem_names", drm_gem_name_info, DRIVER_GEM},
58#if DRM_DEBUG_CODE
59 {"vma", drm_vma_info, 0},
60#endif
61};
62#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
63
64static int drm_proc_open(struct inode *inode, struct file *file)
65{
66 struct drm_info_node* node = PDE_DATA(inode);
67
68 return single_open(file, node->info_ent->show, node);
69}
70
71static const struct file_operations drm_proc_fops = {
72 .owner = THIS_MODULE,
73 .open = drm_proc_open,
74 .read = seq_read,
75 .llseek = seq_lseek,
76 .release = single_release,
77};
78
79
80/**
81 * Initialize a given set of proc files for a device
82 *
83 * \param files The array of files to create
84 * \param count The number of files given
85 * \param root DRI proc dir entry.
86 * \param minor device minor number
87 * \return Zero on success, non-zero on failure
88 *
89 * Create a given set of proc files represented by an array of
90 * gdm_proc_lists in the given root directory.
91 */
92static int drm_proc_create_files(const struct drm_info_list *files, int count,
93 struct proc_dir_entry *root, struct drm_minor *minor)
94{
95 struct drm_device *dev = minor->dev;
96 struct proc_dir_entry *ent;
97 struct drm_info_node *tmp;
98 int i;
99
100 for (i = 0; i < count; i++) {
101 u32 features = files[i].driver_features;
102
103 if (features != 0 &&
104 (dev->driver->driver_features & features) != features)
105 continue;
106
107 tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
108 if (!tmp)
109 return -1;
110
111 tmp->minor = minor;
112 tmp->info_ent = &files[i];
113 list_add(&tmp->list, &minor->proc_nodes.list);
114
115 ent = proc_create_data(files[i].name, S_IRUGO, root,
116 &drm_proc_fops, tmp);
117 if (!ent) {
118 DRM_ERROR("Cannot create /proc/dri/%u/%s\n",
119 minor->index, files[i].name);
120 list_del(&tmp->list);
121 kfree(tmp);
122 return -1;
123 }
124 }
125 return 0;
126}
127
128/**
129 * Initialize the DRI proc filesystem for a device
130 *
131 * \param dev DRM device
132 * \param root DRI proc dir entry.
133 * \param dev_root resulting DRI device proc dir entry.
134 * \return root entry pointer on success, or NULL on failure.
135 *
136 * Create the DRI proc root entry "/proc/dri", the device proc root entry
137 * "/proc/dri/%minor%/", and each entry in proc_list as
138 * "/proc/dri/%minor%/%name%".
139 */
140int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root)
141{
142 char name[12];
143 int ret;
144
145 INIT_LIST_HEAD(&minor->proc_nodes.list);
146 sprintf(name, "%u", minor->index);
147 minor->proc_root = proc_mkdir(name, root);
148 if (!minor->proc_root) {
149 DRM_ERROR("Cannot create /proc/dri/%s\n", name);
150 return -1;
151 }
152
153 ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
154 minor->proc_root, minor);
155 if (ret) {
156 remove_proc_subtree(name, root);
157 minor->proc_root = NULL;
158 DRM_ERROR("Failed to create core drm proc files\n");
159 return ret;
160 }
161
162 return 0;
163}
164
165static int drm_proc_remove_files(const struct drm_info_list *files, int count,
166 struct drm_minor *minor)
167{
168 struct list_head *pos, *q;
169 struct drm_info_node *tmp;
170 int i;
171
172 for (i = 0; i < count; i++) {
173 list_for_each_safe(pos, q, &minor->proc_nodes.list) {
174 tmp = list_entry(pos, struct drm_info_node, list);
175 if (tmp->info_ent == &files[i]) {
176 remove_proc_entry(files[i].name,
177 minor->proc_root);
178 list_del(pos);
179 kfree(tmp);
180 }
181 }
182 }
183 return 0;
184}
185
186/**
187 * Cleanup the proc filesystem resources.
188 *
189 * \param minor device minor number.
190 * \param root DRI proc dir entry.
191 * \param dev_root DRI device proc dir entry.
192 * \return always zero.
193 *
194 * Remove all proc entries created by proc_init().
195 */
196int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
197{
198 char name[64];
199
200 if (!root || !minor->proc_root)
201 return 0;
202
203 drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
204
205 sprintf(name, "%d", minor->index);
206 remove_proc_subtree(name, root);
207 return 0;
208}
209
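
The removed proc files do not lose their content: the same drm_info_list
machinery is already exposed through debugfs. A sketch of the equivalent
registration (example_files and example_debugfs_init are invented; the info
callbacks and drm_debugfs_create_files() live in drm_info.c and drm_debugfs.c):

#include <drm/drmP.h>

static const struct drm_info_list example_files[] = {
	{"name", drm_name_info, 0},
	{"clients", drm_clients_info, 0},
};

static int example_debugfs_init(struct drm_minor *minor)
{
	/* appears under /sys/kernel/debug/dri/<minor>/ */
	return drm_debugfs_create_files(example_files,
					ARRAY_SIZE(example_files),
					minor->debugfs_root, minor);
}
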
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index d87f60bbc330..1c78406f6e71 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -46,7 +46,7 @@ static inline void *drm_vmalloc_dma(unsigned long size)
46#endif 46#endif
47} 47}
48 48
49void drm_sg_cleanup(struct drm_sg_mem * entry) 49static void drm_sg_cleanup(struct drm_sg_mem * entry)
50{ 50{
51 struct page *page; 51 struct page *page;
52 int i; 52 int i;
@@ -64,19 +64,32 @@ void drm_sg_cleanup(struct drm_sg_mem * entry)
64 kfree(entry); 64 kfree(entry);
65} 65}
66 66
67void drm_legacy_sg_cleanup(struct drm_device *dev)
68{
69 if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
70 !drm_core_check_feature(dev, DRIVER_MODESET)) {
71 drm_sg_cleanup(dev->sg);
72 dev->sg = NULL;
73 }
74}
67#ifdef _LP64 75#ifdef _LP64
68# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1))) 76# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
69#else 77#else
70# define ScatterHandle(x) (unsigned int)(x) 78# define ScatterHandle(x) (unsigned int)(x)
71#endif 79#endif
72 80
73int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) 81int drm_sg_alloc(struct drm_device *dev, void *data,
82 struct drm_file *file_priv)
74{ 83{
84 struct drm_scatter_gather *request = data;
75 struct drm_sg_mem *entry; 85 struct drm_sg_mem *entry;
76 unsigned long pages, i, j; 86 unsigned long pages, i, j;
77 87
78 DRM_DEBUG("\n"); 88 DRM_DEBUG("\n");
79 89
90 if (drm_core_check_feature(dev, DRIVER_MODESET))
91 return -EINVAL;
92
80 if (!drm_core_check_feature(dev, DRIVER_SG)) 93 if (!drm_core_check_feature(dev, DRIVER_SG))
81 return -EINVAL; 94 return -EINVAL;
82 95
@@ -181,21 +194,15 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
181 return -ENOMEM; 194 return -ENOMEM;
182} 195}
183 196
184int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
185 struct drm_file *file_priv)
186{
187 struct drm_scatter_gather *request = data;
188
189 return drm_sg_alloc(dev, request);
190
191}
192
193int drm_sg_free(struct drm_device *dev, void *data, 197int drm_sg_free(struct drm_device *dev, void *data,
194 struct drm_file *file_priv) 198 struct drm_file *file_priv)
195{ 199{
196 struct drm_scatter_gather *request = data; 200 struct drm_scatter_gather *request = data;
197 struct drm_sg_mem *entry; 201 struct drm_sg_mem *entry;
198 202
203 if (drm_core_check_feature(dev, DRIVER_MODESET))
204 return -EINVAL;
205
199 if (!drm_core_check_feature(dev, DRIVER_SG)) 206 if (!drm_core_check_feature(dev, DRIVER_SG))
200 return -EINVAL; 207 return -EINVAL;
201 208
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 327ca19cda85..e7eb0276f7f1 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -40,6 +40,9 @@
40unsigned int drm_debug = 0; /* 1 to enable debug output */ 40unsigned int drm_debug = 0; /* 1 to enable debug output */
41EXPORT_SYMBOL(drm_debug); 41EXPORT_SYMBOL(drm_debug);
42 42
43unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */
44EXPORT_SYMBOL(drm_rnodes);
45
43unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ 46unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
44EXPORT_SYMBOL(drm_vblank_offdelay); 47EXPORT_SYMBOL(drm_vblank_offdelay);
45 48
@@ -56,11 +59,13 @@ MODULE_AUTHOR(CORE_AUTHOR);
56MODULE_DESCRIPTION(CORE_DESC); 59MODULE_DESCRIPTION(CORE_DESC);
57MODULE_LICENSE("GPL and additional rights"); 60MODULE_LICENSE("GPL and additional rights");
58MODULE_PARM_DESC(debug, "Enable debug output"); 61MODULE_PARM_DESC(debug, "Enable debug output");
62MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
59MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]"); 63MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
60MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]"); 64MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
61MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps"); 65MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
62 66
63module_param_named(debug, drm_debug, int, 0600); 67module_param_named(debug, drm_debug, int, 0600);
68module_param_named(rnodes, drm_rnodes, int, 0600);
64module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); 69module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
65module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); 70module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
66module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); 71module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
@@ -68,7 +73,6 @@ module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
68struct idr drm_minors_idr; 73struct idr drm_minors_idr;
69 74
70struct class *drm_class; 75struct class *drm_class;
71struct proc_dir_entry *drm_proc_root;
72struct dentry *drm_debugfs_root; 76struct dentry *drm_debugfs_root;
73 77
74int drm_err(const char *func, const char *format, ...) 78int drm_err(const char *func, const char *format, ...)
@@ -113,12 +117,12 @@ static int drm_minor_get_id(struct drm_device *dev, int type)
113 int base = 0, limit = 63; 117 int base = 0, limit = 63;
114 118
115 if (type == DRM_MINOR_CONTROL) { 119 if (type == DRM_MINOR_CONTROL) {
116 base += 64; 120 base += 64;
117 limit = base + 127; 121 limit = base + 63;
118 } else if (type == DRM_MINOR_RENDER) { 122 } else if (type == DRM_MINOR_RENDER) {
119 base += 128; 123 base += 128;
120 limit = base + 255; 124 limit = base + 63;
121 } 125 }
122 126
123 mutex_lock(&dev->struct_mutex); 127 mutex_lock(&dev->struct_mutex);
124 ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL); 128 ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL);
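
The limit fix in this hunk is worth spelling out: idr_alloc() hands out ids in
[base, limit), so each minor type now owns a disjoint block of 64 minors:

	DRM_MINOR_LEGACY : [0, 63)
	DRM_MINOR_CONTROL: [64, 127)   previously [64, 191), overlapping render
	DRM_MINOR_RENDER : [128, 191)  previously [128, 383)
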
@@ -288,13 +292,7 @@ int drm_fill_in_dev(struct drm_device *dev,
288 goto error_out_unreg; 292 goto error_out_unreg;
289 } 293 }
290 294
291 295 drm_legacy_ctxbitmap_init(dev);
292
293 retcode = drm_ctxbitmap_init(dev);
294 if (retcode) {
295 DRM_ERROR("Cannot allocate memory for context bitmap.\n");
296 goto error_out_unreg;
297 }
298 296
299 if (driver->driver_features & DRIVER_GEM) { 297 if (driver->driver_features & DRIVER_GEM) {
300 retcode = drm_gem_init(dev); 298 retcode = drm_gem_init(dev);
@@ -321,9 +319,8 @@ EXPORT_SYMBOL(drm_fill_in_dev);
321 * \param sec-minor structure to hold the assigned minor 319 * \param sec-minor structure to hold the assigned minor
322 * \return negative number on failure. 320 * \return negative number on failure.
323 * 321 *
324 * Search an empty entry and initialize it to the given parameters, and 322 * Search an empty entry and initialize it to the given parameters. This
325 * create the proc init entry via proc_init(). This routine assigns 323 routine assigns minor numbers to secondary heads of multi-headed cards
326 * minor numbers to secondary heads of multi-headed cards
327 */ 324 */
328int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type) 325int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
329{ 326{
@@ -351,20 +348,11 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
351 348
352 idr_replace(&drm_minors_idr, new_minor, minor_id); 349 idr_replace(&drm_minors_idr, new_minor, minor_id);
353 350
354 if (type == DRM_MINOR_LEGACY) {
355 ret = drm_proc_init(new_minor, drm_proc_root);
356 if (ret) {
357 DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
358 goto err_mem;
359 }
360 } else
361 new_minor->proc_root = NULL;
362
363#if defined(CONFIG_DEBUG_FS) 351#if defined(CONFIG_DEBUG_FS)
364 ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root); 352 ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
365 if (ret) { 353 if (ret) {
366 DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); 354 DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
367 goto err_g2; 355 goto err_mem;
368 } 356 }
369#endif 357#endif
370 358
@@ -372,7 +360,7 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
372 if (ret) { 360 if (ret) {
373 printk(KERN_ERR 361 printk(KERN_ERR
374 "DRM: Error sysfs_device_add.\n"); 362 "DRM: Error sysfs_device_add.\n");
375 goto err_g2; 363 goto err_debugfs;
376 } 364 }
377 *minor = new_minor; 365 *minor = new_minor;
378 366
@@ -380,10 +368,11 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
380 return 0; 368 return 0;
381 369
382 370
383err_g2: 371err_debugfs:
384 if (new_minor->type == DRM_MINOR_LEGACY) 372#if defined(CONFIG_DEBUG_FS)
385 drm_proc_cleanup(new_minor, drm_proc_root); 373 drm_debugfs_cleanup(new_minor);
386err_mem: 374err_mem:
375#endif
387 kfree(new_minor); 376 kfree(new_minor);
388err_idr: 377err_idr:
389 idr_remove(&drm_minors_idr, minor_id); 378 idr_remove(&drm_minors_idr, minor_id);
@@ -397,10 +386,6 @@ EXPORT_SYMBOL(drm_get_minor);
397 * 386 *
398 * \param sec_minor - structure to be released 387 * \param sec_minor - structure to be released
399 * \return always zero 388 * \return always zero
400 *
401 * Cleans up the proc resources. Not legal for this to be the
402 * last minor released.
403 *
404 */ 389 */
405int drm_put_minor(struct drm_minor **minor_p) 390int drm_put_minor(struct drm_minor **minor_p)
406{ 391{
@@ -408,8 +393,6 @@ int drm_put_minor(struct drm_minor **minor_p)
408 393
409 DRM_DEBUG("release secondary minor %d\n", minor->index); 394 DRM_DEBUG("release secondary minor %d\n", minor->index);
410 395
411 if (minor->type == DRM_MINOR_LEGACY)
412 drm_proc_cleanup(minor, drm_proc_root);
413#if defined(CONFIG_DEBUG_FS) 396#if defined(CONFIG_DEBUG_FS)
414 drm_debugfs_cleanup(minor); 397 drm_debugfs_cleanup(minor);
415#endif 398#endif
@@ -451,16 +434,11 @@ void drm_put_dev(struct drm_device *dev)
451 434
452 drm_lastclose(dev); 435 drm_lastclose(dev);
453 436
454 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp)
455 arch_phys_wc_del(dev->agp->agp_mtrr);
456
457 if (dev->driver->unload) 437 if (dev->driver->unload)
458 dev->driver->unload(dev); 438 dev->driver->unload(dev);
459 439
460 if (drm_core_has_AGP(dev) && dev->agp) { 440 if (dev->driver->bus->agp_destroy)
461 kfree(dev->agp); 441 dev->driver->bus->agp_destroy(dev);
462 dev->agp = NULL;
463 }
464 442
465 drm_vblank_cleanup(dev); 443 drm_vblank_cleanup(dev);
466 444
@@ -468,11 +446,14 @@ void drm_put_dev(struct drm_device *dev)
468 drm_rmmap(dev, r_list->map); 446 drm_rmmap(dev, r_list->map);
469 drm_ht_remove(&dev->map_hash); 447 drm_ht_remove(&dev->map_hash);
470 448
471 drm_ctxbitmap_cleanup(dev); 449 drm_legacy_ctxbitmap_cleanup(dev);
472 450
473 if (drm_core_check_feature(dev, DRIVER_MODESET)) 451 if (drm_core_check_feature(dev, DRIVER_MODESET))
474 drm_put_minor(&dev->control); 452 drm_put_minor(&dev->control);
475 453
454 if (dev->render)
455 drm_put_minor(&dev->render);
456
476 if (driver->driver_features & DRIVER_GEM) 457 if (driver->driver_features & DRIVER_GEM)
477 drm_gem_destroy(dev); 458 drm_gem_destroy(dev);
478 459
@@ -489,6 +470,8 @@ void drm_unplug_dev(struct drm_device *dev)
489 /* for a USB device */ 470 /* for a USB device */
490 if (drm_core_check_feature(dev, DRIVER_MODESET)) 471 if (drm_core_check_feature(dev, DRIVER_MODESET))
491 drm_unplug_minor(dev->control); 472 drm_unplug_minor(dev->control);
473 if (dev->render)
474 drm_unplug_minor(dev->render);
492 drm_unplug_minor(dev->primary); 475 drm_unplug_minor(dev->primary);
493 476
494 mutex_lock(&drm_global_mutex); 477 mutex_lock(&drm_global_mutex);
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 34a156f0c336..87664723b9ce 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -33,6 +33,12 @@ int drm_get_usb_dev(struct usb_interface *interface,
33 if (ret) 33 if (ret)
34 goto err_g1; 34 goto err_g1;
35 35
36 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
37 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
38 if (ret)
39 goto err_g11;
40 }
41
36 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); 42 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
37 if (ret) 43 if (ret)
38 goto err_g2; 44 goto err_g2;
@@ -62,6 +68,9 @@ int drm_get_usb_dev(struct usb_interface *interface,
62err_g3: 68err_g3:
63 drm_put_minor(&dev->primary); 69 drm_put_minor(&dev->primary);
64err_g2: 70err_g2:
71 if (dev->render)
72 drm_put_minor(&dev->render);
73err_g11:
65 drm_put_minor(&dev->control); 74 drm_put_minor(&dev->control);
66err_g1: 75err_g1:
67 kfree(dev); 76 kfree(dev);
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index feb20035b2c4..b5c5af7328df 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -251,8 +251,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
251 switch (map->type) { 251 switch (map->type) {
252 case _DRM_REGISTERS: 252 case _DRM_REGISTERS:
253 case _DRM_FRAME_BUFFER: 253 case _DRM_FRAME_BUFFER:
254 if (drm_core_has_MTRR(dev)) 254 arch_phys_wc_del(map->mtrr);
255 arch_phys_wc_del(map->mtrr);
256 iounmap(map->handle); 255 iounmap(map->handle);
257 break; 256 break;
258 case _DRM_SHM: 257 case _DRM_SHM:
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
new file mode 100644
index 000000000000..63b471205072
--- /dev/null
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -0,0 +1,436 @@
1/*
2 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
3 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
4 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25#include <drm/drmP.h>
26#include <drm/drm_mm.h>
27#include <drm/drm_vma_manager.h>
28#include <linux/fs.h>
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/rbtree.h>
32#include <linux/slab.h>
33#include <linux/spinlock.h>
34#include <linux/types.h>
35
36/**
37 * DOC: vma offset manager
38 *
39 * The vma-manager is responsible for mapping arbitrary driver-dependent
40 * memory regions into the linear user address-space. It provides offsets to
41 * the caller, which can then be used on the address_space of the drm-device.
42 * It takes care not to overlap regions, to size them appropriately, and not
43 * to confuse mm-core with inconsistent fake vm_pgoff fields.
44 * Drivers shouldn't use this for object placement in VMEM. This manager should
45 * only be used to manage mappings into linear user-space VMs.
46 *
47 * We use drm_mm as backend to manage object allocations. But it is highly
48 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
49 * speed up offset lookups.
50 *
51 * You must not use multiple offset managers on a single address_space.
52 * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
53 * no longer be linear. Please use VM_NONLINEAR in that case and implement your
54 * own offset managers.
55 *
56 * This offset manager works on page-based addresses. That is, every argument
57 * and return code (with the exception of drm_vma_node_offset_addr()) is given
58 * in number of pages, not number of bytes. That means object sizes and offsets
59 * must always be page-aligned (as usual).
60 * If you want to get a valid byte-based user-space address for a given offset,
61 * please see drm_vma_node_offset_addr().
62 *
63 * In addition to offset management, the vma offset manager also handles access
64 * management. For every open-file context that is allowed to access a given
65 * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
66 * open-file with the offset of the node will fail with -EACCES. To revoke
67 * access again, use drm_vma_node_revoke(). However, the caller is responsible
68 * for destroying already existing mappings, if required.
69 */
70
71/**
72 * drm_vma_offset_manager_init - Initialize new offset-manager
73 * @mgr: Manager object
74 * @page_offset: Offset of available memory area (page-based)
75 * @size: Size of available address space range (page-based)
76 *
77 * Initialize a new offset-manager. The offset and area size available for the
78 * manager are given as @page_offset and @size. Both are interpreted as
79 * page-numbers, not bytes.
80 *
81 * Adding/removing nodes from the manager is locked internally and protected
82 * against concurrent access. However, node allocation and destruction is left
83 * for the caller. While calling into the vma-manager, a given node must
84 * always be guaranteed to be referenced.
85 */
86void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
87 unsigned long page_offset, unsigned long size)
88{
89 rwlock_init(&mgr->vm_lock);
90 mgr->vm_addr_space_rb = RB_ROOT;
91 drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
92}
93EXPORT_SYMBOL(drm_vma_offset_manager_init);
94
95/**
96 * drm_vma_offset_manager_destroy() - Destroy offset manager
97 * @mgr: Manager object
98 *
99 * Destroy an object manager which was previously created via
100 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
101 * before destroying the manager. Otherwise, drm_mm will refuse to free the
102 * requested resources.
103 *
104 * The manager must not be accessed after this function is called.
105 */
106void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
107{
108 /* take the lock to protect against buggy drivers */
109 write_lock(&mgr->vm_lock);
110 drm_mm_takedown(&mgr->vm_addr_space_mm);
111 write_unlock(&mgr->vm_lock);
112}
113EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
114
115/**
116 * drm_vma_offset_lookup() - Find node in offset space
117 * @mgr: Manager object
118 * @start: Start address for object (page-based)
119 * @pages: Size of object (page-based)
120 *
121 * Find a node given a start address and object size. This returns the _best_
122 * match for the given node. That is, @start may point somewhere into a valid
123 * region and the given node will be returned, as long as the node spans the
124 * whole requested area (given the size in number of pages as @pages).
125 *
126 * RETURNS:
127 * Returns NULL if no suitable node can be found. Otherwise, the best match
128 * is returned. It's the caller's responsibility to make sure the node doesn't
129 * get destroyed before the caller can access it.
130 */
131struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
132 unsigned long start,
133 unsigned long pages)
134{
135 struct drm_vma_offset_node *node;
136
137 read_lock(&mgr->vm_lock);
138 node = drm_vma_offset_lookup_locked(mgr, start, pages);
139 read_unlock(&mgr->vm_lock);
140
141 return node;
142}
143EXPORT_SYMBOL(drm_vma_offset_lookup);
144
145/**
146 * drm_vma_offset_lookup_locked() - Find node in offset space
147 * @mgr: Manager object
148 * @start: Start address for object (page-based)
149 * @pages: Size of object (page-based)
150 *
151 * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
152 * manually. See drm_vma_offset_lock_lookup() for an example.
153 *
154 * RETURNS:
155 * Returns NULL if no suitable node can be found. Otherwise, the best match
156 * is returned.
157 */
158struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
159 unsigned long start,
160 unsigned long pages)
161{
162 struct drm_vma_offset_node *node, *best;
163 struct rb_node *iter;
164 unsigned long offset;
165
166 iter = mgr->vm_addr_space_rb.rb_node;
167 best = NULL;
168
169 while (likely(iter)) {
170 node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
171 offset = node->vm_node.start;
172 if (start >= offset) {
173 iter = iter->rb_right;
174 best = node;
175 if (start == offset)
176 break;
177 } else {
178 iter = iter->rb_left;
179 }
180 }
181
182 /* verify that the node spans the requested area */
183 if (best) {
184 offset = best->vm_node.start + best->vm_node.size;
185 if (offset < start + pages)
186 best = NULL;
187 }
188
189 return best;
190}
191EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
192
193/* internal helper to link @node into the rb-tree */
194static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
195 struct drm_vma_offset_node *node)
196{
197 struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
198 struct rb_node *parent = NULL;
199 struct drm_vma_offset_node *iter_node;
200
201 while (likely(*iter)) {
202 parent = *iter;
203 iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
204
205 if (node->vm_node.start < iter_node->vm_node.start)
206 iter = &(*iter)->rb_left;
207 else if (node->vm_node.start > iter_node->vm_node.start)
208 iter = &(*iter)->rb_right;
209 else
210 BUG();
211 }
212
213 rb_link_node(&node->vm_rb, parent, iter);
214 rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
215}
216
217/**
218 * drm_vma_offset_add() - Add offset node to manager
219 * @mgr: Manager object
220 * @node: Node to be added
221 * @pages: Allocation size visible to user-space (in number of pages)
222 *
223 * Add a node to the offset-manager. If the node was already added, this does
224 * nothing and returns 0. @pages is the size of the object given in number of
225 * pages.
226 * After this call succeeds, you can access the offset of the node until it
227 * is removed again.
228 *
229 * If this call fails, it is safe to retry the operation or call
230 * drm_vma_offset_remove() anyway. However, no cleanup is required in that
231 * case.
232 *
233 * @pages is not required to be the same size as the underlying memory object
234 * that you want to map. It only limits the size that user-space can map into
235 * their address space.
236 *
237 * RETURNS:
238 * 0 on success, negative error code on failure.
239 */
240int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
241 struct drm_vma_offset_node *node, unsigned long pages)
242{
243 int ret;
244
245 write_lock(&mgr->vm_lock);
246
247 if (drm_mm_node_allocated(&node->vm_node)) {
248 ret = 0;
249 goto out_unlock;
250 }
251
252 ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
253 pages, 0, DRM_MM_SEARCH_DEFAULT);
254 if (ret)
255 goto out_unlock;
256
257 _drm_vma_offset_add_rb(mgr, node);
258
259out_unlock:
260 write_unlock(&mgr->vm_lock);
261 return ret;
262}
263EXPORT_SYMBOL(drm_vma_offset_add);
264
265/**
266 * drm_vma_offset_remove() - Remove offset node from manager
267 * @mgr: Manager object
268 * @node: Node to be removed
269 *
270 * Remove a node from the offset manager. If the node wasn't added before, this
271 * does nothing. After this call returns, the offset and size will be 0 until a
272 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
273 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
274 * offset is allocated.
275 */
276void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
277 struct drm_vma_offset_node *node)
278{
279 write_lock(&mgr->vm_lock);
280
281 if (drm_mm_node_allocated(&node->vm_node)) {
282 rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
283 drm_mm_remove_node(&node->vm_node);
284 memset(&node->vm_node, 0, sizeof(node->vm_node));
285 }
286
287 write_unlock(&mgr->vm_lock);
288}
289EXPORT_SYMBOL(drm_vma_offset_remove);
290
291/**
292 * drm_vma_node_allow - Add open-file to list of allowed users
293 * @node: Node to modify
294 * @filp: Open file to add
295 *
296 * Add @filp to the list of allowed open-files for this node. If @filp is
297 * already on this list, the ref-count is incremented.
298 *
299 * The list of allowed-users is preserved across drm_vma_offset_add() and
300 * drm_vma_offset_remove() calls. You may even call it if the node is currently
301 * not added to any offset-manager.
302 *
303 * You must remove all open-files the same number of times as you added them
304 * before destroying the node. Otherwise, you will leak memory.
305 *
306 * This is locked against concurrent access internally.
307 *
308 * RETURNS:
309 * 0 on success, negative error code on internal failure (out-of-mem)
310 */
311int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
312{
313 struct rb_node **iter;
314 struct rb_node *parent = NULL;
315 struct drm_vma_offset_file *new, *entry;
316 int ret = 0;
317
318 /* Preallocate entry to avoid atomic allocations below. It is quite
319 * unlikely that an open-file is added twice to a single node so we
320 * don't optimize for this case. OOM is checked below only if the entry
321 * is actually used. */
322 new = kmalloc(sizeof(*entry), GFP_KERNEL);
323
324 write_lock(&node->vm_lock);
325
326 iter = &node->vm_files.rb_node;
327
328 while (likely(*iter)) {
329 parent = *iter;
330 entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
331
332 if (filp == entry->vm_filp) {
333 entry->vm_count++;
334 goto unlock;
335 } else if (filp > entry->vm_filp) {
336 iter = &(*iter)->rb_right;
337 } else {
338 iter = &(*iter)->rb_left;
339 }
340 }
341
342 if (!new) {
343 ret = -ENOMEM;
344 goto unlock;
345 }
346
347 new->vm_filp = filp;
348 new->vm_count = 1;
349 rb_link_node(&new->vm_rb, parent, iter);
350 rb_insert_color(&new->vm_rb, &node->vm_files);
351 new = NULL;
352
353unlock:
354 write_unlock(&node->vm_lock);
355 kfree(new);
356 return ret;
357}
358EXPORT_SYMBOL(drm_vma_node_allow);
359
360/**
361 * drm_vma_node_revoke - Remove open-file from list of allowed users
362 * @node: Node to modify
363 * @filp: Open file to remove
364 *
365 * Decrement the ref-count of @filp in the list of allowed open-files on @node.
366 * If the ref-count drops to zero, remove @filp from the list. You must call
367 * this once for every drm_vma_node_allow() on @filp.
368 *
369 * This is locked against concurrent access internally.
370 *
371 * If @filp is not on the list, nothing is done.
372 */
373void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
374{
375 struct drm_vma_offset_file *entry;
376 struct rb_node *iter;
377
378 write_lock(&node->vm_lock);
379
380 iter = node->vm_files.rb_node;
381 while (likely(iter)) {
382 entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
383 if (filp == entry->vm_filp) {
384 if (!--entry->vm_count) {
385 rb_erase(&entry->vm_rb, &node->vm_files);
386 kfree(entry);
387 }
388 break;
389 } else if (filp > entry->vm_filp) {
390 iter = iter->rb_right;
391 } else {
392 iter = iter->rb_left;
393 }
394 }
395
396 write_unlock(&node->vm_lock);
397}
398EXPORT_SYMBOL(drm_vma_node_revoke);
399
400/**
401 * drm_vma_node_is_allowed - Check whether an open-file is granted access
402 * @node: Node to check
403 * @filp: Open-file to check for
404 *
405 * Search the list in @node whether @filp is currently on the list of allowed
406 * open-files (see drm_vma_node_allow()).
407 *
408 * This is locked against concurrent access internally.
409 *
410 * RETURNS:
411 * true iff @filp is on the list
412 */
413bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
414 struct file *filp)
415{
416 struct drm_vma_offset_file *entry;
417 struct rb_node *iter;
418
419 read_lock(&node->vm_lock);
420
421 iter = node->vm_files.rb_node;
422 while (likely(iter)) {
423 entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
424 if (filp == entry->vm_filp)
425 break;
426 else if (filp > entry->vm_filp)
427 iter = iter->rb_right;
428 else
429 iter = iter->rb_left;
430 }
431
432 read_unlock(&node->vm_lock);
433
434 return iter;
435}
436EXPORT_SYMBOL(drm_vma_node_is_allowed);
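
A hedged end-to-end sketch of the new manager from a driver's perspective
(example_* names are invented; per-node initialization via the helpers in
drm_vma_manager.h is elided): allocate an offset per object, grant the opening
file access, and check both on mmap():

#include <drm/drm_vma_manager.h>
#include <linux/mm.h>

struct example_object {
	struct drm_vma_offset_node vma_node;
	unsigned long num_pages;
};

static int example_setup(struct drm_vma_offset_manager *mgr,
			 struct example_object *obj, struct file *filp)
{
	int ret;

	ret = drm_vma_offset_add(mgr, &obj->vma_node, obj->num_pages);
	if (ret)
		return ret;

	/* grant this open file access; revoke again on close */
	ret = drm_vma_node_allow(&obj->vma_node, filp);
	if (ret)
		drm_vma_offset_remove(mgr, &obj->vma_node);
	return ret;
}

static int example_mmap_check(struct drm_vma_offset_manager *mgr,
			      struct file *filp,
			      struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;

	/* vm_pgoff carries the page-based offset handed out above */
	node = drm_vma_offset_lookup(mgr, vma->vm_pgoff, vma_pages(vma));
	if (!node || !drm_vma_node_is_allowed(node, filp))
		return -EACCES;
	return 0;
}
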
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 95c75edef01a..30ef41bcd7b8 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -15,7 +15,6 @@
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/i2c.h> 17#include <linux/i2c.h>
18#include <linux/module.h>
19 18
20 19
21#include "exynos_drm_drv.h" 20#include "exynos_drm_drv.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 9a35d171a6d3..14f5c1d34028 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -184,8 +184,9 @@ static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
184}; 184};
185 185
186static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc, 186static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
187 struct drm_framebuffer *fb, 187 struct drm_framebuffer *fb,
188 struct drm_pending_vblank_event *event) 188 struct drm_pending_vblank_event *event,
189 uint32_t page_flip_flags)
189{ 190{
190 struct drm_device *dev = crtc->dev; 191 struct drm_device *dev = crtc->dev;
191 struct exynos_drm_private *dev_priv = dev->dev_private; 192 struct exynos_drm_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index a0f997e0cbdf..fd76449cf452 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -22,6 +22,11 @@ struct exynos_drm_dmabuf_attachment {
22 bool is_mapped; 22 bool is_mapped;
23}; 23};
24 24
25static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
26{
27 return to_exynos_gem_obj(buf->priv);
28}
29
25static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf, 30static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
26 struct device *dev, 31 struct device *dev,
27 struct dma_buf_attachment *attach) 32 struct dma_buf_attachment *attach)
@@ -63,7 +68,7 @@ static struct sg_table *
63 enum dma_data_direction dir) 68 enum dma_data_direction dir)
64{ 69{
65 struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv; 70 struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
66 struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv; 71 struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
67 struct drm_device *dev = gem_obj->base.dev; 72 struct drm_device *dev = gem_obj->base.dev;
68 struct exynos_drm_gem_buf *buf; 73 struct exynos_drm_gem_buf *buf;
69 struct scatterlist *rd, *wr; 74 struct scatterlist *rd, *wr;
@@ -127,27 +132,6 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
127 /* Nothing to do. */ 132 /* Nothing to do. */
128} 133}
129 134
130static void exynos_dmabuf_release(struct dma_buf *dmabuf)
131{
132 struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;
133
134 /*
135 * exynos_dmabuf_release() call means that file object's
136 * f_count is 0 and it calls drm_gem_object_handle_unreference()
137 * to drop the references that these values had been increased
138 * at drm_prime_handle_to_fd()
139 */
140 if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
141 exynos_gem_obj->base.export_dma_buf = NULL;
142
143 /*
144 * drop this gem object refcount to release allocated buffer
145 * and resources.
146 */
147 drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
148 }
149}
150
151static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, 135static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
152 unsigned long page_num) 136 unsigned long page_num)
153{ 137{
@@ -193,7 +177,7 @@ static struct dma_buf_ops exynos_dmabuf_ops = {
193 .kunmap = exynos_gem_dmabuf_kunmap, 177 .kunmap = exynos_gem_dmabuf_kunmap,
194 .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic, 178 .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
195 .mmap = exynos_gem_dmabuf_mmap, 179 .mmap = exynos_gem_dmabuf_mmap,
196 .release = exynos_dmabuf_release, 180 .release = drm_gem_dmabuf_release,
197}; 181};
198 182
199struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev, 183struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
@@ -201,7 +185,7 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
201{ 185{
202 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 186 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
203 187
204 return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops, 188 return dma_buf_export(obj, &exynos_dmabuf_ops,
205 exynos_gem_obj->base.size, flags); 189 exynos_gem_obj->base.size, flags);
206} 190}
207 191
@@ -219,8 +203,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
219 if (dma_buf->ops == &exynos_dmabuf_ops) { 203 if (dma_buf->ops == &exynos_dmabuf_ops) {
220 struct drm_gem_object *obj; 204 struct drm_gem_object *obj;
221 205
222 exynos_gem_obj = dma_buf->priv; 206 obj = dma_buf->priv;
223 obj = &exynos_gem_obj->base;
224 207
225 /* is it from our device? */ 208 /* is it from our device? */
226 if (obj->dev == drm_dev) { 209 if (obj->dev == drm_dev) {
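
The dmabuf hunks above converge on one convention: the dma_buf's priv is the base struct drm_gem_object rather than a driver-private wrapper, which lets the DRM core's drm_gem_dmabuf_release() replace the hand-rolled release callback and turns self-import into a pointer comparison. A minimal sketch under that convention, assuming the 3.12-era dma_buf_export() signature; the my_* names are illustrative:

#include <drm/drmP.h>
#include <linux/dma-buf.h>

static const struct dma_buf_ops my_dmabuf_ops; /* .release = drm_gem_dmabuf_release, ... */

struct dma_buf *my_prime_export(struct drm_device *dev,
				struct drm_gem_object *obj, int flags)
{
	/* priv is the base GEM object, so the core can drop the ref */
	return dma_buf_export(obj, &my_dmabuf_ops, obj->size, flags);
}

struct drm_gem_object *my_prime_import(struct drm_device *dev,
				       struct dma_buf *buf)
{
	if (buf->ops == &my_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;

		if (obj->dev == dev) {		/* ours: just take a ref */
			drm_gem_object_reference(obj);
			return obj;
		}
	}
	/* ... otherwise attach/map it as a foreign buffer ... */
	return ERR_PTR(-EINVAL);
}
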
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ca2729a85129..df81d3c959b4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -213,7 +213,7 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
213 .close = drm_gem_vm_close, 213 .close = drm_gem_vm_close,
214}; 214};
215 215
216static struct drm_ioctl_desc exynos_ioctls[] = { 216static const struct drm_ioctl_desc exynos_ioctls[] = {
217 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl, 217 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
218 DRM_UNLOCKED | DRM_AUTH), 218 DRM_UNLOCKED | DRM_AUTH),
219 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET, 219 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
@@ -271,12 +271,13 @@ static struct drm_driver exynos_drm_driver = {
271 .gem_vm_ops = &exynos_drm_gem_vm_ops, 271 .gem_vm_ops = &exynos_drm_gem_vm_ops,
272 .dumb_create = exynos_drm_gem_dumb_create, 272 .dumb_create = exynos_drm_gem_dumb_create,
273 .dumb_map_offset = exynos_drm_gem_dumb_map_offset, 273 .dumb_map_offset = exynos_drm_gem_dumb_map_offset,
274 .dumb_destroy = exynos_drm_gem_dumb_destroy, 274 .dumb_destroy = drm_gem_dumb_destroy,
275 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 275 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
276 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 276 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
277 .gem_prime_export = exynos_dmabuf_prime_export, 277 .gem_prime_export = exynos_dmabuf_prime_export,
278 .gem_prime_import = exynos_dmabuf_prime_import, 278 .gem_prime_import = exynos_dmabuf_prime_import,
279 .ioctls = exynos_ioctls, 279 .ioctls = exynos_ioctls,
280 .num_ioctls = ARRAY_SIZE(exynos_ioctls),
280 .fops = &exynos_drm_driver_fops, 281 .fops = &exynos_drm_driver_fops,
281 .name = DRIVER_NAME, 282 .name = DRIVER_NAME,
282 .desc = DRIVER_DESC, 283 .desc = DRIVER_DESC,
@@ -288,7 +289,6 @@ static struct drm_driver exynos_drm_driver = {
288static int exynos_drm_platform_probe(struct platform_device *pdev) 289static int exynos_drm_platform_probe(struct platform_device *pdev)
289{ 290{
290 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 291 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
291 exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
292 292
293 return drm_platform_init(&exynos_drm_driver, pdev); 293 return drm_platform_init(&exynos_drm_driver, pdev);
294} 294}
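
Two related cleanups here: the ioctl table becomes const, and num_ioctls is filled in with ARRAY_SIZE() in the static initializer instead of being patched in at probe time, so the count can never drift from the table itself. The shape of the pattern (entries elided):

static const struct drm_ioctl_desc my_ioctls[] = {
	/* DRM_IOCTL_DEF_DRV(...) entries */
};

static struct drm_driver my_driver = {
	/* ... */
	.ioctls		= my_ioctls,
	.num_ioctls	= ARRAY_SIZE(my_ioctls),	/* computed at build time */
};
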
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 61b094f689a7..6e047bd53e2f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -12,7 +12,6 @@
12 * 12 *
13 */ 13 */
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h> 15#include <linux/platform_device.h>
17#include <linux/mfd/syscon.h> 16#include <linux/mfd/syscon.h>
18#include <linux/regmap.h> 17#include <linux/regmap.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 3e106beca5b6..1c263dac3c1c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -14,7 +14,6 @@
14#include <drm/drmP.h> 14#include <drm/drmP.h>
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/platform_device.h> 17#include <linux/platform_device.h>
19#include <linux/clk.h> 18#include <linux/clk.h>
20#include <linux/of_device.h> 19#include <linux/of_device.h>
@@ -130,7 +129,6 @@ static const struct of_device_id fimd_driver_dt_match[] = {
130 .data = &exynos5_fimd_driver_data }, 129 .data = &exynos5_fimd_driver_data },
131 {}, 130 {},
132}; 131};
133MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
134#endif 132#endif
135 133
136static inline struct fimd_driver_data *drm_fimd_get_driver_data( 134static inline struct fimd_driver_data *drm_fimd_get_driver_data(
@@ -1082,7 +1080,6 @@ static struct platform_device_id fimd_driver_ids[] = {
1082 }, 1080 },
1083 {}, 1081 {},
1084}; 1082};
1085MODULE_DEVICE_TABLE(platform, fimd_driver_ids);
1086 1083
1087static const struct dev_pm_ops fimd_pm_ops = { 1084static const struct dev_pm_ops fimd_pm_ops = {
1088 SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume) 1085 SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
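
The recurring <linux/module.h> and MODULE_DEVICE_TABLE() removals in these files reflect that the Exynos sub-blocks (DDC, FIMC, FIMD, ...) are only ever built into the single exynos_drm module: the OF and platform match tables stay, since probing still walks them, but no modalias is emitted for module autoloading. Illustrative shape of such a non-modular sub-driver, with hypothetical names:

#include <linux/platform_device.h>
#include <linux/of.h>

static int my_sub_probe(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id my_sub_dt_match[] = {
	{ .compatible = "vendor,my-sub-block" },
	{},
};
/* No MODULE_DEVICE_TABLE(of, my_sub_dt_match): the table still drives
 * probe matching, but no module alias is generated.
 */

struct platform_driver my_sub_driver = {
	.probe	= my_sub_probe,
	.driver	= {
		.name		= "my-sub-block",
		.of_match_table	= my_sub_dt_match,
	},
};
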
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 42a5a5466075..eddea4941483 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -8,7 +8,6 @@
8 */ 8 */
9 9
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/clk.h> 11#include <linux/clk.h>
13#include <linux/err.h> 12#include <linux/err.h>
14#include <linux/interrupt.h> 13#include <linux/interrupt.h>
@@ -806,9 +805,20 @@ static void g2d_dma_start(struct g2d_data *g2d,
806 struct g2d_cmdlist_node *node = 805 struct g2d_cmdlist_node *node =
807 list_first_entry(&runqueue_node->run_cmdlist, 806 list_first_entry(&runqueue_node->run_cmdlist,
808 struct g2d_cmdlist_node, list); 807 struct g2d_cmdlist_node, list);
808 int ret;
809
810 ret = pm_runtime_get_sync(g2d->dev);
811 if (ret < 0) {
812 dev_warn(g2d->dev, "failed pm power on.\n");
813 return;
814 }
809 815
810 pm_runtime_get_sync(g2d->dev); 816 ret = clk_prepare_enable(g2d->gate_clk);
811 clk_enable(g2d->gate_clk); 817 if (ret < 0) {
818 dev_warn(g2d->dev, "failed to enable clock.\n");
819 pm_runtime_put_sync(g2d->dev);
820 return;
821 }
812 822
813 writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); 823 writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
814 writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); 824 writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
@@ -861,7 +871,7 @@ static void g2d_runqueue_worker(struct work_struct *work)
861 runqueue_work); 871 runqueue_work);
862 872
863 mutex_lock(&g2d->runqueue_mutex); 873 mutex_lock(&g2d->runqueue_mutex);
864 clk_disable(g2d->gate_clk); 874 clk_disable_unprepare(g2d->gate_clk);
865 pm_runtime_put_sync(g2d->dev); 875 pm_runtime_put_sync(g2d->dev);
866 876
867 complete(&g2d->runqueue_node->complete); 877 complete(&g2d->runqueue_node->complete);
@@ -1521,7 +1531,6 @@ static const struct of_device_id exynos_g2d_match[] = {
1521 { .compatible = "samsung,exynos5250-g2d" }, 1531 { .compatible = "samsung,exynos5250-g2d" },
1522 {}, 1532 {},
1523}; 1533};
1524MODULE_DEVICE_TABLE(of, exynos_g2d_match);
1525#endif 1534#endif
1526 1535
1527struct platform_driver g2d_driver = { 1536struct platform_driver g2d_driver = {
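
The g2d hunk stops ignoring two return values that can legitimately fail, and it switches from clk_enable()/clk_disable() to the prepare/unprepare variants the common clock framework expects outside atomic context. The key detail is the unwind: if the clock fails, the runtime-PM reference just taken must be dropped. A condensed sketch (names hypothetical):

#include <linux/clk.h>
#include <linux/pm_runtime.h>

static int my_engine_start(struct device *dev, struct clk *gate_clk)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(gate_clk);
	if (ret < 0) {
		pm_runtime_put_sync(dev);	/* undo the PM reference */
		return ret;
	}

	/* ... start DMA; the stop path pairs this with
	 * clk_disable_unprepare() and pm_runtime_put_sync(), as the
	 * runqueue worker above now does.
	 */
	return 0;
}
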
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 24c22a8c3364..f3c6f40666e1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <drm/drmP.h> 12#include <drm/drmP.h>
13#include <drm/drm_vma_manager.h>
13 14
14#include <linux/shmem_fs.h> 15#include <linux/shmem_fs.h>
15#include <drm/exynos_drm.h> 16#include <drm/exynos_drm.h>
@@ -135,7 +136,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
135 obj = &exynos_gem_obj->base; 136 obj = &exynos_gem_obj->base;
136 buf = exynos_gem_obj->buffer; 137 buf = exynos_gem_obj->buffer;
137 138
138 DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count)); 139 DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
139 140
140 /* 141 /*
141 * do not release memory region from exporter. 142 * do not release memory region from exporter.
@@ -152,8 +153,7 @@ out:
152 exynos_drm_fini_buf(obj->dev, buf); 153 exynos_drm_fini_buf(obj->dev, buf);
153 exynos_gem_obj->buffer = NULL; 154 exynos_gem_obj->buffer = NULL;
154 155
155 if (obj->map_list.map) 156 drm_gem_free_mmap_offset(obj);
156 drm_gem_free_mmap_offset(obj);
157 157
158 /* release file pointer to gem object. */ 158 /* release file pointer to gem object. */
159 drm_gem_object_release(obj); 159 drm_gem_object_release(obj);
@@ -703,13 +703,11 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
703 goto unlock; 703 goto unlock;
704 } 704 }
705 705
706 if (!obj->map_list.map) { 706 ret = drm_gem_create_mmap_offset(obj);
707 ret = drm_gem_create_mmap_offset(obj); 707 if (ret)
708 if (ret) 708 goto out;
709 goto out;
710 }
711 709
712 *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; 710 *offset = drm_vma_node_offset_addr(&obj->vma_node);
713 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); 711 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
714 712
715out: 713out:
@@ -719,26 +717,6 @@ unlock:
719 return ret; 717 return ret;
720} 718}
721 719
722int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
723 struct drm_device *dev,
724 unsigned int handle)
725{
726 int ret;
727
728 /*
729 * obj->refcount and obj->handle_count are decreased and
730 * if both them are 0 then exynos_drm_gem_free_object()
731 * would be called by callback to release resources.
732 */
733 ret = drm_gem_handle_delete(file_priv, handle);
734 if (ret < 0) {
735 DRM_ERROR("failed to delete drm_gem_handle.\n");
736 return ret;
737 }
738
739 return 0;
740}
741
742int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 720int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
743{ 721{
744 struct drm_gem_object *obj = vma->vm_private_data; 722 struct drm_gem_object *obj = vma->vm_private_data;
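
With the vma-offset manager underneath, drm_gem_create_mmap_offset() is idempotent (it returns 0 if the node already has an offset), which is why the old map_list guard disappears, and the fake mmap offset is read back with drm_vma_node_offset_addr() instead of poking the hash key. A condensed sketch of the resulting dumb_map_offset flow, assuming the 3.12-era drm_gem_object_lookup() signature and eliding the locking the real function keeps:

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

static int my_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			      u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file, handle);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_create_mmap_offset(obj);	/* safe to call again */
	if (!ret)
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
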
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 468766bee450..09555afdfe9c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -151,15 +151,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
151 struct drm_device *dev, uint32_t handle, 151 struct drm_device *dev, uint32_t handle,
152 uint64_t *offset); 152 uint64_t *offset);
153 153
154/*
155 * destroy memory region allocated.
156 * - a gem handle and physical memory region pointed by a gem object
157 * would be released by drm_gem_handle_delete().
158 */
159int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
160 struct drm_device *dev,
161 unsigned int handle);
162
163/* page fault handler and mmap fault address(virtual) to physical memory. */ 154/* page fault handler and mmap fault address(virtual) to physical memory. */
164int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 155int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
165 156
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 472e3b25e7f2..90b8a1a5344c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -12,7 +12,6 @@
12 * 12 *
13 */ 13 */
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h> 15#include <linux/platform_device.h>
17#include <linux/clk.h> 16#include <linux/clk.h>
18#include <linux/pm_runtime.h> 17#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index aaa550d622f0..8d3bc01d6834 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -15,7 +15,6 @@
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/wait.h> 17#include <linux/wait.h>
18#include <linux/module.h>
19#include <linux/platform_device.h> 18#include <linux/platform_device.h>
20#include <linux/pm_runtime.h> 19#include <linux/pm_runtime.h>
21 20
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index b1ef8e7ff9c9..d2b6ab4def93 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -12,7 +12,6 @@
12 * 12 *
13 */ 13 */
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h> 15#include <linux/platform_device.h>
17#include <linux/types.h> 16#include <linux/types.h>
18#include <linux/clk.h> 17#include <linux/clk.h>
@@ -342,10 +341,10 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
342 */ 341 */
343 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, 342 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
344 prop_list->ipp_id); 343 prop_list->ipp_id);
345 if (!ippdrv) { 344 if (IS_ERR(ippdrv)) {
346 DRM_ERROR("not found ipp%d driver.\n", 345 DRM_ERROR("not found ipp%d driver.\n",
347 prop_list->ipp_id); 346 prop_list->ipp_id);
348 return -EINVAL; 347 return PTR_ERR(ippdrv);
349 } 348 }
350 349
351 prop_list = ippdrv->prop_list; 350 prop_list = ippdrv->prop_list;
@@ -970,9 +969,9 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
970 /* find command node */ 969 /* find command node */
971 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, 970 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
972 qbuf->prop_id); 971 qbuf->prop_id);
973 if (!c_node) { 972 if (IS_ERR(c_node)) {
974 DRM_ERROR("failed to get command node.\n"); 973 DRM_ERROR("failed to get command node.\n");
975 return -EFAULT; 974 return PTR_ERR(c_node);
976 } 975 }
977 976
978 /* buffer control */ 977 /* buffer control */
@@ -1106,9 +1105,9 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1106 1105
1107 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, 1106 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
1108 cmd_ctrl->prop_id); 1107 cmd_ctrl->prop_id);
1109 if (!c_node) { 1108 if (IS_ERR(c_node)) {
1110 DRM_ERROR("invalid command node list.\n"); 1109 DRM_ERROR("invalid command node list.\n");
1111 return -EINVAL; 1110 return PTR_ERR(c_node);
1112 } 1111 }
1113 1112
1114 if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl, 1113 if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
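
The ipp lookups now report why they failed: ipp_find_obj() returns an ERR_PTR() code instead of NULL, so callers test with IS_ERR() and forward the embedded errno with PTR_ERR() rather than flattening every failure to -EINVAL or -EFAULT. Generic shape of the conversion, with illustrative names:

#include <linux/err.h>
#include <linux/types.h>

struct my_ctx;
struct my_obj;

static struct my_obj *my_find_obj(struct my_ctx *ctx, u32 id);

static int my_use_obj(struct my_ctx *ctx, u32 id)
{
	struct my_obj *obj = my_find_obj(ctx, id);

	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* propagate -ENOENT, -EINVAL, ... */

	/* ... operate on obj ... */
	return 0;
}
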
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 427640aa5148..49669aa24c45 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -10,7 +10,6 @@
10 */ 10 */
11 11
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/err.h> 13#include <linux/err.h>
15#include <linux/interrupt.h> 14#include <linux/interrupt.h>
16#include <linux/io.h> 15#include <linux/io.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 41cc74d83e4e..c57c56519add 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -13,7 +13,6 @@
13#include <drm/drmP.h> 13#include <drm/drmP.h>
14 14
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/platform_device.h> 16#include <linux/platform_device.h>
18 17
19#include <drm/exynos_drm.h> 18#include <drm/exynos_drm.h>
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 62ef5971ac3c..2f5c6942c968 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -24,7 +24,6 @@
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <linux/wait.h> 25#include <linux/wait.h>
26#include <linux/i2c.h> 26#include <linux/i2c.h>
27#include <linux/module.h>
28#include <linux/platform_device.h> 27#include <linux/platform_device.h>
29#include <linux/interrupt.h> 28#include <linux/interrupt.h>
30#include <linux/irq.h> 29#include <linux/irq.h>
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index ef04255076c7..6e320ae9afed 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -15,7 +15,6 @@
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/i2c.h> 17#include <linux/i2c.h>
18#include <linux/module.h>
19 18
20#include "exynos_drm_drv.h" 19#include "exynos_drm_drv.h"
21#include "exynos_hdmi.h" 20#include "exynos_hdmi.h"
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 42ffb71c63bc..c9a137caea41 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -23,7 +23,6 @@
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/wait.h> 24#include <linux/wait.h>
25#include <linux/i2c.h> 25#include <linux/i2c.h>
26#include <linux/module.h>
27#include <linux/platform_device.h> 26#include <linux/platform_device.h>
28#include <linux/interrupt.h> 27#include <linux/interrupt.h>
29#include <linux/irq.h> 28#include <linux/irq.h>
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 7a2d40a5c1e1..e9064dd9045d 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -15,6 +15,7 @@ gma500_gfx-y += \
15 mmu.o \ 15 mmu.o \
16 power.o \ 16 power.o \
17 psb_drv.o \ 17 psb_drv.o \
18 gma_display.o \
18 psb_intel_display.o \ 19 psb_intel_display.o \
19 psb_intel_lvds.o \ 20 psb_intel_lvds.o \
20 psb_intel_modes.o \ 21 psb_intel_modes.o \
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 23e14e93991f..162f686c532d 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -641,6 +641,7 @@ const struct psb_ops cdv_chip_ops = {
641 641
642 .crtc_helper = &cdv_intel_helper_funcs, 642 .crtc_helper = &cdv_intel_helper_funcs,
643 .crtc_funcs = &cdv_intel_crtc_funcs, 643 .crtc_funcs = &cdv_intel_crtc_funcs,
644 .clock_funcs = &cdv_clock_funcs,
644 645
645 .output_init = cdv_output_init, 646 .output_init = cdv_output_init,
646 .hotplug = cdv_hotplug_event, 647 .hotplug = cdv_hotplug_event,
@@ -655,4 +656,6 @@ const struct psb_ops cdv_chip_ops = {
655 .restore_regs = cdv_restore_display_registers, 656 .restore_regs = cdv_restore_display_registers,
656 .power_down = cdv_power_down, 657 .power_down = cdv_power_down,
657 .power_up = cdv_power_up, 658 .power_up = cdv_power_up,
659 .update_wm = cdv_update_wm,
660 .disable_sr = cdv_disable_sr,
658}; 661};
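
gma500 routes chip-specific behaviour through the per-chip psb_ops table, and this hunk grows it with the Cedarview clock, watermark and self-refresh hooks so the newly shared display code can call back into CDV code (dev_priv->ops->disable_sr() appears later in this patch). An abridged sketch of the dispatch pattern; only the added fields are shown, the surrounding struct is elided:

struct my_chip_ops {
	const struct gma_clock_funcs *clock_funcs;
	void (*update_wm)(struct drm_device *dev, struct drm_crtc *crtc);
	void (*disable_sr)(struct drm_device *dev);
};

/* Shared code stays chip-agnostic: */
static void my_update_watermarks(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 const struct my_chip_ops *ops)
{
	if (ops->update_wm)
		ops->update_wm(dev, crtc);
}
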
diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h
index 9561e17621b3..705c11d47d45 100644
--- a/drivers/gpu/drm/gma500/cdv_device.h
+++ b/drivers/gpu/drm/gma500/cdv_device.h
@@ -17,6 +17,7 @@
17 17
18extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs; 18extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
19extern const struct drm_crtc_funcs cdv_intel_crtc_funcs; 19extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
20extern const struct gma_clock_funcs cdv_clock_funcs;
20extern void cdv_intel_crt_init(struct drm_device *dev, 21extern void cdv_intel_crt_init(struct drm_device *dev,
21 struct psb_intel_mode_device *mode_dev); 22 struct psb_intel_mode_device *mode_dev);
22extern void cdv_intel_lvds_init(struct drm_device *dev, 23extern void cdv_intel_lvds_init(struct drm_device *dev,
@@ -25,12 +26,5 @@ extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *
25 int reg); 26 int reg);
26extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev, 27extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
27 struct drm_crtc *crtc); 28 struct drm_crtc *crtc);
 28 29extern void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc);
 29static inline void cdv_intel_wait_for_vblank(struct drm_device *dev) 30extern void cdv_disable_sr(struct drm_device *dev);
30{
31 /* Wait for 20ms, i.e. one cycle at 50hz. */
32 /* FIXME: msleep ?? */
33 mdelay(20);
34}
35
36
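
The deleted inline busy-waited 20 ms with mdelay(), and its own FIXME asks whether msleep() would do; the consolidated gma_wait_for_vblank() takes over this duty for all gma500 chips. For callers guaranteed to run in process context, a sleeping wait is the polite version, as in this minimal sketch (assuming no atomic-context callers):

#include <linux/delay.h>

static void my_wait_one_frame(void)
{
	msleep(20);	/* one cycle at 50 Hz, yielding the CPU meanwhile */
}
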
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 7b8386fc3024..661af492173d 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -95,13 +95,12 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
95 95
96 struct drm_device *dev = encoder->dev; 96 struct drm_device *dev = encoder->dev;
97 struct drm_crtc *crtc = encoder->crtc; 97 struct drm_crtc *crtc = encoder->crtc;
98 struct psb_intel_crtc *psb_intel_crtc = 98 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
99 to_psb_intel_crtc(crtc);
100 int dpll_md_reg; 99 int dpll_md_reg;
101 u32 adpa, dpll_md; 100 u32 adpa, dpll_md;
102 u32 adpa_reg; 101 u32 adpa_reg;
103 102
104 if (psb_intel_crtc->pipe == 0) 103 if (gma_crtc->pipe == 0)
105 dpll_md_reg = DPLL_A_MD; 104 dpll_md_reg = DPLL_A_MD;
106 else 105 else
107 dpll_md_reg = DPLL_B_MD; 106 dpll_md_reg = DPLL_B_MD;
@@ -124,7 +123,7 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
124 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 123 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
125 adpa |= ADPA_VSYNC_ACTIVE_HIGH; 124 adpa |= ADPA_VSYNC_ACTIVE_HIGH;
126 125
127 if (psb_intel_crtc->pipe == 0) 126 if (gma_crtc->pipe == 0)
128 adpa |= ADPA_PIPE_A_SELECT; 127 adpa |= ADPA_PIPE_A_SELECT;
129 else 128 else
130 adpa |= ADPA_PIPE_B_SELECT; 129 adpa |= ADPA_PIPE_B_SELECT;
@@ -197,10 +196,9 @@ static enum drm_connector_status cdv_intel_crt_detect(
197 196
198static void cdv_intel_crt_destroy(struct drm_connector *connector) 197static void cdv_intel_crt_destroy(struct drm_connector *connector)
199{ 198{
200 struct psb_intel_encoder *psb_intel_encoder = 199 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
201 psb_intel_attached_encoder(connector);
202 200
203 psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus); 201 psb_intel_i2c_destroy(gma_encoder->ddc_bus);
204 drm_sysfs_connector_remove(connector); 202 drm_sysfs_connector_remove(connector);
205 drm_connector_cleanup(connector); 203 drm_connector_cleanup(connector);
206 kfree(connector); 204 kfree(connector);
@@ -208,9 +206,9 @@ static void cdv_intel_crt_destroy(struct drm_connector *connector)
208 206
209static int cdv_intel_crt_get_modes(struct drm_connector *connector) 207static int cdv_intel_crt_get_modes(struct drm_connector *connector)
210{ 208{
211 struct psb_intel_encoder *psb_intel_encoder = 209 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
212 psb_intel_attached_encoder(connector); 210 return psb_intel_ddc_get_modes(connector,
213 return psb_intel_ddc_get_modes(connector, &psb_intel_encoder->ddc_bus->adapter); 211 &gma_encoder->ddc_bus->adapter);
214} 212}
215 213
216static int cdv_intel_crt_set_property(struct drm_connector *connector, 214static int cdv_intel_crt_set_property(struct drm_connector *connector,
@@ -227,8 +225,8 @@ static int cdv_intel_crt_set_property(struct drm_connector *connector,
227static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = { 225static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
228 .dpms = cdv_intel_crt_dpms, 226 .dpms = cdv_intel_crt_dpms,
229 .mode_fixup = cdv_intel_crt_mode_fixup, 227 .mode_fixup = cdv_intel_crt_mode_fixup,
230 .prepare = psb_intel_encoder_prepare, 228 .prepare = gma_encoder_prepare,
231 .commit = psb_intel_encoder_commit, 229 .commit = gma_encoder_commit,
232 .mode_set = cdv_intel_crt_mode_set, 230 .mode_set = cdv_intel_crt_mode_set,
233}; 231};
234 232
@@ -244,7 +242,7 @@ static const struct drm_connector_helper_funcs
244 cdv_intel_crt_connector_helper_funcs = { 242 cdv_intel_crt_connector_helper_funcs = {
245 .mode_valid = cdv_intel_crt_mode_valid, 243 .mode_valid = cdv_intel_crt_mode_valid,
246 .get_modes = cdv_intel_crt_get_modes, 244 .get_modes = cdv_intel_crt_get_modes,
247 .best_encoder = psb_intel_best_encoder, 245 .best_encoder = gma_best_encoder,
248}; 246};
249 247
250static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder) 248static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
@@ -260,32 +258,31 @@ void cdv_intel_crt_init(struct drm_device *dev,
260 struct psb_intel_mode_device *mode_dev) 258 struct psb_intel_mode_device *mode_dev)
261{ 259{
262 260
263 struct psb_intel_connector *psb_intel_connector; 261 struct gma_connector *gma_connector;
264 struct psb_intel_encoder *psb_intel_encoder; 262 struct gma_encoder *gma_encoder;
265 struct drm_connector *connector; 263 struct drm_connector *connector;
266 struct drm_encoder *encoder; 264 struct drm_encoder *encoder;
267 265
268 u32 i2c_reg; 266 u32 i2c_reg;
269 267
270 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); 268 gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
271 if (!psb_intel_encoder) 269 if (!gma_encoder)
272 return; 270 return;
273 271
274 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); 272 gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
275 if (!psb_intel_connector) 273 if (!gma_connector)
276 goto failed_connector; 274 goto failed_connector;
277 275
278 connector = &psb_intel_connector->base; 276 connector = &gma_connector->base;
279 connector->polled = DRM_CONNECTOR_POLL_HPD; 277 connector->polled = DRM_CONNECTOR_POLL_HPD;
280 drm_connector_init(dev, connector, 278 drm_connector_init(dev, connector,
281 &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 279 &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
282 280
283 encoder = &psb_intel_encoder->base; 281 encoder = &gma_encoder->base;
284 drm_encoder_init(dev, encoder, 282 drm_encoder_init(dev, encoder,
285 &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC); 283 &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
286 284
287 psb_intel_connector_attach_encoder(psb_intel_connector, 285 gma_connector_attach_encoder(gma_connector, gma_encoder);
288 psb_intel_encoder);
289 286
290 /* Set up the DDC bus. */ 287 /* Set up the DDC bus. */
291 i2c_reg = GPIOA; 288 i2c_reg = GPIOA;
@@ -294,15 +291,15 @@ void cdv_intel_crt_init(struct drm_device *dev,
294 if (dev_priv->crt_ddc_bus != 0) 291 if (dev_priv->crt_ddc_bus != 0)
295 i2c_reg = dev_priv->crt_ddc_bus; 292 i2c_reg = dev_priv->crt_ddc_bus;
296 }*/ 293 }*/
297 psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev, 294 gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
298 i2c_reg, "CRTDDC_A"); 295 i2c_reg, "CRTDDC_A");
299 if (!psb_intel_encoder->ddc_bus) { 296 if (!gma_encoder->ddc_bus) {
300 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " 297 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
301 "failed.\n"); 298 "failed.\n");
302 goto failed_ddc; 299 goto failed_ddc;
303 } 300 }
304 301
305 psb_intel_encoder->type = INTEL_OUTPUT_ANALOG; 302 gma_encoder->type = INTEL_OUTPUT_ANALOG;
306 /* 303 /*
307 psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT); 304 psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
308 psb_intel_output->crtc_mask = (1 << 0) | (1 << 1); 305 psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
@@ -318,10 +315,10 @@ void cdv_intel_crt_init(struct drm_device *dev,
318 315
319 return; 316 return;
320failed_ddc: 317failed_ddc:
321 drm_encoder_cleanup(&psb_intel_encoder->base); 318 drm_encoder_cleanup(&gma_encoder->base);
322 drm_connector_cleanup(&psb_intel_connector->base); 319 drm_connector_cleanup(&gma_connector->base);
323 kfree(psb_intel_connector); 320 kfree(gma_connector);
324failed_connector: 321failed_connector:
325 kfree(psb_intel_encoder); 322 kfree(gma_encoder);
326 return; 323 return;
327} 324}
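
Worth noting that the rename preserves the init function's unwind discipline: each failure label undoes exactly the steps completed so far, in reverse order, so no path leaks the encoder or connector. The skeleton of that pattern, with hypothetical types and registration helper:

#include <linux/slab.h>

struct my_encoder { int placeholder; };
struct my_connector { int placeholder; };

static int my_register(struct my_encoder *e, struct my_connector *c);

static int my_output_init(void)
{
	struct my_encoder *enc;
	struct my_connector *con;

	enc = kzalloc(sizeof(*enc), GFP_KERNEL);
	if (!enc)
		return -ENOMEM;

	con = kzalloc(sizeof(*con), GFP_KERNEL);
	if (!con)
		goto free_encoder;

	if (my_register(enc, con))
		goto free_connector;

	return 0;

free_connector:
	kfree(con);
free_encoder:
	kfree(enc);
	return -ENOMEM;
}
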
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 82430ad8ba62..8fbfa06da62d 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -19,54 +19,20 @@
19 */ 19 */
20 20
21#include <linux/i2c.h> 21#include <linux/i2c.h>
22#include <linux/pm_runtime.h>
23 22
24#include <drm/drmP.h> 23#include <drm/drmP.h>
25#include "framebuffer.h" 24#include "framebuffer.h"
26#include "psb_drv.h" 25#include "psb_drv.h"
27#include "psb_intel_drv.h" 26#include "psb_intel_drv.h"
28#include "psb_intel_reg.h" 27#include "psb_intel_reg.h"
29#include "psb_intel_display.h" 28#include "gma_display.h"
30#include "power.h" 29#include "power.h"
31#include "cdv_device.h" 30#include "cdv_device.h"
32 31
32static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
33 struct drm_crtc *crtc, int target,
34 int refclk, struct gma_clock_t *best_clock);
33 35
34struct cdv_intel_range_t {
35 int min, max;
36};
37
38struct cdv_intel_p2_t {
39 int dot_limit;
40 int p2_slow, p2_fast;
41};
42
43struct cdv_intel_clock_t {
44 /* given values */
45 int n;
46 int m1, m2;
47 int p1, p2;
48 /* derived values */
49 int dot;
50 int vco;
51 int m;
52 int p;
53};
54
55#define INTEL_P2_NUM 2
56
57struct cdv_intel_limit_t {
58 struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
59 struct cdv_intel_p2_t p2;
60 bool (*find_pll)(const struct cdv_intel_limit_t *, struct drm_crtc *,
61 int, int, struct cdv_intel_clock_t *);
62};
63
64static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
65 struct drm_crtc *crtc, int target, int refclk,
66 struct cdv_intel_clock_t *best_clock);
67static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
68 int refclk,
69 struct cdv_intel_clock_t *best_clock);
70 36
71#define CDV_LIMIT_SINGLE_LVDS_96 0 37#define CDV_LIMIT_SINGLE_LVDS_96 0
72#define CDV_LIMIT_SINGLE_LVDS_100 1 38#define CDV_LIMIT_SINGLE_LVDS_100 1
@@ -75,7 +41,7 @@ static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct
75#define CDV_LIMIT_DP_27 4 41#define CDV_LIMIT_DP_27 4
76#define CDV_LIMIT_DP_100 5 42#define CDV_LIMIT_DP_100 5
77 43
78static const struct cdv_intel_limit_t cdv_intel_limits[] = { 44static const struct gma_limit_t cdv_intel_limits[] = {
79 { /* CDV_SINGLE_LVDS_96MHz */ 45 { /* CDV_SINGLE_LVDS_96MHz */
80 .dot = {.min = 20000, .max = 115500}, 46 .dot = {.min = 20000, .max = 115500},
81 .vco = {.min = 1800000, .max = 3600000}, 47 .vco = {.min = 1800000, .max = 3600000},
@@ -85,9 +51,8 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
85 .m2 = {.min = 58, .max = 158}, 51 .m2 = {.min = 58, .max = 158},
86 .p = {.min = 28, .max = 140}, 52 .p = {.min = 28, .max = 140},
87 .p1 = {.min = 2, .max = 10}, 53 .p1 = {.min = 2, .max = 10},
88 .p2 = {.dot_limit = 200000, 54 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
89 .p2_slow = 14, .p2_fast = 14}, 55 .find_pll = gma_find_best_pll,
90 .find_pll = cdv_intel_find_best_PLL,
91 }, 56 },
92 { /* CDV_SINGLE_LVDS_100MHz */ 57 { /* CDV_SINGLE_LVDS_100MHz */
93 .dot = {.min = 20000, .max = 115500}, 58 .dot = {.min = 20000, .max = 115500},
@@ -102,7 +67,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
102 * is 80-224Mhz. Prefer single channel as much as possible. 67 * is 80-224Mhz. Prefer single channel as much as possible.
103 */ 68 */
104 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14}, 69 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
105 .find_pll = cdv_intel_find_best_PLL, 70 .find_pll = gma_find_best_pll,
106 }, 71 },
107 { /* CDV_DAC_HDMI_27MHz */ 72 { /* CDV_DAC_HDMI_27MHz */
108 .dot = {.min = 20000, .max = 400000}, 73 .dot = {.min = 20000, .max = 400000},
@@ -114,7 +79,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
114 .p = {.min = 5, .max = 90}, 79 .p = {.min = 5, .max = 90},
115 .p1 = {.min = 1, .max = 9}, 80 .p1 = {.min = 1, .max = 9},
116 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5}, 81 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
117 .find_pll = cdv_intel_find_best_PLL, 82 .find_pll = gma_find_best_pll,
118 }, 83 },
119 { /* CDV_DAC_HDMI_96MHz */ 84 { /* CDV_DAC_HDMI_96MHz */
120 .dot = {.min = 20000, .max = 400000}, 85 .dot = {.min = 20000, .max = 400000},
@@ -126,7 +91,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
126 .p = {.min = 5, .max = 100}, 91 .p = {.min = 5, .max = 100},
127 .p1 = {.min = 1, .max = 10}, 92 .p1 = {.min = 1, .max = 10},
128 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5}, 93 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
129 .find_pll = cdv_intel_find_best_PLL, 94 .find_pll = gma_find_best_pll,
130 }, 95 },
131 { /* CDV_DP_27MHz */ 96 { /* CDV_DP_27MHz */
132 .dot = {.min = 160000, .max = 272000}, 97 .dot = {.min = 160000, .max = 272000},
@@ -255,10 +220,10 @@ void cdv_sb_reset(struct drm_device *dev)
255 */ 220 */
256static int 221static int
257cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc, 222cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
258 struct cdv_intel_clock_t *clock, bool is_lvds, u32 ddi_select) 223 struct gma_clock_t *clock, bool is_lvds, u32 ddi_select)
259{ 224{
260 struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc); 225 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
261 int pipe = psb_crtc->pipe; 226 int pipe = gma_crtc->pipe;
262 u32 m, n_vco, p; 227 u32 m, n_vco, p;
263 int ret = 0; 228 int ret = 0;
264 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 229 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
@@ -405,31 +370,11 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
405 return 0; 370 return 0;
406} 371}
407 372
408/* 373static const struct gma_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
409 * Returns whether any encoder on the specified pipe is of the specified type 374 int refclk)
410 */
411static bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
412{
413 struct drm_device *dev = crtc->dev;
414 struct drm_mode_config *mode_config = &dev->mode_config;
415 struct drm_connector *l_entry;
416
417 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
418 if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
419 struct psb_intel_encoder *psb_intel_encoder =
420 psb_intel_attached_encoder(l_entry);
421 if (psb_intel_encoder->type == type)
422 return true;
423 }
424 }
425 return false;
426}
427
428static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
429 int refclk)
430{ 375{
431 const struct cdv_intel_limit_t *limit; 376 const struct gma_limit_t *limit;
432 if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 377 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
433 /* 378 /*
434 * Now only single-channel LVDS is supported on CDV. If it is 379 * Now only single-channel LVDS is supported on CDV. If it is
435 * incorrect, please add the dual-channel LVDS. 380 * incorrect, please add the dual-channel LVDS.
@@ -438,8 +383,8 @@ static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
438 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96]; 383 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
439 else 384 else
440 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100]; 385 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
441 } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 386 } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
442 psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { 387 gma_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
443 if (refclk == 27000) 388 if (refclk == 27000)
444 limit = &cdv_intel_limits[CDV_LIMIT_DP_27]; 389 limit = &cdv_intel_limits[CDV_LIMIT_DP_27];
445 else 390 else
@@ -454,8 +399,7 @@ static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
454} 399}
455 400
456/* m1 is reserved as 0 in CDV, n is a ring counter */ 401/* m1 is reserved as 0 in CDV, n is a ring counter */
457static void cdv_intel_clock(struct drm_device *dev, 402static void cdv_intel_clock(int refclk, struct gma_clock_t *clock)
458 int refclk, struct cdv_intel_clock_t *clock)
459{ 403{
460 clock->m = clock->m2 + 2; 404 clock->m = clock->m2 + 2;
461 clock->p = clock->p1 * clock->p2; 405 clock->p = clock->p1 * clock->p2;
@@ -463,93 +407,12 @@ static void cdv_intel_clock(struct drm_device *dev,
463 clock->dot = clock->vco / clock->p; 407 clock->dot = clock->vco / clock->p;
464} 408}
465 409
466 410static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
467#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; } 411 struct drm_crtc *crtc, int target,
468static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc, 412 int refclk,
469 const struct cdv_intel_limit_t *limit, 413 struct gma_clock_t *best_clock)
470 struct cdv_intel_clock_t *clock)
471{
472 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
473 INTELPllInvalid("p1 out of range\n");
474 if (clock->p < limit->p.min || limit->p.max < clock->p)
475 INTELPllInvalid("p out of range\n");
476 /* unnecessary to check the range of m(m1/M2)/n again */
477 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
478 INTELPllInvalid("vco out of range\n");
479 /* XXX: We may need to be checking "Dot clock"
480 * depending on the multiplier, connector, etc.,
481 * rather than just a single range.
482 */
483 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
484 INTELPllInvalid("dot out of range\n");
485
486 return true;
487}
488
489static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
490 struct drm_crtc *crtc, int target, int refclk,
491 struct cdv_intel_clock_t *best_clock)
492{ 414{
493 struct drm_device *dev = crtc->dev; 415 struct gma_clock_t clock;
494 struct cdv_intel_clock_t clock;
495 int err = target;
496
497
498 if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
499 (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
500 /*
501 * For LVDS, if the panel is on, just rely on its current
502 * settings for dual-channel. We haven't figured out how to
503 * reliably set up different single/dual channel state, if we
504 * even can.
505 */
506 if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
507 LVDS_CLKB_POWER_UP)
508 clock.p2 = limit->p2.p2_fast;
509 else
510 clock.p2 = limit->p2.p2_slow;
511 } else {
512 if (target < limit->p2.dot_limit)
513 clock.p2 = limit->p2.p2_slow;
514 else
515 clock.p2 = limit->p2.p2_fast;
516 }
517
518 memset(best_clock, 0, sizeof(*best_clock));
519 clock.m1 = 0;
520 /* m1 is reserved as 0 in CDV, n is a ring counter.
521 So skip the m1 loop */
522 for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
523 for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max;
524 clock.m2++) {
525 for (clock.p1 = limit->p1.min;
526 clock.p1 <= limit->p1.max;
527 clock.p1++) {
528 int this_err;
529
530 cdv_intel_clock(dev, refclk, &clock);
531
532 if (!cdv_intel_PLL_is_valid(crtc,
533 limit, &clock))
534 continue;
535
536 this_err = abs(clock.dot - target);
537 if (this_err < err) {
538 *best_clock = clock;
539 err = this_err;
540 }
541 }
542 }
543 }
544
545 return err != target;
546}
547
548static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
549 int refclk,
550 struct cdv_intel_clock_t *best_clock)
551{
552 struct cdv_intel_clock_t clock;
553 if (refclk == 27000) { 416 if (refclk == 27000) {
554 if (target < 200000) { 417 if (target < 200000) {
555 clock.p1 = 2; 418 clock.p1 = 2;
@@ -584,85 +447,10 @@ static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct
584 clock.p = clock.p1 * clock.p2; 447 clock.p = clock.p1 * clock.p2;
585 clock.vco = (refclk * clock.m) / clock.n; 448 clock.vco = (refclk * clock.m) / clock.n;
586 clock.dot = clock.vco / clock.p; 449 clock.dot = clock.vco / clock.p;
587 memcpy(best_clock, &clock, sizeof(struct cdv_intel_clock_t)); 450 memcpy(best_clock, &clock, sizeof(struct gma_clock_t));
588 return true; 451 return true;
589} 452}
590 453
591static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
592 int x, int y, struct drm_framebuffer *old_fb)
593{
594 struct drm_device *dev = crtc->dev;
595 struct drm_psb_private *dev_priv = dev->dev_private;
596 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
597 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
598 int pipe = psb_intel_crtc->pipe;
599 const struct psb_offset *map = &dev_priv->regmap[pipe];
600 unsigned long start, offset;
601 u32 dspcntr;
602 int ret = 0;
603
604 if (!gma_power_begin(dev, true))
605 return 0;
606
607 /* no fb bound */
608 if (!crtc->fb) {
609 dev_err(dev->dev, "No FB bound\n");
610 goto psb_intel_pipe_cleaner;
611 }
612
613
614 /* We are displaying this buffer, make sure it is actually loaded
615 into the GTT */
616 ret = psb_gtt_pin(psbfb->gtt);
617 if (ret < 0)
618 goto psb_intel_pipe_set_base_exit;
619 start = psbfb->gtt->offset;
620 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
621
622 REG_WRITE(map->stride, crtc->fb->pitches[0]);
623
624 dspcntr = REG_READ(map->cntr);
625 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
626
627 switch (crtc->fb->bits_per_pixel) {
628 case 8:
629 dspcntr |= DISPPLANE_8BPP;
630 break;
631 case 16:
632 if (crtc->fb->depth == 15)
633 dspcntr |= DISPPLANE_15_16BPP;
634 else
635 dspcntr |= DISPPLANE_16BPP;
636 break;
637 case 24:
638 case 32:
639 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
640 break;
641 default:
642 dev_err(dev->dev, "Unknown color depth\n");
643 ret = -EINVAL;
644 goto psb_intel_pipe_set_base_exit;
645 }
646 REG_WRITE(map->cntr, dspcntr);
647
648 dev_dbg(dev->dev,
649 "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
650
651 REG_WRITE(map->base, offset);
652 REG_READ(map->base);
653 REG_WRITE(map->surf, start);
654 REG_READ(map->surf);
655
656psb_intel_pipe_cleaner:
657 /* If there was a previous display we can now unpin it */
658 if (old_fb)
659 psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
660
661psb_intel_pipe_set_base_exit:
662 gma_power_end(dev);
663 return ret;
664}
665
666#define FIFO_PIPEA (1 << 0) 454#define FIFO_PIPEA (1 << 0)
667#define FIFO_PIPEB (1 << 1) 455#define FIFO_PIPEB (1 << 1)
668 456
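
The deleted cdv_intel_find_best_PLL() is the classic exhaustive divisor scan, and it survives as the shared gma_find_best_pll() wired into the limit tables above. Per candidate the arithmetic follows cdv_intel_clock(): m = m2 + 2, p = p1 * p2, vco = refclk * m / n, dot = vco / p, keeping whichever candidate lands closest to the target dot clock. A standalone illustration with illustrative divisor ranges and the vco/dot range validation elided:

#include <linux/kernel.h>	/* abs() */

struct pll_cand { int n, m2, p1, p2, dot; };

static bool my_find_best_pll(int refclk, int target, int p2,
			     struct pll_cand *best)
{
	struct pll_cand c = { .p2 = p2 };
	int err = target;

	for (c.n = 1; c.n <= 6; c.n++) {
		for (c.m2 = 58; c.m2 <= 158; c.m2++) {
			for (c.p1 = 2; c.p1 <= 10; c.p1++) {
				int vco = refclk * (c.m2 + 2) / c.n;

				c.dot = vco / (c.p1 * c.p2);
				if (abs(c.dot - target) < err) {
					err = abs(c.dot - target);
					*best = c;
				}
			}
		}
	}
	return err != target;	/* true once any candidate improved on target */
}
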
@@ -670,12 +458,12 @@ static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
670{ 458{
671 struct drm_crtc *crtc; 459 struct drm_crtc *crtc;
672 struct drm_psb_private *dev_priv = dev->dev_private; 460 struct drm_psb_private *dev_priv = dev->dev_private;
673 struct psb_intel_crtc *psb_intel_crtc = NULL; 461 struct gma_crtc *gma_crtc = NULL;
674 462
675 crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 463 crtc = dev_priv->pipe_to_crtc_mapping[pipe];
676 psb_intel_crtc = to_psb_intel_crtc(crtc); 464 gma_crtc = to_gma_crtc(crtc);
677 465
678 if (crtc->fb == NULL || !psb_intel_crtc->active) 466 if (crtc->fb == NULL || !gma_crtc->active)
679 return false; 467 return false;
680 return true; 468 return true;
681} 469}
@@ -701,29 +489,29 @@ static bool cdv_intel_single_pipe_active (struct drm_device *dev)
701 489
702static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc) 490static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc)
703{ 491{
704 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 492 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
705 struct drm_mode_config *mode_config = &dev->mode_config; 493 struct drm_mode_config *mode_config = &dev->mode_config;
706 struct drm_connector *connector; 494 struct drm_connector *connector;
707 495
708 if (psb_intel_crtc->pipe != 1) 496 if (gma_crtc->pipe != 1)
709 return false; 497 return false;
710 498
711 list_for_each_entry(connector, &mode_config->connector_list, head) { 499 list_for_each_entry(connector, &mode_config->connector_list, head) {
712 struct psb_intel_encoder *psb_intel_encoder = 500 struct gma_encoder *gma_encoder =
713 psb_intel_attached_encoder(connector); 501 gma_attached_encoder(connector);
714 502
715 if (!connector->encoder 503 if (!connector->encoder
716 || connector->encoder->crtc != crtc) 504 || connector->encoder->crtc != crtc)
717 continue; 505 continue;
718 506
719 if (psb_intel_encoder->type == INTEL_OUTPUT_LVDS) 507 if (gma_encoder->type == INTEL_OUTPUT_LVDS)
720 return true; 508 return true;
721 } 509 }
722 510
723 return false; 511 return false;
724} 512}
725 513
726static void cdv_intel_disable_self_refresh (struct drm_device *dev) 514void cdv_disable_sr(struct drm_device *dev)
727{ 515{
728 if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) { 516 if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) {
729 517
@@ -731,7 +519,7 @@ static void cdv_intel_disable_self_refresh (struct drm_device *dev)
731 REG_WRITE(FW_BLC_SELF, (REG_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN)); 519 REG_WRITE(FW_BLC_SELF, (REG_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN));
732 REG_READ(FW_BLC_SELF); 520 REG_READ(FW_BLC_SELF);
733 521
734 cdv_intel_wait_for_vblank(dev); 522 gma_wait_for_vblank(dev);
735 523
736 /* Cedarview workaround to write ovelay plane, which force to leave 524 /* Cedarview workaround to write ovelay plane, which force to leave
737 * MAX_FIFO state. 525 * MAX_FIFO state.
@@ -739,13 +527,14 @@ static void cdv_intel_disable_self_refresh (struct drm_device *dev)
739 REG_WRITE(OV_OVADD, 0/*dev_priv->ovl_offset*/); 527 REG_WRITE(OV_OVADD, 0/*dev_priv->ovl_offset*/);
740 REG_READ(OV_OVADD); 528 REG_READ(OV_OVADD);
741 529
742 cdv_intel_wait_for_vblank(dev); 530 gma_wait_for_vblank(dev);
743 } 531 }
744 532
745} 533}
746 534
747static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc *crtc) 535void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
748{ 536{
537 struct drm_psb_private *dev_priv = dev->dev_private;
749 538
750 if (cdv_intel_single_pipe_active(dev)) { 539 if (cdv_intel_single_pipe_active(dev)) {
751 u32 fw; 540 u32 fw;
@@ -780,12 +569,12 @@ static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc
780 569
781 REG_WRITE(DSPFW6, 0x10); 570 REG_WRITE(DSPFW6, 0x10);
782 571
783 cdv_intel_wait_for_vblank(dev); 572 gma_wait_for_vblank(dev);
784 573
785 /* enable self-refresh for single pipe active */ 574 /* enable self-refresh for single pipe active */
786 REG_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 575 REG_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
787 REG_READ(FW_BLC_SELF); 576 REG_READ(FW_BLC_SELF);
788 cdv_intel_wait_for_vblank(dev); 577 gma_wait_for_vblank(dev);
789 578
790 } else { 579 } else {
791 580
@@ -797,216 +586,12 @@ static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc
797 REG_WRITE(DSPFW5, 0x01010101); 586 REG_WRITE(DSPFW5, 0x01010101);
798 REG_WRITE(DSPFW6, 0x1d0); 587 REG_WRITE(DSPFW6, 0x1d0);
799 588
800 cdv_intel_wait_for_vblank(dev); 589 gma_wait_for_vblank(dev);
801
802 cdv_intel_disable_self_refresh(dev);
803
804 }
805}
806
807/** Loads the palette/gamma unit for the CRTC with the prepared values */
808static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
809{
810 struct drm_device *dev = crtc->dev;
811 struct drm_psb_private *dev_priv = dev->dev_private;
812 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
813 int palreg = PALETTE_A;
814 int i;
815
816 /* The clocks have to be on to load the palette. */
817 if (!crtc->enabled)
818 return;
819
820 switch (psb_intel_crtc->pipe) {
821 case 0:
822 break;
823 case 1:
824 palreg = PALETTE_B;
825 break;
826 case 2:
827 palreg = PALETTE_C;
828 break;
829 default:
830 dev_err(dev->dev, "Illegal Pipe Number.\n");
831 return;
832 }
833
834 if (gma_power_begin(dev, false)) {
835 for (i = 0; i < 256; i++) {
836 REG_WRITE(palreg + 4 * i,
837 ((psb_intel_crtc->lut_r[i] +
838 psb_intel_crtc->lut_adj[i]) << 16) |
839 ((psb_intel_crtc->lut_g[i] +
840 psb_intel_crtc->lut_adj[i]) << 8) |
841 (psb_intel_crtc->lut_b[i] +
842 psb_intel_crtc->lut_adj[i]));
843 }
844 gma_power_end(dev);
845 } else {
846 for (i = 0; i < 256; i++) {
847 dev_priv->regs.pipe[0].palette[i] =
848 ((psb_intel_crtc->lut_r[i] +
849 psb_intel_crtc->lut_adj[i]) << 16) |
850 ((psb_intel_crtc->lut_g[i] +
851 psb_intel_crtc->lut_adj[i]) << 8) |
852 (psb_intel_crtc->lut_b[i] +
853 psb_intel_crtc->lut_adj[i]);
854 }
855
856 }
857}
858
859/**
860 * Sets the power management mode of the pipe and plane.
861 *
862 * This code should probably grow support for turning the cursor off and back
863 * on appropriately at the same time as we're turning the pipe off/on.
864 */
865static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
866{
867 struct drm_device *dev = crtc->dev;
868 struct drm_psb_private *dev_priv = dev->dev_private;
869 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
870 int pipe = psb_intel_crtc->pipe;
871 const struct psb_offset *map = &dev_priv->regmap[pipe];
872 u32 temp;
873
874 /* XXX: When our outputs are all unaware of DPMS modes other than off
875 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
876 */
877 cdv_intel_disable_self_refresh(dev);
878
879 switch (mode) {
880 case DRM_MODE_DPMS_ON:
881 case DRM_MODE_DPMS_STANDBY:
882 case DRM_MODE_DPMS_SUSPEND:
883 if (psb_intel_crtc->active)
884 break;
885
886 psb_intel_crtc->active = true;
887
888 /* Enable the DPLL */
889 temp = REG_READ(map->dpll);
890 if ((temp & DPLL_VCO_ENABLE) == 0) {
891 REG_WRITE(map->dpll, temp);
892 REG_READ(map->dpll);
893 /* Wait for the clocks to stabilize. */
894 udelay(150);
895 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
896 REG_READ(map->dpll);
897 /* Wait for the clocks to stabilize. */
898 udelay(150);
899 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
900 REG_READ(map->dpll);
901 /* Wait for the clocks to stabilize. */
902 udelay(150);
903 }
904
905 /* Jim Bish - switch plan and pipe per scott */
906 /* Enable the plane */
907 temp = REG_READ(map->cntr);
908 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
909 REG_WRITE(map->cntr,
910 temp | DISPLAY_PLANE_ENABLE);
911 /* Flush the plane changes */
912 REG_WRITE(map->base, REG_READ(map->base));
913 }
914
915 udelay(150);
916
917 /* Enable the pipe */
918 temp = REG_READ(map->conf);
919 if ((temp & PIPEACONF_ENABLE) == 0)
920 REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
921
922 temp = REG_READ(map->status);
923 temp &= ~(0xFFFF);
924 temp |= PIPE_FIFO_UNDERRUN;
925 REG_WRITE(map->status, temp);
926 REG_READ(map->status);
927
928 cdv_intel_crtc_load_lut(crtc);
929
930 /* Give the overlay scaler a chance to enable
931 * if it's on this pipe */
932 /* psb_intel_crtc_dpms_video(crtc, true); TODO */
933 break;
934 case DRM_MODE_DPMS_OFF:
935 if (!psb_intel_crtc->active)
936 break;
937
938 psb_intel_crtc->active = false;
939
940 /* Give the overlay scaler a chance to disable
941 * if it's on this pipe */
942 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
943
944 /* Disable the VGA plane that we never use */
945 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
946
947 /* Jim Bish - changed pipe/plane here as well. */
948
949 drm_vblank_off(dev, pipe);
950 /* Wait for vblank for the disable to take effect */
951 cdv_intel_wait_for_vblank(dev);
952
953 /* Next, disable display pipes */
954 temp = REG_READ(map->conf);
955 if ((temp & PIPEACONF_ENABLE) != 0) {
956 REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
957 REG_READ(map->conf);
958 }
959
960 /* Wait for vblank for the disable to take effect. */
961 cdv_intel_wait_for_vblank(dev);
962
963 udelay(150);
964
965 /* Disable display plane */
966 temp = REG_READ(map->cntr);
967 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
968 REG_WRITE(map->cntr,
969 temp & ~DISPLAY_PLANE_ENABLE);
970 /* Flush the plane changes */
971 REG_WRITE(map->base, REG_READ(map->base));
972 REG_READ(map->base);
973 }
974
975 temp = REG_READ(map->dpll);
976 if ((temp & DPLL_VCO_ENABLE) != 0) {
977 REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
978 REG_READ(map->dpll);
979 }
980 590
981 /* Wait for the clocks to turn off. */ 591 dev_priv->ops->disable_sr(dev);
982 udelay(150);
983 break;
984 } 592 }
985 cdv_intel_update_watermark(dev, crtc);
986 /*Set FIFO Watermarks*/
987 REG_WRITE(DSPARB, 0x3F3E);
988}
989
990static void cdv_intel_crtc_prepare(struct drm_crtc *crtc)
991{
992 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
993 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
994}
995
996static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
997{
998 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
999 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
1000}
1001
1002static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
1003 const struct drm_display_mode *mode,
1004 struct drm_display_mode *adjusted_mode)
1005{
1006 return true;
1007} 593}
1008 594
1009
1010/** 595/**
1011 * Return the pipe currently connected to the panel fitter, 596 * Return the pipe currently connected to the panel fitter,
1012 * or -1 if the panel fitter is not present or not in use 597 * or -1 if the panel fitter is not present or not in use
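
All of the CRTC plumbing removed here (pipe_set_base, the palette load, the DPMS state machine, prepare/commit, the no-op mode_fixup) reappears exactly once in the new gma_display.c, and hooking a chip up to it is just a matter of pointing the helper vtable at the shared implementations. A sketch, assuming the shared helpers carry the gma_crtc_*/gma_pipe_* names this series introduces:

static const struct drm_crtc_helper_funcs my_crtc_helper_funcs = {
	.dpms		= gma_crtc_dpms,	/* assumed shared names */
	.prepare	= gma_crtc_prepare,
	.commit		= gma_crtc_commit,
	.mode_set	= cdv_intel_crtc_mode_set,
	.mode_set_base	= gma_pipe_set_base,
};
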
@@ -1031,31 +616,31 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
1031{ 616{
1032 struct drm_device *dev = crtc->dev; 617 struct drm_device *dev = crtc->dev;
1033 struct drm_psb_private *dev_priv = dev->dev_private; 618 struct drm_psb_private *dev_priv = dev->dev_private;
1034 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 619 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
1035 int pipe = psb_intel_crtc->pipe; 620 int pipe = gma_crtc->pipe;
1036 const struct psb_offset *map = &dev_priv->regmap[pipe]; 621 const struct psb_offset *map = &dev_priv->regmap[pipe];
1037 int refclk; 622 int refclk;
1038 struct cdv_intel_clock_t clock; 623 struct gma_clock_t clock;
1039 u32 dpll = 0, dspcntr, pipeconf; 624 u32 dpll = 0, dspcntr, pipeconf;
1040 bool ok; 625 bool ok;
1041 bool is_crt = false, is_lvds = false, is_tv = false; 626 bool is_crt = false, is_lvds = false, is_tv = false;
1042 bool is_hdmi = false, is_dp = false; 627 bool is_hdmi = false, is_dp = false;
1043 struct drm_mode_config *mode_config = &dev->mode_config; 628 struct drm_mode_config *mode_config = &dev->mode_config;
1044 struct drm_connector *connector; 629 struct drm_connector *connector;
1045 const struct cdv_intel_limit_t *limit; 630 const struct gma_limit_t *limit;
1046 u32 ddi_select = 0; 631 u32 ddi_select = 0;
1047 bool is_edp = false; 632 bool is_edp = false;
1048 633
1049 list_for_each_entry(connector, &mode_config->connector_list, head) { 634 list_for_each_entry(connector, &mode_config->connector_list, head) {
1050 struct psb_intel_encoder *psb_intel_encoder = 635 struct gma_encoder *gma_encoder =
1051 psb_intel_attached_encoder(connector); 636 gma_attached_encoder(connector);
1052 637
1053 if (!connector->encoder 638 if (!connector->encoder
1054 || connector->encoder->crtc != crtc) 639 || connector->encoder->crtc != crtc)
1055 continue; 640 continue;
1056 641
1057 ddi_select = psb_intel_encoder->ddi_select; 642 ddi_select = gma_encoder->ddi_select;
1058 switch (psb_intel_encoder->type) { 643 switch (gma_encoder->type) {
1059 case INTEL_OUTPUT_LVDS: 644 case INTEL_OUTPUT_LVDS:
1060 is_lvds = true; 645 is_lvds = true;
1061 break; 646 break;
@@ -1108,12 +693,13 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
1108 693
1109 drm_mode_debug_printmodeline(adjusted_mode); 694 drm_mode_debug_printmodeline(adjusted_mode);
1110 695
1111 limit = cdv_intel_limit(crtc, refclk); 696 limit = gma_crtc->clock_funcs->limit(crtc, refclk);
1112 697
1113 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, 698 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
1114 &clock); 699 &clock);
1115 if (!ok) { 700 if (!ok) {
1116 dev_err(dev->dev, "Couldn't find PLL settings for mode!\n"); 701 DRM_ERROR("Couldn't find PLL settings for mode! target: %d, actual: %d",
702 adjusted_mode->clock, clock.dot);
1117 return 0; 703 return 0;
1118 } 704 }
1119 705
@@ -1264,7 +850,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
1264 REG_WRITE(map->conf, pipeconf); 850 REG_WRITE(map->conf, pipeconf);
1265 REG_READ(map->conf); 851 REG_READ(map->conf);
1266 852
1267 cdv_intel_wait_for_vblank(dev); 853 gma_wait_for_vblank(dev);
1268 854
1269 REG_WRITE(map->cntr, dspcntr); 855 REG_WRITE(map->cntr, dspcntr);
1270 856
@@ -1275,344 +861,16 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
1275 crtc_funcs->mode_set_base(crtc, x, y, old_fb); 861 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
1276 } 862 }
1277 863
1278 cdv_intel_wait_for_vblank(dev); 864 gma_wait_for_vblank(dev);
1279
1280 return 0;
1281}
1282
1283
1284/**
1285 * Save HW states of the given crtc
1286 */
1287static void cdv_intel_crtc_save(struct drm_crtc *crtc)
1288{
1289 struct drm_device *dev = crtc->dev;
1290 struct drm_psb_private *dev_priv = dev->dev_private;
1291 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1292 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
1293 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
1294 uint32_t paletteReg;
1295 int i;
1296
1297 if (!crtc_state) {
1298 dev_dbg(dev->dev, "No CRTC state found\n");
1299 return;
1300 }
1301
1302 crtc_state->saveDSPCNTR = REG_READ(map->cntr);
1303 crtc_state->savePIPECONF = REG_READ(map->conf);
1304 crtc_state->savePIPESRC = REG_READ(map->src);
1305 crtc_state->saveFP0 = REG_READ(map->fp0);
1306 crtc_state->saveFP1 = REG_READ(map->fp1);
1307 crtc_state->saveDPLL = REG_READ(map->dpll);
1308 crtc_state->saveHTOTAL = REG_READ(map->htotal);
1309 crtc_state->saveHBLANK = REG_READ(map->hblank);
1310 crtc_state->saveHSYNC = REG_READ(map->hsync);
1311 crtc_state->saveVTOTAL = REG_READ(map->vtotal);
1312 crtc_state->saveVBLANK = REG_READ(map->vblank);
1313 crtc_state->saveVSYNC = REG_READ(map->vsync);
1314 crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
1315
1316 /*NOTE: DSPSIZE DSPPOS only for psb*/
1317 crtc_state->saveDSPSIZE = REG_READ(map->size);
1318 crtc_state->saveDSPPOS = REG_READ(map->pos);
1319
1320 crtc_state->saveDSPBASE = REG_READ(map->base);
1321
1322 DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
1323 crtc_state->saveDSPCNTR,
1324 crtc_state->savePIPECONF,
1325 crtc_state->savePIPESRC,
1326 crtc_state->saveFP0,
1327 crtc_state->saveFP1,
1328 crtc_state->saveDPLL,
1329 crtc_state->saveHTOTAL,
1330 crtc_state->saveHBLANK,
1331 crtc_state->saveHSYNC,
1332 crtc_state->saveVTOTAL,
1333 crtc_state->saveVBLANK,
1334 crtc_state->saveVSYNC,
1335 crtc_state->saveDSPSTRIDE,
1336 crtc_state->saveDSPSIZE,
1337 crtc_state->saveDSPPOS,
1338 crtc_state->saveDSPBASE
1339 );
1340
1341 paletteReg = map->palette;
1342 for (i = 0; i < 256; ++i)
1343 crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
1344}
1345
1346/**
1347 * Restore HW states of the given crtc
1348 */
1349static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
1350{
1351 struct drm_device *dev = crtc->dev;
1352 struct drm_psb_private *dev_priv = dev->dev_private;
1353 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1354 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
1355 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
1356 uint32_t paletteReg;
1357 int i;
1358
1359 if (!crtc_state) {
1360 dev_dbg(dev->dev, "No crtc state\n");
1361 return;
1362 }
1363
1364 DRM_DEBUG(
1365 "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
1366 REG_READ(map->cntr),
1367 REG_READ(map->conf),
1368 REG_READ(map->src),
1369 REG_READ(map->fp0),
1370 REG_READ(map->fp1),
1371 REG_READ(map->dpll),
1372 REG_READ(map->htotal),
1373 REG_READ(map->hblank),
1374 REG_READ(map->hsync),
1375 REG_READ(map->vtotal),
1376 REG_READ(map->vblank),
1377 REG_READ(map->vsync),
1378 REG_READ(map->stride),
1379 REG_READ(map->size),
1380 REG_READ(map->pos),
1381 REG_READ(map->base)
1382 );
1383
1384 DRM_DEBUG(
1385 "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
1386 crtc_state->saveDSPCNTR,
1387 crtc_state->savePIPECONF,
1388 crtc_state->savePIPESRC,
1389 crtc_state->saveFP0,
1390 crtc_state->saveFP1,
1391 crtc_state->saveDPLL,
1392 crtc_state->saveHTOTAL,
1393 crtc_state->saveHBLANK,
1394 crtc_state->saveHSYNC,
1395 crtc_state->saveVTOTAL,
1396 crtc_state->saveVBLANK,
1397 crtc_state->saveVSYNC,
1398 crtc_state->saveDSPSTRIDE,
1399 crtc_state->saveDSPSIZE,
1400 crtc_state->saveDSPPOS,
1401 crtc_state->saveDSPBASE
1402 );
1403
1404
1405 if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
1406 REG_WRITE(map->dpll,
1407 crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
1408 REG_READ(map->dpll);
1409 DRM_DEBUG("write dpll: %x\n",
1410 REG_READ(map->dpll));
1411 udelay(150);
1412 }
1413
1414 REG_WRITE(map->fp0, crtc_state->saveFP0);
1415 REG_READ(map->fp0);
1416
1417 REG_WRITE(map->fp1, crtc_state->saveFP1);
1418 REG_READ(map->fp1);
1419
1420 REG_WRITE(map->dpll, crtc_state->saveDPLL);
1421 REG_READ(map->dpll);
1422 udelay(150);
1423
1424 REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
1425 REG_WRITE(map->hblank, crtc_state->saveHBLANK);
1426 REG_WRITE(map->hsync, crtc_state->saveHSYNC);
1427 REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
1428 REG_WRITE(map->vblank, crtc_state->saveVBLANK);
1429 REG_WRITE(map->vsync, crtc_state->saveVSYNC);
1430 REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
1431
1432 REG_WRITE(map->size, crtc_state->saveDSPSIZE);
1433 REG_WRITE(map->pos, crtc_state->saveDSPPOS);
1434
1435 REG_WRITE(map->src, crtc_state->savePIPESRC);
1436 REG_WRITE(map->base, crtc_state->saveDSPBASE);
1437 REG_WRITE(map->conf, crtc_state->savePIPECONF);
1438
1439 cdv_intel_wait_for_vblank(dev);
1440
1441 REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
1442 REG_WRITE(map->base, crtc_state->saveDSPBASE);
1443
1444 cdv_intel_wait_for_vblank(dev);
1445
1446 paletteReg = map->palette;
1447 for (i = 0; i < 256; ++i)
1448 REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
1449}
1450
1451static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
1452 struct drm_file *file_priv,
1453 uint32_t handle,
1454 uint32_t width, uint32_t height)
1455{
1456 struct drm_device *dev = crtc->dev;
1457 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1458 int pipe = psb_intel_crtc->pipe;
1459 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
1460 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
1461 uint32_t temp;
1462 size_t addr = 0;
1463 struct gtt_range *gt;
1464 struct drm_gem_object *obj;
1465 int ret = 0;
1466
1467	/* if we want to turn off the cursor, ignore width and height */
1468 if (!handle) {
1469 /* turn off the cursor */
1470 temp = CURSOR_MODE_DISABLE;
1471
1472 if (gma_power_begin(dev, false)) {
1473 REG_WRITE(control, temp);
1474 REG_WRITE(base, 0);
1475 gma_power_end(dev);
1476 }
1477
1478 /* unpin the old GEM object */
1479 if (psb_intel_crtc->cursor_obj) {
1480 gt = container_of(psb_intel_crtc->cursor_obj,
1481 struct gtt_range, gem);
1482 psb_gtt_unpin(gt);
1483 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1484 psb_intel_crtc->cursor_obj = NULL;
1485 }
1486
1487 return 0;
1488 }
1489
1490 /* Currently we only support 64x64 cursors */
1491 if (width != 64 || height != 64) {
1492 dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
1493 return -EINVAL;
1494 }
1495
1496 obj = drm_gem_object_lookup(dev, file_priv, handle);
1497 if (!obj)
1498 return -ENOENT;
1499
1500 if (obj->size < width * height * 4) {
1501		dev_dbg(dev->dev, "buffer is too small\n");
1502 ret = -ENOMEM;
1503 goto unref_cursor;
1504 }
1505
1506 gt = container_of(obj, struct gtt_range, gem);
1507
1508 /* Pin the memory into the GTT */
1509 ret = psb_gtt_pin(gt);
1510 if (ret) {
1511		dev_err(dev->dev, "Cannot pin down handle 0x%x\n", handle);
1512 goto unref_cursor;
1513 }
1514
1515 addr = gt->offset; /* Or resource.start ??? */
1516
1517 psb_intel_crtc->cursor_addr = addr;
1518
1519 temp = 0;
1520 /* set the pipe for the cursor */
1521 temp |= (pipe << 28);
1522 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
1523
1524 if (gma_power_begin(dev, false)) {
1525 REG_WRITE(control, temp);
1526 REG_WRITE(base, addr);
1527 gma_power_end(dev);
1528 }
1529
1530 /* unpin the old GEM object */
1531 if (psb_intel_crtc->cursor_obj) {
1532 gt = container_of(psb_intel_crtc->cursor_obj,
1533 struct gtt_range, gem);
1534 psb_gtt_unpin(gt);
1535 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1536 }
1537
1538 psb_intel_crtc->cursor_obj = obj;
1539 return ret;
1540
1541unref_cursor:
1542 drm_gem_object_unreference(obj);
1543 return ret;
1544}
1545
1546static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
1547{
1548 struct drm_device *dev = crtc->dev;
1549 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1550 int pipe = psb_intel_crtc->pipe;
1551 uint32_t temp = 0;
1552 uint32_t adder;
1553
1554
1555 if (x < 0) {
1556 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
1557 x = -x;
1558 }
1559 if (y < 0) {
1560 temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
1561 y = -y;
1562 }
1563
1564 temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
1565 temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
1566 865
1567 adder = psb_intel_crtc->cursor_addr;
1568
1569 if (gma_power_begin(dev, false)) {
1570 REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
1571 REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
1572 gma_power_end(dev);
1573 }
1574 return 0; 866 return 0;
1575} 867}
1576 868
1577static void cdv_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
1578 u16 *green, u16 *blue, uint32_t start, uint32_t size)
1579{
1580 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1581 int i;
1582 int end = (start + size > 256) ? 256 : start + size;
1583
1584 for (i = start; i < end; i++) {
1585 psb_intel_crtc->lut_r[i] = red[i] >> 8;
1586 psb_intel_crtc->lut_g[i] = green[i] >> 8;
1587 psb_intel_crtc->lut_b[i] = blue[i] >> 8;
1588 }
1589
1590 cdv_intel_crtc_load_lut(crtc);
1591}
1592
1593static int cdv_crtc_set_config(struct drm_mode_set *set)
1594{
1595 int ret = 0;
1596 struct drm_device *dev = set->crtc->dev;
1597 struct drm_psb_private *dev_priv = dev->dev_private;
1598
1599 if (!dev_priv->rpm_enabled)
1600 return drm_crtc_helper_set_config(set);
1601
1602 pm_runtime_forbid(&dev->pdev->dev);
1603
1604 ret = drm_crtc_helper_set_config(set);
1605
1606 pm_runtime_allow(&dev->pdev->dev);
1607
1608 return ret;
1609}
1610
1611/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */ 869/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
1612 870
1613/* FIXME: why are we using this, should it be cdv_ in this tree ? */ 871/* FIXME: why are we using this, should it be cdv_ in this tree ? */
1614 872
1615static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock) 873static void i8xx_clock(int refclk, struct gma_clock_t *clock)
1616{ 874{
1617 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 875 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
1618 clock->p = clock->p1 * clock->p2; 876 clock->p = clock->p1 * clock->p2;
@@ -1625,12 +883,12 @@ static int cdv_intel_crtc_clock_get(struct drm_device *dev,
1625 struct drm_crtc *crtc) 883 struct drm_crtc *crtc)
1626{ 884{
1627 struct drm_psb_private *dev_priv = dev->dev_private; 885 struct drm_psb_private *dev_priv = dev->dev_private;
1628 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 886 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
1629 int pipe = psb_intel_crtc->pipe; 887 int pipe = gma_crtc->pipe;
1630 const struct psb_offset *map = &dev_priv->regmap[pipe]; 888 const struct psb_offset *map = &dev_priv->regmap[pipe];
1631 u32 dpll; 889 u32 dpll;
1632 u32 fp; 890 u32 fp;
1633 struct cdv_intel_clock_t clock; 891 struct gma_clock_t clock;
1634 bool is_lvds; 892 bool is_lvds;
1635 struct psb_pipe *p = &dev_priv->regs.pipe[pipe]; 893 struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
1636 894
@@ -1703,8 +961,8 @@ static int cdv_intel_crtc_clock_get(struct drm_device *dev,
1703struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev, 961struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
1704 struct drm_crtc *crtc) 962 struct drm_crtc *crtc)
1705{ 963{
1706 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 964 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
1707 int pipe = psb_intel_crtc->pipe; 965 int pipe = gma_crtc->pipe;
1708 struct drm_psb_private *dev_priv = dev->dev_private; 966 struct drm_psb_private *dev_priv = dev->dev_private;
1709 struct psb_pipe *p = &dev_priv->regs.pipe[pipe]; 967 struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
1710 const struct psb_offset *map = &dev_priv->regmap[pipe]; 968 const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -1747,44 +1005,28 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
1747 return mode; 1005 return mode;
1748} 1006}
1749 1007
1750static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
1751{
1752 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1753
1754 kfree(psb_intel_crtc->crtc_state);
1755 drm_crtc_cleanup(crtc);
1756 kfree(psb_intel_crtc);
1757}
1758
1759static void cdv_intel_crtc_disable(struct drm_crtc *crtc)
1760{
1761 struct gtt_range *gt;
1762 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1763
1764 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1765
1766 if (crtc->fb) {
1767 gt = to_psb_fb(crtc->fb)->gtt;
1768 psb_gtt_unpin(gt);
1769 }
1770}
1771
1772const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = { 1008const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
1773 .dpms = cdv_intel_crtc_dpms, 1009 .dpms = gma_crtc_dpms,
1774 .mode_fixup = cdv_intel_crtc_mode_fixup, 1010 .mode_fixup = gma_crtc_mode_fixup,
1775 .mode_set = cdv_intel_crtc_mode_set, 1011 .mode_set = cdv_intel_crtc_mode_set,
1776 .mode_set_base = cdv_intel_pipe_set_base, 1012 .mode_set_base = gma_pipe_set_base,
1777 .prepare = cdv_intel_crtc_prepare, 1013 .prepare = gma_crtc_prepare,
1778 .commit = cdv_intel_crtc_commit, 1014 .commit = gma_crtc_commit,
1779 .disable = cdv_intel_crtc_disable, 1015 .disable = gma_crtc_disable,
1780}; 1016};
1781 1017
1782const struct drm_crtc_funcs cdv_intel_crtc_funcs = { 1018const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
1783 .save = cdv_intel_crtc_save, 1019 .save = gma_crtc_save,
1784 .restore = cdv_intel_crtc_restore, 1020 .restore = gma_crtc_restore,
1785 .cursor_set = cdv_intel_crtc_cursor_set, 1021 .cursor_set = gma_crtc_cursor_set,
1786 .cursor_move = cdv_intel_crtc_cursor_move, 1022 .cursor_move = gma_crtc_cursor_move,
1787 .gamma_set = cdv_intel_crtc_gamma_set, 1023 .gamma_set = gma_crtc_gamma_set,
1788 .set_config = cdv_crtc_set_config, 1024 .set_config = gma_crtc_set_config,
1789 .destroy = cdv_intel_crtc_destroy, 1025 .destroy = gma_crtc_destroy,
1026};
1027
1028const struct gma_clock_funcs cdv_clock_funcs = {
1029 .clock = cdv_intel_clock,
1030 .limit = cdv_intel_limit,
1031 .pll_is_valid = gma_pll_is_valid,
1790}; 1032};
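The cdv_intel_display.c hunks above retire the Cedarview-local CRTC save/restore, cursor, gamma and DPMS helpers in favour of the shared gma_* implementations, and route PLL selection through a per-chip ops table (gma_clock_funcs / cdv_clock_funcs) instead of calling cdv_intel_limit() directly. A minimal stand-alone sketch of that dispatch pattern follows; the struct and callback names mirror the diff, but the toy bodies and numbers are invented for illustration and are not the driver's real definitions.

#include <stdbool.h>
#include <stdio.h>

struct gma_clock_t { int dot; };

struct gma_limit_t {
	bool (*find_pll)(const struct gma_limit_t *limit, int target,
			 int refclk, struct gma_clock_t *clock);
};

struct gma_clock_funcs {
	const struct gma_limit_t *(*limit)(int refclk);
};

static bool toy_find_pll(const struct gma_limit_t *limit, int target,
			 int refclk, struct gma_clock_t *clock)
{
	(void)limit;
	(void)refclk;
	clock->dot = target;	/* pretend the divisor search hit the target */
	return true;
}

static const struct gma_limit_t toy_limit = { .find_pll = toy_find_pll };

static const struct gma_limit_t *toy_limit_for(int refclk)
{
	(void)refclk;	/* the real driver picks a limit table per output/refclk */
	return &toy_limit;
}

static const struct gma_clock_funcs cdv_like_clock_funcs = {
	.limit = toy_limit_for,
};

int main(void)
{
	struct gma_clock_t clock;
	const struct gma_limit_t *limit = cdv_like_clock_funcs.limit(96000);

	/* mode_set now dispatches through the ops table, as in the hunk above */
	if (limit->find_pll(limit, 148500, 96000, &clock))
		printf("dot clock: %d kHz\n", clock.dot);
	return 0;
}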
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 88d9ef6b5b4a..f4eb43573cad 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -34,6 +34,7 @@
34#include "psb_drv.h" 34#include "psb_drv.h"
35#include "psb_intel_drv.h" 35#include "psb_intel_drv.h"
36#include "psb_intel_reg.h" 36#include "psb_intel_reg.h"
37#include "gma_display.h"
37#include <drm/drm_dp_helper.h> 38#include <drm/drm_dp_helper.h>
38 39
39#define _wait_for(COND, MS, W) ({ \ 40#define _wait_for(COND, MS, W) ({ \
@@ -68,7 +69,7 @@ struct cdv_intel_dp {
68 uint8_t link_bw; 69 uint8_t link_bw;
69 uint8_t lane_count; 70 uint8_t lane_count;
70 uint8_t dpcd[4]; 71 uint8_t dpcd[4];
71 struct psb_intel_encoder *encoder; 72 struct gma_encoder *encoder;
72 struct i2c_adapter adapter; 73 struct i2c_adapter adapter;
73 struct i2c_algo_dp_aux_data algo; 74 struct i2c_algo_dp_aux_data algo;
74 uint8_t train_set[4]; 75 uint8_t train_set[4];
@@ -114,18 +115,18 @@ static uint32_t dp_vswing_premph_table[] = {
114 * If a CPU or PCH DP output is attached to an eDP panel, this function 115 * If a CPU or PCH DP output is attached to an eDP panel, this function
115 * will return true, and false otherwise. 116 * will return true, and false otherwise.
116 */ 117 */
117static bool is_edp(struct psb_intel_encoder *encoder) 118static bool is_edp(struct gma_encoder *encoder)
118{ 119{
119 return encoder->type == INTEL_OUTPUT_EDP; 120 return encoder->type == INTEL_OUTPUT_EDP;
120} 121}
121 122
122 123
123static void cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder); 124static void cdv_intel_dp_start_link_train(struct gma_encoder *encoder);
124static void cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder); 125static void cdv_intel_dp_complete_link_train(struct gma_encoder *encoder);
125static void cdv_intel_dp_link_down(struct psb_intel_encoder *encoder); 126static void cdv_intel_dp_link_down(struct gma_encoder *encoder);
126 127
127static int 128static int
128cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder) 129cdv_intel_dp_max_lane_count(struct gma_encoder *encoder)
129{ 130{
130 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 131 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
131 int max_lane_count = 4; 132 int max_lane_count = 4;
@@ -143,7 +144,7 @@ cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder)
143} 144}
144 145
145static int 146static int
146cdv_intel_dp_max_link_bw(struct psb_intel_encoder *encoder) 147cdv_intel_dp_max_link_bw(struct gma_encoder *encoder)
147{ 148{
148 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 149 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
149 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 150 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
@@ -180,7 +181,7 @@ cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes)
180 return (max_link_clock * max_lanes * 19) / 20; 181 return (max_link_clock * max_lanes * 19) / 20;
181} 182}
182 183
183static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder) 184static void cdv_intel_edp_panel_vdd_on(struct gma_encoder *intel_encoder)
184{ 185{
185 struct drm_device *dev = intel_encoder->base.dev; 186 struct drm_device *dev = intel_encoder->base.dev;
186 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 187 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
@@ -200,7 +201,7 @@ static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder)
200 msleep(intel_dp->panel_power_up_delay); 201 msleep(intel_dp->panel_power_up_delay);
201} 202}
202 203
203static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder) 204static void cdv_intel_edp_panel_vdd_off(struct gma_encoder *intel_encoder)
204{ 205{
205 struct drm_device *dev = intel_encoder->base.dev; 206 struct drm_device *dev = intel_encoder->base.dev;
206 u32 pp; 207 u32 pp;
@@ -215,7 +216,7 @@ static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder)
215} 216}
216 217
217/* Returns true if the panel was already on when called */ 218/* Returns true if the panel was already on when called */
218static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder) 219static bool cdv_intel_edp_panel_on(struct gma_encoder *intel_encoder)
219{ 220{
220 struct drm_device *dev = intel_encoder->base.dev; 221 struct drm_device *dev = intel_encoder->base.dev;
221 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 222 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
@@ -242,7 +243,7 @@ static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder)
242 return false; 243 return false;
243} 244}
244 245
245static void cdv_intel_edp_panel_off (struct psb_intel_encoder *intel_encoder) 246static void cdv_intel_edp_panel_off (struct gma_encoder *intel_encoder)
246{ 247{
247 struct drm_device *dev = intel_encoder->base.dev; 248 struct drm_device *dev = intel_encoder->base.dev;
248 u32 pp, idle_off_mask = PP_ON ; 249 u32 pp, idle_off_mask = PP_ON ;
@@ -274,7 +275,7 @@ static void cdv_intel_edp_panel_off (struct psb_intel_encoder *intel_encoder)
274 DRM_DEBUG_KMS("Over\n"); 275 DRM_DEBUG_KMS("Over\n");
275} 276}
276 277
277static void cdv_intel_edp_backlight_on (struct psb_intel_encoder *intel_encoder) 278static void cdv_intel_edp_backlight_on (struct gma_encoder *intel_encoder)
278{ 279{
279 struct drm_device *dev = intel_encoder->base.dev; 280 struct drm_device *dev = intel_encoder->base.dev;
280 u32 pp; 281 u32 pp;
@@ -294,7 +295,7 @@ static void cdv_intel_edp_backlight_on (struct psb_intel_encoder *intel_encoder)
294 gma_backlight_enable(dev); 295 gma_backlight_enable(dev);
295} 296}
296 297
297static void cdv_intel_edp_backlight_off (struct psb_intel_encoder *intel_encoder) 298static void cdv_intel_edp_backlight_off (struct gma_encoder *intel_encoder)
298{ 299{
299 struct drm_device *dev = intel_encoder->base.dev; 300 struct drm_device *dev = intel_encoder->base.dev;
300 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 301 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
@@ -314,7 +315,7 @@ static int
314cdv_intel_dp_mode_valid(struct drm_connector *connector, 315cdv_intel_dp_mode_valid(struct drm_connector *connector,
315 struct drm_display_mode *mode) 316 struct drm_display_mode *mode)
316{ 317{
317 struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); 318 struct gma_encoder *encoder = gma_attached_encoder(connector);
318 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 319 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
319 int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder)); 320 int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder));
320 int max_lanes = cdv_intel_dp_max_lane_count(encoder); 321 int max_lanes = cdv_intel_dp_max_lane_count(encoder);
@@ -370,7 +371,7 @@ unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
370} 371}
371 372
372static int 373static int
373cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder, 374cdv_intel_dp_aux_ch(struct gma_encoder *encoder,
374 uint8_t *send, int send_bytes, 375 uint8_t *send, int send_bytes,
375 uint8_t *recv, int recv_size) 376 uint8_t *recv, int recv_size)
376{ 377{
@@ -472,7 +473,7 @@ cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder,
472 473
473/* Write data to the aux channel in native mode */ 474/* Write data to the aux channel in native mode */
474static int 475static int
475cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder, 476cdv_intel_dp_aux_native_write(struct gma_encoder *encoder,
476 uint16_t address, uint8_t *send, int send_bytes) 477 uint16_t address, uint8_t *send, int send_bytes)
477{ 478{
478 int ret; 479 int ret;
@@ -504,7 +505,7 @@ cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder,
504 505
505/* Write a single byte to the aux channel in native mode */ 506/* Write a single byte to the aux channel in native mode */
506static int 507static int
507cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder, 508cdv_intel_dp_aux_native_write_1(struct gma_encoder *encoder,
508 uint16_t address, uint8_t byte) 509 uint16_t address, uint8_t byte)
509{ 510{
510 return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1); 511 return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1);
@@ -512,7 +513,7 @@ cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder,
512 513
513/* read bytes from a native aux channel */ 514/* read bytes from a native aux channel */
514static int 515static int
515cdv_intel_dp_aux_native_read(struct psb_intel_encoder *encoder, 516cdv_intel_dp_aux_native_read(struct gma_encoder *encoder,
516 uint16_t address, uint8_t *recv, int recv_bytes) 517 uint16_t address, uint8_t *recv, int recv_bytes)
517{ 518{
518 uint8_t msg[4]; 519 uint8_t msg[4];
@@ -557,7 +558,7 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
557 struct cdv_intel_dp *intel_dp = container_of(adapter, 558 struct cdv_intel_dp *intel_dp = container_of(adapter,
558 struct cdv_intel_dp, 559 struct cdv_intel_dp,
559 adapter); 560 adapter);
560 struct psb_intel_encoder *encoder = intel_dp->encoder; 561 struct gma_encoder *encoder = intel_dp->encoder;
561 uint16_t address = algo_data->address; 562 uint16_t address = algo_data->address;
562 uint8_t msg[5]; 563 uint8_t msg[5];
563 uint8_t reply[2]; 564 uint8_t reply[2];
@@ -647,7 +648,8 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
647} 648}
648 649
649static int 650static int
650cdv_intel_dp_i2c_init(struct psb_intel_connector *connector, struct psb_intel_encoder *encoder, const char *name) 651cdv_intel_dp_i2c_init(struct gma_connector *connector,
652 struct gma_encoder *encoder, const char *name)
651{ 653{
652 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 654 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
653 int ret; 655 int ret;
@@ -698,7 +700,7 @@ cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mo
698 struct drm_display_mode *adjusted_mode) 700 struct drm_display_mode *adjusted_mode)
699{ 701{
700 struct drm_psb_private *dev_priv = encoder->dev->dev_private; 702 struct drm_psb_private *dev_priv = encoder->dev->dev_private;
701 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); 703 struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
702 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 704 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
703 int lane_count, clock; 705 int lane_count, clock;
704 int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder); 706 int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder);
@@ -792,22 +794,22 @@ cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
792 struct drm_psb_private *dev_priv = dev->dev_private; 794 struct drm_psb_private *dev_priv = dev->dev_private;
793 struct drm_mode_config *mode_config = &dev->mode_config; 795 struct drm_mode_config *mode_config = &dev->mode_config;
794 struct drm_encoder *encoder; 796 struct drm_encoder *encoder;
795 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); 797 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
796 int lane_count = 4, bpp = 24; 798 int lane_count = 4, bpp = 24;
797 struct cdv_intel_dp_m_n m_n; 799 struct cdv_intel_dp_m_n m_n;
798 int pipe = intel_crtc->pipe; 800 int pipe = gma_crtc->pipe;
799 801
800 /* 802 /*
801 * Find the lane count in the intel_encoder private 803 * Find the lane count in the intel_encoder private
802 */ 804 */
803 list_for_each_entry(encoder, &mode_config->encoder_list, head) { 805 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
804 struct psb_intel_encoder *intel_encoder; 806 struct gma_encoder *intel_encoder;
805 struct cdv_intel_dp *intel_dp; 807 struct cdv_intel_dp *intel_dp;
806 808
807 if (encoder->crtc != crtc) 809 if (encoder->crtc != crtc)
808 continue; 810 continue;
809 811
810 intel_encoder = to_psb_intel_encoder(encoder); 812 intel_encoder = to_gma_encoder(encoder);
811 intel_dp = intel_encoder->dev_priv; 813 intel_dp = intel_encoder->dev_priv;
812 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { 814 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
813 lane_count = intel_dp->lane_count; 815 lane_count = intel_dp->lane_count;
@@ -841,9 +843,9 @@ static void
841cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, 843cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
842 struct drm_display_mode *adjusted_mode) 844 struct drm_display_mode *adjusted_mode)
843{ 845{
844 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); 846 struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
845 struct drm_crtc *crtc = encoder->crtc; 847 struct drm_crtc *crtc = encoder->crtc;
846 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); 848 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
847 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 849 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
848 struct drm_device *dev = encoder->dev; 850 struct drm_device *dev = encoder->dev;
849 851
@@ -885,7 +887,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode
885 } 887 }
886 888
887 /* CPT DP's pipe select is decided in TRANS_DP_CTL */ 889 /* CPT DP's pipe select is decided in TRANS_DP_CTL */
888 if (intel_crtc->pipe == 1) 890 if (gma_crtc->pipe == 1)
889 intel_dp->DP |= DP_PIPEB_SELECT; 891 intel_dp->DP |= DP_PIPEB_SELECT;
890 892
891 REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN)); 893 REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN));
@@ -900,7 +902,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode
900 else 902 else
901 pfit_control = 0; 903 pfit_control = 0;
902 904
903 pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT; 905 pfit_control |= gma_crtc->pipe << PFIT_PIPE_SHIFT;
904 906
905 REG_WRITE(PFIT_CONTROL, pfit_control); 907 REG_WRITE(PFIT_CONTROL, pfit_control);
906 } 908 }
@@ -908,7 +910,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode
908 910
909 911
910/* If the sink supports it, try to set the power state appropriately */ 912/* If the sink supports it, try to set the power state appropriately */
911static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode) 913static void cdv_intel_dp_sink_dpms(struct gma_encoder *encoder, int mode)
912{ 914{
913 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 915 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
914 int ret, i; 916 int ret, i;
@@ -940,7 +942,7 @@ static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode)
940 942
941static void cdv_intel_dp_prepare(struct drm_encoder *encoder) 943static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
942{ 944{
943 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); 945 struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
944 int edp = is_edp(intel_encoder); 946 int edp = is_edp(intel_encoder);
945 947
946 if (edp) { 948 if (edp) {
@@ -957,7 +959,7 @@ static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
957 959
958static void cdv_intel_dp_commit(struct drm_encoder *encoder) 960static void cdv_intel_dp_commit(struct drm_encoder *encoder)
959{ 961{
960 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); 962 struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
961 int edp = is_edp(intel_encoder); 963 int edp = is_edp(intel_encoder);
962 964
963 if (edp) 965 if (edp)
@@ -971,7 +973,7 @@ static void cdv_intel_dp_commit(struct drm_encoder *encoder)
971static void 973static void
972cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode) 974cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
973{ 975{
974 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); 976 struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
975 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 977 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
976 struct drm_device *dev = encoder->dev; 978 struct drm_device *dev = encoder->dev;
977 uint32_t dp_reg = REG_READ(intel_dp->output_reg); 979 uint32_t dp_reg = REG_READ(intel_dp->output_reg);
@@ -1006,7 +1008,7 @@ cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
1006 * cases where the sink may still be asleep. 1008 * cases where the sink may still be asleep.
1007 */ 1009 */
1008static bool 1010static bool
1009cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t address, 1011cdv_intel_dp_aux_native_read_retry(struct gma_encoder *encoder, uint16_t address,
1010 uint8_t *recv, int recv_bytes) 1012 uint8_t *recv, int recv_bytes)
1011{ 1013{
1012 int ret, i; 1014 int ret, i;
@@ -1031,7 +1033,7 @@ cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t a
1031 * link status information 1033 * link status information
1032 */ 1034 */
1033static bool 1035static bool
1034cdv_intel_dp_get_link_status(struct psb_intel_encoder *encoder) 1036cdv_intel_dp_get_link_status(struct gma_encoder *encoder)
1035{ 1037{
1036 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1038 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1037 return cdv_intel_dp_aux_native_read_retry(encoder, 1039 return cdv_intel_dp_aux_native_read_retry(encoder,
@@ -1105,7 +1107,7 @@ cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
1105} 1107}
1106*/ 1108*/
1107static void 1109static void
1108cdv_intel_get_adjust_train(struct psb_intel_encoder *encoder) 1110cdv_intel_get_adjust_train(struct gma_encoder *encoder)
1109{ 1111{
1110 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1112 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1111 uint8_t v = 0; 1113 uint8_t v = 0;
@@ -1164,7 +1166,7 @@ cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_c
1164 DP_LANE_CHANNEL_EQ_DONE|\ 1166 DP_LANE_CHANNEL_EQ_DONE|\
1165 DP_LANE_SYMBOL_LOCKED) 1167 DP_LANE_SYMBOL_LOCKED)
1166static bool 1168static bool
1167cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder) 1169cdv_intel_channel_eq_ok(struct gma_encoder *encoder)
1168{ 1170{
1169 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1171 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1170 uint8_t lane_align; 1172 uint8_t lane_align;
@@ -1184,7 +1186,7 @@ cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder)
1184} 1186}
1185 1187
1186static bool 1188static bool
1187cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder, 1189cdv_intel_dp_set_link_train(struct gma_encoder *encoder,
1188 uint32_t dp_reg_value, 1190 uint32_t dp_reg_value,
1189 uint8_t dp_train_pat) 1191 uint8_t dp_train_pat)
1190{ 1192{
@@ -1211,7 +1213,7 @@ cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder,
1211 1213
1212 1214
1213static bool 1215static bool
1214cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder, 1216cdv_intel_dplink_set_level(struct gma_encoder *encoder,
1215 uint8_t dp_train_pat) 1217 uint8_t dp_train_pat)
1216{ 1218{
1217 1219
@@ -1232,7 +1234,7 @@ cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder,
1232} 1234}
1233 1235
1234static void 1236static void
1235cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal_level) 1237cdv_intel_dp_set_vswing_premph(struct gma_encoder *encoder, uint8_t signal_level)
1236{ 1238{
1237 struct drm_device *dev = encoder->base.dev; 1239 struct drm_device *dev = encoder->base.dev;
1238 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1240 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1298,7 +1300,7 @@ cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal
1298 1300
1299/* Enable corresponding port and start training pattern 1 */ 1301/* Enable corresponding port and start training pattern 1 */
1300static void 1302static void
1301cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder) 1303cdv_intel_dp_start_link_train(struct gma_encoder *encoder)
1302{ 1304{
1303 struct drm_device *dev = encoder->base.dev; 1305 struct drm_device *dev = encoder->base.dev;
1304 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1306 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1317,7 +1319,7 @@ cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
1317 /* Enable output, wait for it to become active */ 1319 /* Enable output, wait for it to become active */
1318 REG_WRITE(intel_dp->output_reg, reg); 1320 REG_WRITE(intel_dp->output_reg, reg);
1319 REG_READ(intel_dp->output_reg); 1321 REG_READ(intel_dp->output_reg);
1320 psb_intel_wait_for_vblank(dev); 1322 gma_wait_for_vblank(dev);
1321 1323
1322 DRM_DEBUG_KMS("Link config\n"); 1324 DRM_DEBUG_KMS("Link config\n");
1323 /* Write the link configuration data */ 1325 /* Write the link configuration data */
@@ -1392,7 +1394,7 @@ cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
1392} 1394}
1393 1395
1394static void 1396static void
1395cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder) 1397cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
1396{ 1398{
1397 struct drm_device *dev = encoder->base.dev; 1399 struct drm_device *dev = encoder->base.dev;
1398 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1400 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1478,7 +1480,7 @@ cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder)
1478} 1480}
1479 1481
1480static void 1482static void
1481cdv_intel_dp_link_down(struct psb_intel_encoder *encoder) 1483cdv_intel_dp_link_down(struct gma_encoder *encoder)
1482{ 1484{
1483 struct drm_device *dev = encoder->base.dev; 1485 struct drm_device *dev = encoder->base.dev;
1484 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1486 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1502,8 +1504,7 @@ cdv_intel_dp_link_down(struct psb_intel_encoder *encoder)
1502 REG_READ(intel_dp->output_reg); 1504 REG_READ(intel_dp->output_reg);
1503} 1505}
1504 1506
1505static enum drm_connector_status 1507static enum drm_connector_status cdv_dp_detect(struct gma_encoder *encoder)
1506cdv_dp_detect(struct psb_intel_encoder *encoder)
1507{ 1508{
1508 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1509 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1509 enum drm_connector_status status; 1510 enum drm_connector_status status;
@@ -1531,7 +1532,7 @@ cdv_dp_detect(struct psb_intel_encoder *encoder)
1531static enum drm_connector_status 1532static enum drm_connector_status
1532cdv_intel_dp_detect(struct drm_connector *connector, bool force) 1533cdv_intel_dp_detect(struct drm_connector *connector, bool force)
1533{ 1534{
1534 struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); 1535 struct gma_encoder *encoder = gma_attached_encoder(connector);
1535 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1536 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1536 enum drm_connector_status status; 1537 enum drm_connector_status status;
1537 struct edid *edid = NULL; 1538 struct edid *edid = NULL;
@@ -1565,7 +1566,7 @@ cdv_intel_dp_detect(struct drm_connector *connector, bool force)
1565 1566
1566static int cdv_intel_dp_get_modes(struct drm_connector *connector) 1567static int cdv_intel_dp_get_modes(struct drm_connector *connector)
1567{ 1568{
1568 struct psb_intel_encoder *intel_encoder = psb_intel_attached_encoder(connector); 1569 struct gma_encoder *intel_encoder = gma_attached_encoder(connector);
1569 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 1570 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
1570 struct edid *edid = NULL; 1571 struct edid *edid = NULL;
1571 int ret = 0; 1572 int ret = 0;
@@ -1621,7 +1622,7 @@ static int cdv_intel_dp_get_modes(struct drm_connector *connector)
1621static bool 1622static bool
1622cdv_intel_dp_detect_audio(struct drm_connector *connector) 1623cdv_intel_dp_detect_audio(struct drm_connector *connector)
1623{ 1624{
1624 struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); 1625 struct gma_encoder *encoder = gma_attached_encoder(connector);
1625 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1626 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1626 struct edid *edid; 1627 struct edid *edid;
1627 bool has_audio = false; 1628 bool has_audio = false;
@@ -1647,7 +1648,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
1647 uint64_t val) 1648 uint64_t val)
1648{ 1649{
1649 struct drm_psb_private *dev_priv = connector->dev->dev_private; 1650 struct drm_psb_private *dev_priv = connector->dev->dev_private;
1650 struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); 1651 struct gma_encoder *encoder = gma_attached_encoder(connector);
1651 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1652 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1652 int ret; 1653 int ret;
1653 1654
@@ -1700,11 +1701,10 @@ done:
1700static void 1701static void
1701cdv_intel_dp_destroy(struct drm_connector *connector) 1702cdv_intel_dp_destroy(struct drm_connector *connector)
1702{ 1703{
1703 struct psb_intel_encoder *psb_intel_encoder = 1704 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
1704 psb_intel_attached_encoder(connector); 1705 struct cdv_intel_dp *intel_dp = gma_encoder->dev_priv;
1705 struct cdv_intel_dp *intel_dp = psb_intel_encoder->dev_priv;
1706 1706
1707 if (is_edp(psb_intel_encoder)) { 1707 if (is_edp(gma_encoder)) {
1708 /* cdv_intel_panel_destroy_backlight(connector->dev); */ 1708 /* cdv_intel_panel_destroy_backlight(connector->dev); */
1709 if (intel_dp->panel_fixed_mode) { 1709 if (intel_dp->panel_fixed_mode) {
1710 kfree(intel_dp->panel_fixed_mode); 1710 kfree(intel_dp->panel_fixed_mode);
@@ -1741,7 +1741,7 @@ static const struct drm_connector_funcs cdv_intel_dp_connector_funcs = {
1741static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = { 1741static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = {
1742 .get_modes = cdv_intel_dp_get_modes, 1742 .get_modes = cdv_intel_dp_get_modes,
1743 .mode_valid = cdv_intel_dp_mode_valid, 1743 .mode_valid = cdv_intel_dp_mode_valid,
1744 .best_encoder = psb_intel_best_encoder, 1744 .best_encoder = gma_best_encoder,
1745}; 1745};
1746 1746
1747static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = { 1747static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
@@ -1800,19 +1800,19 @@ static void cdv_disable_intel_clock_gating(struct drm_device *dev)
1800void 1800void
1801cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg) 1801cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg)
1802{ 1802{
1803 struct psb_intel_encoder *psb_intel_encoder; 1803 struct gma_encoder *gma_encoder;
1804 struct psb_intel_connector *psb_intel_connector; 1804 struct gma_connector *gma_connector;
1805 struct drm_connector *connector; 1805 struct drm_connector *connector;
1806 struct drm_encoder *encoder; 1806 struct drm_encoder *encoder;
1807 struct cdv_intel_dp *intel_dp; 1807 struct cdv_intel_dp *intel_dp;
1808 const char *name = NULL; 1808 const char *name = NULL;
1809 int type = DRM_MODE_CONNECTOR_DisplayPort; 1809 int type = DRM_MODE_CONNECTOR_DisplayPort;
1810 1810
1811 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); 1811 gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
1812 if (!psb_intel_encoder) 1812 if (!gma_encoder)
1813 return; 1813 return;
1814 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); 1814 gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
1815 if (!psb_intel_connector) 1815 if (!gma_connector)
1816 goto err_connector; 1816 goto err_connector;
1817 intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL); 1817 intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL);
1818 if (!intel_dp) 1818 if (!intel_dp)
@@ -1821,22 +1821,22 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
1821 if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev)) 1821 if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev))
1822 type = DRM_MODE_CONNECTOR_eDP; 1822 type = DRM_MODE_CONNECTOR_eDP;
1823 1823
1824 connector = &psb_intel_connector->base; 1824 connector = &gma_connector->base;
1825 encoder = &psb_intel_encoder->base; 1825 encoder = &gma_encoder->base;
1826 1826
1827 drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type); 1827 drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
1828 drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); 1828 drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS);
1829 1829
1830 psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); 1830 gma_connector_attach_encoder(gma_connector, gma_encoder);
1831 1831
1832 if (type == DRM_MODE_CONNECTOR_DisplayPort) 1832 if (type == DRM_MODE_CONNECTOR_DisplayPort)
1833 psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 1833 gma_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
1834 else 1834 else
1835 psb_intel_encoder->type = INTEL_OUTPUT_EDP; 1835 gma_encoder->type = INTEL_OUTPUT_EDP;
1836 1836
1837 1837
1838 psb_intel_encoder->dev_priv=intel_dp; 1838 gma_encoder->dev_priv=intel_dp;
1839 intel_dp->encoder = psb_intel_encoder; 1839 intel_dp->encoder = gma_encoder;
1840 intel_dp->output_reg = output_reg; 1840 intel_dp->output_reg = output_reg;
1841 1841
1842 drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs); 1842 drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs);
@@ -1852,21 +1852,21 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
1852 switch (output_reg) { 1852 switch (output_reg) {
1853 case DP_B: 1853 case DP_B:
1854 name = "DPDDC-B"; 1854 name = "DPDDC-B";
1855 psb_intel_encoder->ddi_select = (DP_MASK | DDI0_SELECT); 1855 gma_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
1856 break; 1856 break;
1857 case DP_C: 1857 case DP_C:
1858 name = "DPDDC-C"; 1858 name = "DPDDC-C";
1859 psb_intel_encoder->ddi_select = (DP_MASK | DDI1_SELECT); 1859 gma_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
1860 break; 1860 break;
1861 } 1861 }
1862 1862
1863 cdv_disable_intel_clock_gating(dev); 1863 cdv_disable_intel_clock_gating(dev);
1864 1864
1865 cdv_intel_dp_i2c_init(psb_intel_connector, psb_intel_encoder, name); 1865 cdv_intel_dp_i2c_init(gma_connector, gma_encoder, name);
1866	/* FIXME: fail check */ 1866	/* FIXME: fail check */
1867 cdv_intel_dp_add_properties(connector); 1867 cdv_intel_dp_add_properties(connector);
1868 1868
1869 if (is_edp(psb_intel_encoder)) { 1869 if (is_edp(gma_encoder)) {
1870 int ret; 1870 int ret;
1871 struct edp_power_seq cur; 1871 struct edp_power_seq cur;
1872 u32 pp_on, pp_off, pp_div; 1872 u32 pp_on, pp_off, pp_div;
@@ -1920,11 +1920,11 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
1920 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 1920 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
1921 1921
1922 1922
1923 cdv_intel_edp_panel_vdd_on(psb_intel_encoder); 1923 cdv_intel_edp_panel_vdd_on(gma_encoder);
1924 ret = cdv_intel_dp_aux_native_read(psb_intel_encoder, DP_DPCD_REV, 1924 ret = cdv_intel_dp_aux_native_read(gma_encoder, DP_DPCD_REV,
1925 intel_dp->dpcd, 1925 intel_dp->dpcd,
1926 sizeof(intel_dp->dpcd)); 1926 sizeof(intel_dp->dpcd));
1927 cdv_intel_edp_panel_vdd_off(psb_intel_encoder); 1927 cdv_intel_edp_panel_vdd_off(gma_encoder);
1928 if (ret == 0) { 1928 if (ret == 0) {
1929 /* if this fails, presume the device is a ghost */ 1929 /* if this fails, presume the device is a ghost */
1930 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 1930 DRM_INFO("failed to retrieve link info, disabling eDP\n");
@@ -1945,7 +1945,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
1945 return; 1945 return;
1946 1946
1947err_priv: 1947err_priv:
1948 kfree(psb_intel_connector); 1948 kfree(gma_connector);
1949err_connector: 1949err_connector:
1950 kfree(psb_intel_encoder); 1950 kfree(gma_encoder);
1951} 1951}
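The cdv_intel_dp.c changes above are the same mechanical rename, from psb_intel_encoder/psb_intel_connector to gma_encoder/gma_connector: every connector callback recovers its DP state the same way, through gma_attached_encoder() and the encoder's dev_priv pointer. A stand-alone sketch of that accessor pattern, with toy struct definitions invented for illustration (the real ones live in the driver's headers, e.g. gma_display.h):

#include <stdio.h>

struct gma_encoder {
	int type;
	void *dev_priv;	/* output-specific state (here, the DP struct) hangs off this */
};

struct gma_connector {
	struct gma_encoder *encoder;
};

struct cdv_intel_dp {
	int lane_count;
};

/* toy stand-in; the real helper resolves the encoder from a drm_connector */
static struct gma_encoder *gma_attached_encoder(struct gma_connector *c)
{
	return c->encoder;
}

int main(void)
{
	struct cdv_intel_dp dp = { .lane_count = 4 };
	struct gma_encoder enc = { .type = 1, .dev_priv = &dp };
	struct gma_connector conn = { .encoder = &enc };

	/* the pattern every renamed callback uses: encoder, then dev_priv */
	struct cdv_intel_dp *intel_dp = gma_attached_encoder(&conn)->dev_priv;

	printf("lanes: %d\n", intel_dp->lane_count);
	return 0;
}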
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 464153d9d2df..1c0d723b8d24 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -64,11 +64,11 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
64 struct drm_display_mode *adjusted_mode) 64 struct drm_display_mode *adjusted_mode)
65{ 65{
66 struct drm_device *dev = encoder->dev; 66 struct drm_device *dev = encoder->dev;
67 struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder); 67 struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
68 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; 68 struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
69 u32 hdmib; 69 u32 hdmib;
70 struct drm_crtc *crtc = encoder->crtc; 70 struct drm_crtc *crtc = encoder->crtc;
71 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); 71 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
72 72
73 hdmib = (2 << 10); 73 hdmib = (2 << 10);
74 74
@@ -77,7 +77,7 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
77 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 77 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
78 hdmib |= HDMI_HSYNC_ACTIVE_HIGH; 78 hdmib |= HDMI_HSYNC_ACTIVE_HIGH;
79 79
80 if (intel_crtc->pipe == 1) 80 if (gma_crtc->pipe == 1)
81 hdmib |= HDMIB_PIPE_B_SELECT; 81 hdmib |= HDMIB_PIPE_B_SELECT;
82 82
83 if (hdmi_priv->has_hdmi_audio) { 83 if (hdmi_priv->has_hdmi_audio) {
@@ -99,9 +99,8 @@ static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
99static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode) 99static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
100{ 100{
101 struct drm_device *dev = encoder->dev; 101 struct drm_device *dev = encoder->dev;
102 struct psb_intel_encoder *psb_intel_encoder = 102 struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
103 to_psb_intel_encoder(encoder); 103 struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
104 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
105 u32 hdmib; 104 u32 hdmib;
106 105
107 hdmib = REG_READ(hdmi_priv->hdmi_reg); 106 hdmib = REG_READ(hdmi_priv->hdmi_reg);
@@ -116,9 +115,8 @@ static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
116static void cdv_hdmi_save(struct drm_connector *connector) 115static void cdv_hdmi_save(struct drm_connector *connector)
117{ 116{
118 struct drm_device *dev = connector->dev; 117 struct drm_device *dev = connector->dev;
119 struct psb_intel_encoder *psb_intel_encoder = 118 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
120 psb_intel_attached_encoder(connector); 119 struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
121 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
122 120
123 hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg); 121 hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg);
124} 122}
@@ -126,9 +124,8 @@ static void cdv_hdmi_save(struct drm_connector *connector)
126static void cdv_hdmi_restore(struct drm_connector *connector) 124static void cdv_hdmi_restore(struct drm_connector *connector)
127{ 125{
128 struct drm_device *dev = connector->dev; 126 struct drm_device *dev = connector->dev;
129 struct psb_intel_encoder *psb_intel_encoder = 127 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
130 psb_intel_attached_encoder(connector); 128 struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
131 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
132 129
133 REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB); 130 REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB);
134 REG_READ(hdmi_priv->hdmi_reg); 131 REG_READ(hdmi_priv->hdmi_reg);
@@ -137,13 +134,12 @@ static void cdv_hdmi_restore(struct drm_connector *connector)
137static enum drm_connector_status cdv_hdmi_detect( 134static enum drm_connector_status cdv_hdmi_detect(
138 struct drm_connector *connector, bool force) 135 struct drm_connector *connector, bool force)
139{ 136{
140 struct psb_intel_encoder *psb_intel_encoder = 137 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
141 psb_intel_attached_encoder(connector); 138 struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
142 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
143 struct edid *edid = NULL; 139 struct edid *edid = NULL;
144 enum drm_connector_status status = connector_status_disconnected; 140 enum drm_connector_status status = connector_status_disconnected;
145 141
146 edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter); 142 edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
147 143
148 hdmi_priv->has_hdmi_sink = false; 144 hdmi_priv->has_hdmi_sink = false;
149 hdmi_priv->has_hdmi_audio = false; 145 hdmi_priv->has_hdmi_audio = false;
@@ -167,7 +163,7 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
167 struct drm_encoder *encoder = connector->encoder; 163 struct drm_encoder *encoder = connector->encoder;
168 164
169 if (!strcmp(property->name, "scaling mode") && encoder) { 165 if (!strcmp(property->name, "scaling mode") && encoder) {
170 struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc); 166 struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
171 bool centre; 167 bool centre;
172 uint64_t curValue; 168 uint64_t curValue;
173 169
@@ -221,12 +217,11 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
221 */ 217 */
222static int cdv_hdmi_get_modes(struct drm_connector *connector) 218static int cdv_hdmi_get_modes(struct drm_connector *connector)
223{ 219{
224 struct psb_intel_encoder *psb_intel_encoder = 220 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
225 psb_intel_attached_encoder(connector);
226 struct edid *edid = NULL; 221 struct edid *edid = NULL;
227 int ret = 0; 222 int ret = 0;
228 223
229 edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter); 224 edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
230 if (edid) { 225 if (edid) {
231 drm_mode_connector_update_edid_property(connector, edid); 226 drm_mode_connector_update_edid_property(connector, edid);
232 ret = drm_add_edid_modes(connector, edid); 227 ret = drm_add_edid_modes(connector, edid);
@@ -256,11 +251,10 @@ static int cdv_hdmi_mode_valid(struct drm_connector *connector,
256 251
257static void cdv_hdmi_destroy(struct drm_connector *connector) 252static void cdv_hdmi_destroy(struct drm_connector *connector)
258{ 253{
259 struct psb_intel_encoder *psb_intel_encoder = 254 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
260 psb_intel_attached_encoder(connector);
261 255
262 if (psb_intel_encoder->i2c_bus) 256 if (gma_encoder->i2c_bus)
263 psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus); 257 psb_intel_i2c_destroy(gma_encoder->i2c_bus);
264 drm_sysfs_connector_remove(connector); 258 drm_sysfs_connector_remove(connector);
265 drm_connector_cleanup(connector); 259 drm_connector_cleanup(connector);
266 kfree(connector); 260 kfree(connector);
@@ -269,16 +263,16 @@ static void cdv_hdmi_destroy(struct drm_connector *connector)
269static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = { 263static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
270 .dpms = cdv_hdmi_dpms, 264 .dpms = cdv_hdmi_dpms,
271 .mode_fixup = cdv_hdmi_mode_fixup, 265 .mode_fixup = cdv_hdmi_mode_fixup,
272 .prepare = psb_intel_encoder_prepare, 266 .prepare = gma_encoder_prepare,
273 .mode_set = cdv_hdmi_mode_set, 267 .mode_set = cdv_hdmi_mode_set,
274 .commit = psb_intel_encoder_commit, 268 .commit = gma_encoder_commit,
275}; 269};
276 270
277static const struct drm_connector_helper_funcs 271static const struct drm_connector_helper_funcs
278 cdv_hdmi_connector_helper_funcs = { 272 cdv_hdmi_connector_helper_funcs = {
279 .get_modes = cdv_hdmi_get_modes, 273 .get_modes = cdv_hdmi_get_modes,
280 .mode_valid = cdv_hdmi_mode_valid, 274 .mode_valid = cdv_hdmi_mode_valid,
281 .best_encoder = psb_intel_best_encoder, 275 .best_encoder = gma_best_encoder,
282}; 276};
283 277
284static const struct drm_connector_funcs cdv_hdmi_connector_funcs = { 278static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
@@ -294,23 +288,22 @@ static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
294void cdv_hdmi_init(struct drm_device *dev, 288void cdv_hdmi_init(struct drm_device *dev,
295 struct psb_intel_mode_device *mode_dev, int reg) 289 struct psb_intel_mode_device *mode_dev, int reg)
296{ 290{
297 struct psb_intel_encoder *psb_intel_encoder; 291 struct gma_encoder *gma_encoder;
298 struct psb_intel_connector *psb_intel_connector; 292 struct gma_connector *gma_connector;
299 struct drm_connector *connector; 293 struct drm_connector *connector;
300 struct drm_encoder *encoder; 294 struct drm_encoder *encoder;
301 struct mid_intel_hdmi_priv *hdmi_priv; 295 struct mid_intel_hdmi_priv *hdmi_priv;
302 int ddc_bus; 296 int ddc_bus;
303 297
304 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), 298 gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
305 GFP_KERNEL);
306 299
307 if (!psb_intel_encoder) 300 if (!gma_encoder)
308 return; 301 return;
309 302
310 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), 303 gma_connector = kzalloc(sizeof(struct gma_connector),
311 GFP_KERNEL); 304 GFP_KERNEL);
312 305
313 if (!psb_intel_connector) 306 if (!gma_connector)
314 goto err_connector; 307 goto err_connector;
315 308
316 hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL); 309 hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
@@ -318,9 +311,9 @@ void cdv_hdmi_init(struct drm_device *dev,
318 if (!hdmi_priv) 311 if (!hdmi_priv)
319 goto err_priv; 312 goto err_priv;
320 313
321 connector = &psb_intel_connector->base; 314 connector = &gma_connector->base;
322 connector->polled = DRM_CONNECTOR_POLL_HPD; 315 connector->polled = DRM_CONNECTOR_POLL_HPD;
323 encoder = &psb_intel_encoder->base; 316 encoder = &gma_encoder->base;
324 drm_connector_init(dev, connector, 317 drm_connector_init(dev, connector,
325 &cdv_hdmi_connector_funcs, 318 &cdv_hdmi_connector_funcs,
326 DRM_MODE_CONNECTOR_DVID); 319 DRM_MODE_CONNECTOR_DVID);
@@ -328,12 +321,11 @@ void cdv_hdmi_init(struct drm_device *dev,
328 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, 321 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
329 DRM_MODE_ENCODER_TMDS); 322 DRM_MODE_ENCODER_TMDS);
330 323
331 psb_intel_connector_attach_encoder(psb_intel_connector, 324 gma_connector_attach_encoder(gma_connector, gma_encoder);
332 psb_intel_encoder); 325 gma_encoder->type = INTEL_OUTPUT_HDMI;
333 psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
334 hdmi_priv->hdmi_reg = reg; 326 hdmi_priv->hdmi_reg = reg;
335 hdmi_priv->has_hdmi_sink = false; 327 hdmi_priv->has_hdmi_sink = false;
336 psb_intel_encoder->dev_priv = hdmi_priv; 328 gma_encoder->dev_priv = hdmi_priv;
337 329
338 drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs); 330 drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
339 drm_connector_helper_add(connector, 331 drm_connector_helper_add(connector,
@@ -349,11 +341,11 @@ void cdv_hdmi_init(struct drm_device *dev,
349 switch (reg) { 341 switch (reg) {
350 case SDVOB: 342 case SDVOB:
351 ddc_bus = GPIOE; 343 ddc_bus = GPIOE;
352 psb_intel_encoder->ddi_select = DDI0_SELECT; 344 gma_encoder->ddi_select = DDI0_SELECT;
353 break; 345 break;
354 case SDVOC: 346 case SDVOC:
355 ddc_bus = GPIOD; 347 ddc_bus = GPIOD;
356 psb_intel_encoder->ddi_select = DDI1_SELECT; 348 gma_encoder->ddi_select = DDI1_SELECT;
357 break; 349 break;
358 default: 350 default:
359 DRM_ERROR("unknown reg 0x%x for HDMI\n", reg); 351 DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
@@ -361,16 +353,15 @@ void cdv_hdmi_init(struct drm_device *dev,
361 break; 353 break;
362 } 354 }
363 355
364 psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev, 356 gma_encoder->i2c_bus = psb_intel_i2c_create(dev,
365 ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC"); 357 ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
366 358
367 if (!psb_intel_encoder->i2c_bus) { 359 if (!gma_encoder->i2c_bus) {
368 dev_err(dev->dev, "No ddc adapter available!\n"); 360 dev_err(dev->dev, "No ddc adapter available!\n");
369 goto failed_ddc; 361 goto failed_ddc;
370 } 362 }
371 363
372 hdmi_priv->hdmi_i2c_adapter = 364 hdmi_priv->hdmi_i2c_adapter = &(gma_encoder->i2c_bus->adapter);
373 &(psb_intel_encoder->i2c_bus->adapter);
374 hdmi_priv->dev = dev; 365 hdmi_priv->dev = dev;
375 drm_sysfs_connector_add(connector); 366 drm_sysfs_connector_add(connector);
376 return; 367 return;
@@ -379,7 +370,7 @@ failed_ddc:
379 drm_encoder_cleanup(encoder); 370 drm_encoder_cleanup(encoder);
380 drm_connector_cleanup(connector); 371 drm_connector_cleanup(connector);
381err_priv: 372err_priv:
382 kfree(psb_intel_connector); 373 kfree(gma_connector);
383err_connector: 374err_connector:
384 kfree(psb_intel_encoder); 375 kfree(gma_encoder);
385} 376}
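The hunks above are a mechanical swap of psb_intel_encoder/psb_intel_connector for gma_encoder/gma_connector, and both naming schemes rely on the same layout trick: the DRM core object is embedded as a member (base), and helpers such as to_gma_crtc() or gma_attached_encoder() recover the wrapper struct from the embedded pointer. A minimal userspace sketch of that pattern, assuming stand-in struct members (only the embedded base mirrors the driver):

#include <stddef.h>
#include <stdio.h>

/* Userspace analogue of the kernel's container_of(): step back from a
 * pointer to a member to the struct that embeds it. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_encoder { int id; };         /* stand-in for the DRM core type */
struct gma_encoder {                    /* driver wrapper, as in the diff */
        struct drm_encoder base;
        int type;
};
#define to_gma_encoder(e) container_of(e, struct gma_encoder, base)

int main(void)
{
        struct gma_encoder enc = { .type = 3 };  /* e.g. an output type id */
        struct drm_encoder *base = &enc.base;    /* what DRM callbacks pass */

        printf("type=%d\n", to_gma_encoder(base)->type);
        return 0;
}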
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index d81dbc3368f0..20e08e65d46c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -356,8 +356,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
356{ 356{
357 struct drm_device *dev = encoder->dev; 357 struct drm_device *dev = encoder->dev;
358 struct drm_psb_private *dev_priv = dev->dev_private; 358 struct drm_psb_private *dev_priv = dev->dev_private;
359 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc( 359 struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
360 encoder->crtc);
361 u32 pfit_control; 360 u32 pfit_control;
362 361
363 /* 362 /*
@@ -379,7 +378,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
379 else 378 else
380 pfit_control = 0; 379 pfit_control = 0;
381 380
382 pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT; 381 pfit_control |= gma_crtc->pipe << PFIT_PIPE_SHIFT;
383 382
384 if (dev_priv->lvds_dither) 383 if (dev_priv->lvds_dither)
385 pfit_control |= PANEL_8TO6_DITHER_ENABLE; 384 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
@@ -407,12 +406,11 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
407{ 406{
408 struct drm_device *dev = connector->dev; 407 struct drm_device *dev = connector->dev;
409 struct drm_psb_private *dev_priv = dev->dev_private; 408 struct drm_psb_private *dev_priv = dev->dev_private;
410 struct psb_intel_encoder *psb_intel_encoder = 409 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
411 psb_intel_attached_encoder(connector);
412 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; 410 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
413 int ret; 411 int ret;
414 412
415 ret = psb_intel_ddc_get_modes(connector, &psb_intel_encoder->i2c_bus->adapter); 413 ret = psb_intel_ddc_get_modes(connector, &gma_encoder->i2c_bus->adapter);
416 414
417 if (ret) 415 if (ret)
418 return ret; 416 return ret;
@@ -444,11 +442,10 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
444 */ 442 */
445static void cdv_intel_lvds_destroy(struct drm_connector *connector) 443static void cdv_intel_lvds_destroy(struct drm_connector *connector)
446{ 444{
447 struct psb_intel_encoder *psb_intel_encoder = 445 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
448 psb_intel_attached_encoder(connector);
449 446
450 if (psb_intel_encoder->i2c_bus) 447 if (gma_encoder->i2c_bus)
451 psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus); 448 psb_intel_i2c_destroy(gma_encoder->i2c_bus);
452 drm_sysfs_connector_remove(connector); 449 drm_sysfs_connector_remove(connector);
453 drm_connector_cleanup(connector); 450 drm_connector_cleanup(connector);
454 kfree(connector); 451 kfree(connector);
@@ -461,8 +458,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
461 struct drm_encoder *encoder = connector->encoder; 458 struct drm_encoder *encoder = connector->encoder;
462 459
463 if (!strcmp(property->name, "scaling mode") && encoder) { 460 if (!strcmp(property->name, "scaling mode") && encoder) {
464 struct psb_intel_crtc *crtc = 461 struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
465 to_psb_intel_crtc(encoder->crtc);
466 uint64_t curValue; 462 uint64_t curValue;
467 463
468 if (!crtc) 464 if (!crtc)
@@ -529,7 +525,7 @@ static const struct drm_connector_helper_funcs
529 cdv_intel_lvds_connector_helper_funcs = { 525 cdv_intel_lvds_connector_helper_funcs = {
530 .get_modes = cdv_intel_lvds_get_modes, 526 .get_modes = cdv_intel_lvds_get_modes,
531 .mode_valid = cdv_intel_lvds_mode_valid, 527 .mode_valid = cdv_intel_lvds_mode_valid,
532 .best_encoder = psb_intel_best_encoder, 528 .best_encoder = gma_best_encoder,
533}; 529};
534 530
535static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = { 531static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
@@ -612,8 +608,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
612void cdv_intel_lvds_init(struct drm_device *dev, 608void cdv_intel_lvds_init(struct drm_device *dev,
613 struct psb_intel_mode_device *mode_dev) 609 struct psb_intel_mode_device *mode_dev)
614{ 610{
615 struct psb_intel_encoder *psb_intel_encoder; 611 struct gma_encoder *gma_encoder;
616 struct psb_intel_connector *psb_intel_connector; 612 struct gma_connector *gma_connector;
617 struct cdv_intel_lvds_priv *lvds_priv; 613 struct cdv_intel_lvds_priv *lvds_priv;
618 struct drm_connector *connector; 614 struct drm_connector *connector;
619 struct drm_encoder *encoder; 615 struct drm_encoder *encoder;
@@ -630,24 +626,24 @@ void cdv_intel_lvds_init(struct drm_device *dev,
630 return; 626 return;
631 } 627 }
632 628
633 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), 629 gma_encoder = kzalloc(sizeof(struct gma_encoder),
634 GFP_KERNEL); 630 GFP_KERNEL);
635 if (!psb_intel_encoder) 631 if (!gma_encoder)
636 return; 632 return;
637 633
638 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), 634 gma_connector = kzalloc(sizeof(struct gma_connector),
639 GFP_KERNEL); 635 GFP_KERNEL);
640 if (!psb_intel_connector) 636 if (!gma_connector)
641 goto failed_connector; 637 goto failed_connector;
642 638
643 lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL); 639 lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
644 if (!lvds_priv) 640 if (!lvds_priv)
645 goto failed_lvds_priv; 641 goto failed_lvds_priv;
646 642
647 psb_intel_encoder->dev_priv = lvds_priv; 643 gma_encoder->dev_priv = lvds_priv;
648 644
649 connector = &psb_intel_connector->base; 645 connector = &gma_connector->base;
650 encoder = &psb_intel_encoder->base; 646 encoder = &gma_encoder->base;
651 647
652 648
653 drm_connector_init(dev, connector, 649 drm_connector_init(dev, connector,
@@ -659,9 +655,8 @@ void cdv_intel_lvds_init(struct drm_device *dev,
659 DRM_MODE_ENCODER_LVDS); 655 DRM_MODE_ENCODER_LVDS);
660 656
661 657
662 psb_intel_connector_attach_encoder(psb_intel_connector, 658 gma_connector_attach_encoder(gma_connector, gma_encoder);
663 psb_intel_encoder); 659 gma_encoder->type = INTEL_OUTPUT_LVDS;
664 psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
665 660
666 drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs); 661 drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
667 drm_connector_helper_add(connector, 662 drm_connector_helper_add(connector,
@@ -682,16 +677,16 @@ void cdv_intel_lvds_init(struct drm_device *dev,
682 * Set up I2C bus 677 * Set up I2C bus
 683 * FIXME: destroy i2c_bus on exit 678 * FIXME: destroy i2c_bus on exit
684 */ 679 */
685 psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev, 680 gma_encoder->i2c_bus = psb_intel_i2c_create(dev,
686 GPIOB, 681 GPIOB,
687 "LVDSBLC_B"); 682 "LVDSBLC_B");
688 if (!psb_intel_encoder->i2c_bus) { 683 if (!gma_encoder->i2c_bus) {
689 dev_printk(KERN_ERR, 684 dev_printk(KERN_ERR,
690 &dev->pdev->dev, "I2C bus registration failed.\n"); 685 &dev->pdev->dev, "I2C bus registration failed.\n");
691 goto failed_blc_i2c; 686 goto failed_blc_i2c;
692 } 687 }
693 psb_intel_encoder->i2c_bus->slave_addr = 0x2C; 688 gma_encoder->i2c_bus->slave_addr = 0x2C;
694 dev_priv->lvds_i2c_bus = psb_intel_encoder->i2c_bus; 689 dev_priv->lvds_i2c_bus = gma_encoder->i2c_bus;
695 690
696 /* 691 /*
697 * LVDS discovery: 692 * LVDS discovery:
@@ -704,10 +699,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
704 */ 699 */
705 700
706 /* Set up the DDC bus. */ 701 /* Set up the DDC bus. */
707 psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev, 702 gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
708 GPIOC, 703 GPIOC,
709 "LVDSDDC_C"); 704 "LVDSDDC_C");
710 if (!psb_intel_encoder->ddc_bus) { 705 if (!gma_encoder->ddc_bus) {
711 dev_printk(KERN_ERR, &dev->pdev->dev, 706 dev_printk(KERN_ERR, &dev->pdev->dev,
712 "DDC bus registration " "failed.\n"); 707 "DDC bus registration " "failed.\n");
713 goto failed_ddc; 708 goto failed_ddc;
@@ -718,7 +713,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
718 * preferred mode is the right one. 713 * preferred mode is the right one.
719 */ 714 */
720 psb_intel_ddc_get_modes(connector, 715 psb_intel_ddc_get_modes(connector,
721 &psb_intel_encoder->ddc_bus->adapter); 716 &gma_encoder->ddc_bus->adapter);
722 list_for_each_entry(scan, &connector->probed_modes, head) { 717 list_for_each_entry(scan, &connector->probed_modes, head) {
723 if (scan->type & DRM_MODE_TYPE_PREFERRED) { 718 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
724 mode_dev->panel_fixed_mode = 719 mode_dev->panel_fixed_mode =
@@ -782,19 +777,19 @@ out:
782 777
783failed_find: 778failed_find:
784 printk(KERN_ERR "Failed find\n"); 779 printk(KERN_ERR "Failed find\n");
785 if (psb_intel_encoder->ddc_bus) 780 if (gma_encoder->ddc_bus)
786 psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus); 781 psb_intel_i2c_destroy(gma_encoder->ddc_bus);
787failed_ddc: 782failed_ddc:
788 printk(KERN_ERR "Failed DDC\n"); 783 printk(KERN_ERR "Failed DDC\n");
789 if (psb_intel_encoder->i2c_bus) 784 if (gma_encoder->i2c_bus)
790 psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus); 785 psb_intel_i2c_destroy(gma_encoder->i2c_bus);
791failed_blc_i2c: 786failed_blc_i2c:
792 printk(KERN_ERR "Failed BLC\n"); 787 printk(KERN_ERR "Failed BLC\n");
793 drm_encoder_cleanup(encoder); 788 drm_encoder_cleanup(encoder);
794 drm_connector_cleanup(connector); 789 drm_connector_cleanup(connector);
795 kfree(lvds_priv); 790 kfree(lvds_priv);
796failed_lvds_priv: 791failed_lvds_priv:
797 kfree(psb_intel_connector); 792 kfree(gma_connector);
798failed_connector: 793failed_connector:
799 kfree(psb_intel_encoder); 794 kfree(gma_encoder);
800} 795}
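cdv_intel_lvds_init() unwinds with the kernel's goto-ladder idiom visible in the failed_* labels above: each label releases exactly what was acquired before the failing step, in reverse order, so no path leaks or double-frees. A minimal runnable analogue of that structure, with hypothetical allocation names:

#include <stdlib.h>

/* Each label undoes only the allocations that succeeded before the
 * failure point -- same shape as the failed_*: ladder in the diff. */
static int init_chain(void)
{
        char *enc, *con, *priv;

        enc = malloc(32);
        if (!enc)
                return -1;
        con = malloc(32);
        if (!con)
                goto free_enc;
        priv = malloc(32);
        if (!priv)
                goto free_con;

        free(priv); free(con); free(enc);  /* success path for the demo */
        return 0;

free_con:
        free(con);
free_enc:
        free(enc);
        return -1;
}

int main(void) { return init_chain() ? 1 : 0; }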
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 8b1b6d923abe..01dd7d225762 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -321,10 +321,8 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
321 /* Begin by trying to use stolen memory backing */ 321 /* Begin by trying to use stolen memory backing */
322 backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1); 322 backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
323 if (backing) { 323 if (backing) {
324 if (drm_gem_private_object_init(dev, 324 drm_gem_private_object_init(dev, &backing->gem, aligned_size);
325 &backing->gem, aligned_size) == 0) 325 return backing;
326 return backing;
327 psb_gtt_free_range(dev, backing);
328 } 326 }
329 return NULL; 327 return NULL;
330} 328}
@@ -522,21 +520,21 @@ static struct drm_framebuffer *psb_user_framebuffer_create
522static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 520static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
523 u16 blue, int regno) 521 u16 blue, int regno)
524{ 522{
525 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); 523 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
526 524
527 intel_crtc->lut_r[regno] = red >> 8; 525 gma_crtc->lut_r[regno] = red >> 8;
528 intel_crtc->lut_g[regno] = green >> 8; 526 gma_crtc->lut_g[regno] = green >> 8;
529 intel_crtc->lut_b[regno] = blue >> 8; 527 gma_crtc->lut_b[regno] = blue >> 8;
530} 528}
531 529
532static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red, 530static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
533 u16 *green, u16 *blue, int regno) 531 u16 *green, u16 *blue, int regno)
534{ 532{
535 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); 533 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
536 534
537 *red = intel_crtc->lut_r[regno] << 8; 535 *red = gma_crtc->lut_r[regno] << 8;
538 *green = intel_crtc->lut_g[regno] << 8; 536 *green = gma_crtc->lut_g[regno] << 8;
539 *blue = intel_crtc->lut_b[regno] << 8; 537 *blue = gma_crtc->lut_b[regno] << 8;
540} 538}
541 539
542static int psbfb_probe(struct drm_fb_helper *helper, 540static int psbfb_probe(struct drm_fb_helper *helper,
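psbfb_gamma_set()/psbfb_gamma_get() above keep only the top byte of each 16-bit DRM gamma component in the LUT, so a round trip through the pair zeroes the low 8 bits. A quick check of that truncation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t red  = 0xABCD;
        uint8_t  lut  = red >> 8;            /* what psbfb_gamma_set stores  */
        uint16_t back = (uint16_t)lut << 8;  /* what psbfb_gamma_get returns */

        printf("0x%04X -> 0x%02X -> 0x%04X\n", red, lut, back);
        /* prints: 0xABCD -> 0xAB -> 0xAB00 */
        return 0;
}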
@@ -705,13 +703,12 @@ static void psb_setup_outputs(struct drm_device *dev)
705 703
706 list_for_each_entry(connector, &dev->mode_config.connector_list, 704 list_for_each_entry(connector, &dev->mode_config.connector_list,
707 head) { 705 head) {
708 struct psb_intel_encoder *psb_intel_encoder = 706 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
709 psb_intel_attached_encoder(connector); 707 struct drm_encoder *encoder = &gma_encoder->base;
710 struct drm_encoder *encoder = &psb_intel_encoder->base;
711 int crtc_mask = 0, clone_mask = 0; 708 int crtc_mask = 0, clone_mask = 0;
712 709
713 /* valid crtcs */ 710 /* valid crtcs */
714 switch (psb_intel_encoder->type) { 711 switch (gma_encoder->type) {
715 case INTEL_OUTPUT_ANALOG: 712 case INTEL_OUTPUT_ANALOG:
716 crtc_mask = (1 << 0); 713 crtc_mask = (1 << 0);
717 clone_mask = (1 << INTEL_OUTPUT_ANALOG); 714 clone_mask = (1 << INTEL_OUTPUT_ANALOG);
@@ -746,7 +743,7 @@ static void psb_setup_outputs(struct drm_device *dev)
746 } 743 }
747 encoder->possible_crtcs = crtc_mask; 744 encoder->possible_crtcs = crtc_mask;
748 encoder->possible_clones = 745 encoder->possible_clones =
749 psb_intel_connector_clones(dev, clone_mask); 746 gma_connector_clones(dev, clone_mask);
750 } 747 }
751} 748}
752 749
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
index 989558a9e6ee..395f20b07aab 100644
--- a/drivers/gpu/drm/gma500/framebuffer.h
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -41,7 +41,7 @@ struct psb_fbdev {
41 41
42#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base) 42#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
43 43
44extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask); 44extern int gma_connector_clones(struct drm_device *dev, int type_mask);
45 45
46#endif 46#endif
47 47
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index eefd6cc5b80d..10ae8c52d06f 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -26,6 +26,7 @@
26#include <drm/drmP.h> 26#include <drm/drmP.h>
27#include <drm/drm.h> 27#include <drm/drm.h>
28#include <drm/gma_drm.h> 28#include <drm/gma_drm.h>
29#include <drm/drm_vma_manager.h>
29#include "psb_drv.h" 30#include "psb_drv.h"
30 31
31int psb_gem_init_object(struct drm_gem_object *obj) 32int psb_gem_init_object(struct drm_gem_object *obj)
@@ -38,8 +39,7 @@ void psb_gem_free_object(struct drm_gem_object *obj)
38 struct gtt_range *gtt = container_of(obj, struct gtt_range, gem); 39 struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
39 40
40 /* Remove the list map if one is present */ 41 /* Remove the list map if one is present */
41 if (obj->map_list.map) 42 drm_gem_free_mmap_offset(obj);
42 drm_gem_free_mmap_offset(obj);
43 drm_gem_object_release(obj); 43 drm_gem_object_release(obj);
44 44
45 /* This must occur last as it frees up the memory of the GEM object */ 45 /* This must occur last as it frees up the memory of the GEM object */
@@ -81,13 +81,10 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
81 /* What validation is needed here ? */ 81 /* What validation is needed here ? */
82 82
83 /* Make it mmapable */ 83 /* Make it mmapable */
84 if (!obj->map_list.map) { 84 ret = drm_gem_create_mmap_offset(obj);
85 ret = drm_gem_create_mmap_offset(obj); 85 if (ret)
86 if (ret) 86 goto out;
87 goto out; 87 *offset = drm_vma_node_offset_addr(&obj->vma_node);
88 }
89 /* GEM should really work out the hash offsets for us */
90 *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
91out: 88out:
92 drm_gem_object_unreference(obj); 89 drm_gem_object_unreference(obj);
93unlock: 90unlock:
@@ -165,23 +162,6 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
165} 162}
166 163
167/** 164/**
168 * psb_gem_dumb_destroy - destroy a dumb buffer
169 * @file: client file
170 * @dev: our DRM device
171 * @handle: the object handle
172 *
173 * Destroy a handle that was created via psb_gem_dumb_create, at least
174 * we hope it was created that way. i915 seems to assume the caller
175 * does the checking but that might be worth review ! FIXME
176 */
177int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
178 uint32_t handle)
179{
180 /* No special work needed, drop the reference and see what falls out */
181 return drm_gem_handle_delete(file, handle);
182}
183
184/**
185 * psb_gem_fault - pagefault handler for GEM objects 165 * psb_gem_fault - pagefault handler for GEM objects
186 * @vma: the VMA of the GEM object 166 * @vma: the VMA of the GEM object
187 * @vmf: fault detail 167 * @vmf: fault detail
@@ -261,11 +241,12 @@ static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
261 struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1); 241 struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
262 if (gtt == NULL) 242 if (gtt == NULL)
263 return -ENOMEM; 243 return -ENOMEM;
264 if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0) 244
265 goto free_gtt; 245 drm_gem_private_object_init(dev, &gtt->gem, size);
266 if (drm_gem_handle_create(file, &gtt->gem, handle) == 0) 246 if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
267 return 0; 247 return 0;
268free_gtt: 248
249 drm_gem_object_release(&gtt->gem);
269 psb_gtt_free_range(dev, gtt); 250 psb_gtt_free_range(dev, gtt);
270 return -ENOMEM; 251 return -ENOMEM;
271} 252}
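The gem.c hunks drop the driver-maintained map_list hash in favour of the core drm_vma_manager: the fake mmap offset now comes from drm_vma_node_offset_addr() on obj->vma_node. A simplified sketch of what that helper computes -- the vma manager allocates a page-granular start, and the byte offset is the same `<< PAGE_SHIFT` arithmetic the removed "hash.key << PAGE_SHIFT" line did by hand:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Simplified view of drm_vma_node_offset_addr(): start is in pages,
 * the offset handed to userspace is in bytes. */
static uint64_t vma_node_offset_addr(unsigned long start_page)
{
        return (uint64_t)start_page << PAGE_SHIFT;
}

int main(void)
{
        printf("0x%llx\n",
               (unsigned long long)vma_node_offset_addr(0x1234));
        return 0;
}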
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
new file mode 100644
index 000000000000..24e8af3d22bf
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -0,0 +1,776 @@
1/*
2 * Copyright © 2006-2011 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 * Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
20 */
21
22#include <drm/drmP.h>
23#include "gma_display.h"
24#include "psb_intel_drv.h"
25#include "psb_intel_reg.h"
26#include "psb_drv.h"
27#include "framebuffer.h"
28
29/**
30 * Returns whether any output on the specified pipe is of the specified type
31 */
32bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
33{
34 struct drm_device *dev = crtc->dev;
35 struct drm_mode_config *mode_config = &dev->mode_config;
36 struct drm_connector *l_entry;
37
38 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
39 if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
40 struct gma_encoder *gma_encoder =
41 gma_attached_encoder(l_entry);
42 if (gma_encoder->type == type)
43 return true;
44 }
45 }
46
47 return false;
48}
49
50void gma_wait_for_vblank(struct drm_device *dev)
51{
 52 /* Wait for 20 ms, i.e. one cycle at 50 Hz. */
53 mdelay(20);
54}
55
56int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
57 struct drm_framebuffer *old_fb)
58{
59 struct drm_device *dev = crtc->dev;
60 struct drm_psb_private *dev_priv = dev->dev_private;
61 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
62 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
63 int pipe = gma_crtc->pipe;
64 const struct psb_offset *map = &dev_priv->regmap[pipe];
65 unsigned long start, offset;
66 u32 dspcntr;
67 int ret = 0;
68
69 if (!gma_power_begin(dev, true))
70 return 0;
71
72 /* no fb bound */
73 if (!crtc->fb) {
74 dev_err(dev->dev, "No FB bound\n");
75 goto gma_pipe_cleaner;
76 }
77
78 /* We are displaying this buffer, make sure it is actually loaded
79 into the GTT */
80 ret = psb_gtt_pin(psbfb->gtt);
81 if (ret < 0)
82 goto gma_pipe_set_base_exit;
83 start = psbfb->gtt->offset;
84 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
85
86 REG_WRITE(map->stride, crtc->fb->pitches[0]);
87
88 dspcntr = REG_READ(map->cntr);
89 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
90
91 switch (crtc->fb->bits_per_pixel) {
92 case 8:
93 dspcntr |= DISPPLANE_8BPP;
94 break;
95 case 16:
96 if (crtc->fb->depth == 15)
97 dspcntr |= DISPPLANE_15_16BPP;
98 else
99 dspcntr |= DISPPLANE_16BPP;
100 break;
101 case 24:
102 case 32:
103 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
104 break;
105 default:
106 dev_err(dev->dev, "Unknown color depth\n");
107 ret = -EINVAL;
108 goto gma_pipe_set_base_exit;
109 }
110 REG_WRITE(map->cntr, dspcntr);
111
112 dev_dbg(dev->dev,
113 "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
114
115 /* FIXME: Investigate whether this really is the base for psb and why
116 the linear offset is named base for the other chips. map->surf
117 should be the base and map->linoff the offset for all chips */
118 if (IS_PSB(dev)) {
119 REG_WRITE(map->base, offset + start);
120 REG_READ(map->base);
121 } else {
122 REG_WRITE(map->base, offset);
123 REG_READ(map->base);
124 REG_WRITE(map->surf, start);
125 REG_READ(map->surf);
126 }
127
128gma_pipe_cleaner:
129 /* If there was a previous display we can now unpin it */
130 if (old_fb)
131 psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
132
133gma_pipe_set_base_exit:
134 gma_power_end(dev);
135 return ret;
136}
137
138/* Loads the palette/gamma unit for the CRTC with the prepared values */
139void gma_crtc_load_lut(struct drm_crtc *crtc)
140{
141 struct drm_device *dev = crtc->dev;
142 struct drm_psb_private *dev_priv = dev->dev_private;
143 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
144 const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
145 int palreg = map->palette;
146 int i;
147
148 /* The clocks have to be on to load the palette. */
149 if (!crtc->enabled)
150 return;
151
152 if (gma_power_begin(dev, false)) {
153 for (i = 0; i < 256; i++) {
154 REG_WRITE(palreg + 4 * i,
155 ((gma_crtc->lut_r[i] +
156 gma_crtc->lut_adj[i]) << 16) |
157 ((gma_crtc->lut_g[i] +
158 gma_crtc->lut_adj[i]) << 8) |
159 (gma_crtc->lut_b[i] +
160 gma_crtc->lut_adj[i]));
161 }
162 gma_power_end(dev);
163 } else {
164 for (i = 0; i < 256; i++) {
165 /* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
166 dev_priv->regs.pipe[0].palette[i] =
167 ((gma_crtc->lut_r[i] +
168 gma_crtc->lut_adj[i]) << 16) |
169 ((gma_crtc->lut_g[i] +
170 gma_crtc->lut_adj[i]) << 8) |
171 (gma_crtc->lut_b[i] +
172 gma_crtc->lut_adj[i]);
173 }
174
175 }
176}
177
178void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
179 u32 start, u32 size)
180{
181 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
182 int i;
183 int end = (start + size > 256) ? 256 : start + size;
184
185 for (i = start; i < end; i++) {
186 gma_crtc->lut_r[i] = red[i] >> 8;
187 gma_crtc->lut_g[i] = green[i] >> 8;
188 gma_crtc->lut_b[i] = blue[i] >> 8;
189 }
190
191 gma_crtc_load_lut(crtc);
192}
193
194/**
195 * Sets the power management mode of the pipe and plane.
196 *
197 * This code should probably grow support for turning the cursor off and back
198 * on appropriately at the same time as we're turning the pipe off/on.
199 */
200void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
201{
202 struct drm_device *dev = crtc->dev;
203 struct drm_psb_private *dev_priv = dev->dev_private;
204 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
205 int pipe = gma_crtc->pipe;
206 const struct psb_offset *map = &dev_priv->regmap[pipe];
207 u32 temp;
208
209 /* XXX: When our outputs are all unaware of DPMS modes other than off
210 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
211 */
212
213 if (IS_CDV(dev))
214 dev_priv->ops->disable_sr(dev);
215
216 switch (mode) {
217 case DRM_MODE_DPMS_ON:
218 case DRM_MODE_DPMS_STANDBY:
219 case DRM_MODE_DPMS_SUSPEND:
220 if (gma_crtc->active)
221 break;
222
223 gma_crtc->active = true;
224
225 /* Enable the DPLL */
226 temp = REG_READ(map->dpll);
227 if ((temp & DPLL_VCO_ENABLE) == 0) {
228 REG_WRITE(map->dpll, temp);
229 REG_READ(map->dpll);
230 /* Wait for the clocks to stabilize. */
231 udelay(150);
232 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
233 REG_READ(map->dpll);
234 /* Wait for the clocks to stabilize. */
235 udelay(150);
236 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
237 REG_READ(map->dpll);
238 /* Wait for the clocks to stabilize. */
239 udelay(150);
240 }
241
242 /* Enable the plane */
243 temp = REG_READ(map->cntr);
244 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
245 REG_WRITE(map->cntr,
246 temp | DISPLAY_PLANE_ENABLE);
247 /* Flush the plane changes */
248 REG_WRITE(map->base, REG_READ(map->base));
249 }
250
251 udelay(150);
252
253 /* Enable the pipe */
254 temp = REG_READ(map->conf);
255 if ((temp & PIPEACONF_ENABLE) == 0)
256 REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
257
258 temp = REG_READ(map->status);
259 temp &= ~(0xFFFF);
260 temp |= PIPE_FIFO_UNDERRUN;
261 REG_WRITE(map->status, temp);
262 REG_READ(map->status);
263
264 gma_crtc_load_lut(crtc);
265
266 /* Give the overlay scaler a chance to enable
267 * if it's on this pipe */
268 /* psb_intel_crtc_dpms_video(crtc, true); TODO */
269 break;
270 case DRM_MODE_DPMS_OFF:
271 if (!gma_crtc->active)
272 break;
273
274 gma_crtc->active = false;
275
276 /* Give the overlay scaler a chance to disable
277 * if it's on this pipe */
278 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
279
280 /* Disable the VGA plane that we never use */
281 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
282
283 /* Turn off vblank interrupts */
284 drm_vblank_off(dev, pipe);
285
286 /* Wait for vblank for the disable to take effect */
287 gma_wait_for_vblank(dev);
288
289 /* Disable plane */
290 temp = REG_READ(map->cntr);
291 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
292 REG_WRITE(map->cntr,
293 temp & ~DISPLAY_PLANE_ENABLE);
294 /* Flush the plane changes */
295 REG_WRITE(map->base, REG_READ(map->base));
296 REG_READ(map->base);
297 }
298
299 /* Disable pipe */
300 temp = REG_READ(map->conf);
301 if ((temp & PIPEACONF_ENABLE) != 0) {
302 REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
303 REG_READ(map->conf);
304 }
305
306 /* Wait for vblank for the disable to take effect. */
307 gma_wait_for_vblank(dev);
308
309 udelay(150);
310
311 /* Disable DPLL */
312 temp = REG_READ(map->dpll);
313 if ((temp & DPLL_VCO_ENABLE) != 0) {
314 REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
315 REG_READ(map->dpll);
316 }
317
318 /* Wait for the clocks to turn off. */
319 udelay(150);
320 break;
321 }
322
323 if (IS_CDV(dev))
324 dev_priv->ops->update_wm(dev, crtc);
325
326 /* Set FIFO watermarks */
327 REG_WRITE(DSPARB, 0x3F3E);
328}
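gma_crtc_dpms() pairs almost every REG_WRITE with a REG_READ of the same register; reading back forces the posted MMIO write out to the device before the udelay() that follows starts counting. A userspace sketch of the idiom, backed by a plain array instead of a mapped register window:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_regs[256];  /* stand-in for the MMIO window */

static void reg_write_flushed(unsigned int reg, uint32_t val)
{
        fake_regs[reg] = val;    /* REG_WRITE(reg, val)                  */
        (void)fake_regs[reg];    /* REG_READ(reg): flush the posted write */
}

int main(void)
{
        reg_write_flushed(4, 0x80000000u);  /* e.g. a VCO-enable bit */
        printf("0x%08X\n", (unsigned int)fake_regs[4]);
        return 0;
}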
329
330int gma_crtc_cursor_set(struct drm_crtc *crtc,
331 struct drm_file *file_priv,
332 uint32_t handle,
333 uint32_t width, uint32_t height)
334{
335 struct drm_device *dev = crtc->dev;
336 struct drm_psb_private *dev_priv = dev->dev_private;
337 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
338 int pipe = gma_crtc->pipe;
339 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
340 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
341 uint32_t temp;
342 size_t addr = 0;
343 struct gtt_range *gt;
344 struct gtt_range *cursor_gt = gma_crtc->cursor_gt;
345 struct drm_gem_object *obj;
346 void *tmp_dst, *tmp_src;
347 int ret = 0, i, cursor_pages;
348
349 /* If we didn't get a handle then turn the cursor off */
350 if (!handle) {
351 temp = CURSOR_MODE_DISABLE;
352
353 if (gma_power_begin(dev, false)) {
354 REG_WRITE(control, temp);
355 REG_WRITE(base, 0);
356 gma_power_end(dev);
357 }
358
359 /* Unpin the old GEM object */
360 if (gma_crtc->cursor_obj) {
361 gt = container_of(gma_crtc->cursor_obj,
362 struct gtt_range, gem);
363 psb_gtt_unpin(gt);
364 drm_gem_object_unreference(gma_crtc->cursor_obj);
365 gma_crtc->cursor_obj = NULL;
366 }
367
368 return 0;
369 }
370
371 /* Currently we only support 64x64 cursors */
372 if (width != 64 || height != 64) {
373 dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
374 return -EINVAL;
375 }
376
377 obj = drm_gem_object_lookup(dev, file_priv, handle);
378 if (!obj)
379 return -ENOENT;
380
381 if (obj->size < width * height * 4) {
382 dev_dbg(dev->dev, "Buffer is too small\n");
383 ret = -ENOMEM;
384 goto unref_cursor;
385 }
386
387 gt = container_of(obj, struct gtt_range, gem);
388
389 /* Pin the memory into the GTT */
390 ret = psb_gtt_pin(gt);
391 if (ret) {
 392 dev_err(dev->dev, "Cannot pin down handle 0x%x\n", handle);
393 goto unref_cursor;
394 }
395
396 if (dev_priv->ops->cursor_needs_phys) {
397 if (cursor_gt == NULL) {
398 dev_err(dev->dev, "No hardware cursor mem available");
399 ret = -ENOMEM;
400 goto unref_cursor;
401 }
402
403 /* Prevent overflow */
404 if (gt->npage > 4)
405 cursor_pages = 4;
406 else
407 cursor_pages = gt->npage;
408
409 /* Copy the cursor to cursor mem */
410 tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
411 for (i = 0; i < cursor_pages; i++) {
412 tmp_src = kmap(gt->pages[i]);
413 memcpy(tmp_dst, tmp_src, PAGE_SIZE);
414 kunmap(gt->pages[i]);
415 tmp_dst += PAGE_SIZE;
416 }
417
418 addr = gma_crtc->cursor_addr;
419 } else {
420 addr = gt->offset;
421 gma_crtc->cursor_addr = addr;
422 }
423
424 temp = 0;
425 /* set the pipe for the cursor */
426 temp |= (pipe << 28);
427 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
428
429 if (gma_power_begin(dev, false)) {
430 REG_WRITE(control, temp);
431 REG_WRITE(base, addr);
432 gma_power_end(dev);
433 }
434
435 /* unpin the old bo */
436 if (gma_crtc->cursor_obj) {
437 gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
438 psb_gtt_unpin(gt);
439 drm_gem_object_unreference(gma_crtc->cursor_obj);
440 }
441
442 gma_crtc->cursor_obj = obj;
443 return ret;
444
445unref_cursor:
446 drm_gem_object_unreference(obj);
447 return ret;
448}
449
450int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
451{
452 struct drm_device *dev = crtc->dev;
453 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
454 int pipe = gma_crtc->pipe;
455 uint32_t temp = 0;
456 uint32_t addr;
457
458 if (x < 0) {
459 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
460 x = -x;
461 }
462 if (y < 0) {
463 temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
464 y = -y;
465 }
466
467 temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
468 temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
469
470 addr = gma_crtc->cursor_addr;
471
472 if (gma_power_begin(dev, false)) {
473 REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
474 REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
475 gma_power_end(dev);
476 }
477 return 0;
478}
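The cursor position registers take sign-magnitude coordinates -- a per-axis sign bit plus the absolute value -- rather than two's complement, which is why gma_crtc_cursor_move() negates and masks by hand. The sketch below reuses the same logic; the bit positions are illustrative stand-ins for CURSOR_POS_SIGN and friends, not the real register layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout -- not the actual gma500 constants. */
#define CURSOR_POS_MASK 0x0FFFu
#define CURSOR_POS_SIGN 0x8000u
#define CURSOR_X_SHIFT  0
#define CURSOR_Y_SHIFT  16

static uint32_t encode_cursor_pos(int x, int y)
{
        uint32_t temp = 0;

        if (x < 0) { temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; x = -x; }
        if (y < 0) { temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; y = -y; }
        temp |= ((uint32_t)x & CURSOR_POS_MASK) << CURSOR_X_SHIFT;
        temp |= ((uint32_t)y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT;
        return temp;
}

int main(void)
{
        printf("0x%08X\n", encode_cursor_pos(-10, 20));  /* 0x0014800A */
        return 0;
}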
479
480bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
481 const struct drm_display_mode *mode,
482 struct drm_display_mode *adjusted_mode)
483{
484 return true;
485}
486
487void gma_crtc_prepare(struct drm_crtc *crtc)
488{
489 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
490 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
491}
492
493void gma_crtc_commit(struct drm_crtc *crtc)
494{
495 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
496 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
497}
498
499void gma_crtc_disable(struct drm_crtc *crtc)
500{
501 struct gtt_range *gt;
502 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
503
504 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
505
506 if (crtc->fb) {
507 gt = to_psb_fb(crtc->fb)->gtt;
508 psb_gtt_unpin(gt);
509 }
510}
511
512void gma_crtc_destroy(struct drm_crtc *crtc)
513{
514 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
515
516 kfree(gma_crtc->crtc_state);
517 drm_crtc_cleanup(crtc);
518 kfree(gma_crtc);
519}
520
521int gma_crtc_set_config(struct drm_mode_set *set)
522{
523 struct drm_device *dev = set->crtc->dev;
524 struct drm_psb_private *dev_priv = dev->dev_private;
525 int ret;
526
527 if (!dev_priv->rpm_enabled)
528 return drm_crtc_helper_set_config(set);
529
530 pm_runtime_forbid(&dev->pdev->dev);
531 ret = drm_crtc_helper_set_config(set);
532 pm_runtime_allow(&dev->pdev->dev);
533
534 return ret;
535}
536
537/**
538 * Save HW states of given crtc
539 */
540void gma_crtc_save(struct drm_crtc *crtc)
541{
542 struct drm_device *dev = crtc->dev;
543 struct drm_psb_private *dev_priv = dev->dev_private;
544 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
545 struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
546 const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
547 uint32_t palette_reg;
548 int i;
549
550 if (!crtc_state) {
551 dev_err(dev->dev, "No CRTC state found\n");
552 return;
553 }
554
555 crtc_state->saveDSPCNTR = REG_READ(map->cntr);
556 crtc_state->savePIPECONF = REG_READ(map->conf);
557 crtc_state->savePIPESRC = REG_READ(map->src);
558 crtc_state->saveFP0 = REG_READ(map->fp0);
559 crtc_state->saveFP1 = REG_READ(map->fp1);
560 crtc_state->saveDPLL = REG_READ(map->dpll);
561 crtc_state->saveHTOTAL = REG_READ(map->htotal);
562 crtc_state->saveHBLANK = REG_READ(map->hblank);
563 crtc_state->saveHSYNC = REG_READ(map->hsync);
564 crtc_state->saveVTOTAL = REG_READ(map->vtotal);
565 crtc_state->saveVBLANK = REG_READ(map->vblank);
566 crtc_state->saveVSYNC = REG_READ(map->vsync);
567 crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
568
569 /* NOTE: DSPSIZE DSPPOS only for psb */
570 crtc_state->saveDSPSIZE = REG_READ(map->size);
571 crtc_state->saveDSPPOS = REG_READ(map->pos);
572
573 crtc_state->saveDSPBASE = REG_READ(map->base);
574
575 palette_reg = map->palette;
576 for (i = 0; i < 256; ++i)
577 crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
578}
579
580/**
581 * Restore HW states of given crtc
582 */
583void gma_crtc_restore(struct drm_crtc *crtc)
584{
585 struct drm_device *dev = crtc->dev;
586 struct drm_psb_private *dev_priv = dev->dev_private;
587 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
588 struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
589 const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
590 uint32_t palette_reg;
591 int i;
592
593 if (!crtc_state) {
594 dev_err(dev->dev, "No crtc state\n");
595 return;
596 }
597
598 if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
599 REG_WRITE(map->dpll,
600 crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
601 REG_READ(map->dpll);
602 udelay(150);
603 }
604
605 REG_WRITE(map->fp0, crtc_state->saveFP0);
606 REG_READ(map->fp0);
607
608 REG_WRITE(map->fp1, crtc_state->saveFP1);
609 REG_READ(map->fp1);
610
611 REG_WRITE(map->dpll, crtc_state->saveDPLL);
612 REG_READ(map->dpll);
613 udelay(150);
614
615 REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
616 REG_WRITE(map->hblank, crtc_state->saveHBLANK);
617 REG_WRITE(map->hsync, crtc_state->saveHSYNC);
618 REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
619 REG_WRITE(map->vblank, crtc_state->saveVBLANK);
620 REG_WRITE(map->vsync, crtc_state->saveVSYNC);
621 REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
622
623 REG_WRITE(map->size, crtc_state->saveDSPSIZE);
624 REG_WRITE(map->pos, crtc_state->saveDSPPOS);
625
626 REG_WRITE(map->src, crtc_state->savePIPESRC);
627 REG_WRITE(map->base, crtc_state->saveDSPBASE);
628 REG_WRITE(map->conf, crtc_state->savePIPECONF);
629
630 gma_wait_for_vblank(dev);
631
632 REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
633 REG_WRITE(map->base, crtc_state->saveDSPBASE);
634
635 gma_wait_for_vblank(dev);
636
637 palette_reg = map->palette;
638 for (i = 0; i < 256; ++i)
639 REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
640}
641
642void gma_encoder_prepare(struct drm_encoder *encoder)
643{
644 struct drm_encoder_helper_funcs *encoder_funcs =
645 encoder->helper_private;
 646 /* lvds has its own version of prepare; see psb_intel_lvds_prepare */
647 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
648}
649
650void gma_encoder_commit(struct drm_encoder *encoder)
651{
652 struct drm_encoder_helper_funcs *encoder_funcs =
653 encoder->helper_private;
 654 /* lvds has its own version of commit; see psb_intel_lvds_commit */
655 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
656}
657
658void gma_encoder_destroy(struct drm_encoder *encoder)
659{
660 struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
661
662 drm_encoder_cleanup(encoder);
663 kfree(intel_encoder);
664}
665
666/* Currently there is only a 1:1 mapping of encoders and connectors */
667struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
668{
669 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
670
671 return &gma_encoder->base;
672}
673
674void gma_connector_attach_encoder(struct gma_connector *connector,
675 struct gma_encoder *encoder)
676{
677 connector->encoder = encoder;
678 drm_mode_connector_attach_encoder(&connector->base,
679 &encoder->base);
680}
681
682#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }
683
684bool gma_pll_is_valid(struct drm_crtc *crtc,
685 const struct gma_limit_t *limit,
686 struct gma_clock_t *clock)
687{
688 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
689 GMA_PLL_INVALID("p1 out of range");
690 if (clock->p < limit->p.min || limit->p.max < clock->p)
691 GMA_PLL_INVALID("p out of range");
692 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
693 GMA_PLL_INVALID("m2 out of range");
694 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
695 GMA_PLL_INVALID("m1 out of range");
696 /* On CDV m1 is always 0 */
697 if (clock->m1 <= clock->m2 && clock->m1 != 0)
698 GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
699 if (clock->m < limit->m.min || limit->m.max < clock->m)
700 GMA_PLL_INVALID("m out of range");
701 if (clock->n < limit->n.min || limit->n.max < clock->n)
702 GMA_PLL_INVALID("n out of range");
703 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
704 GMA_PLL_INVALID("vco out of range");
705 /* XXX: We may need to be checking "Dot clock"
706 * depending on the multiplier, connector, etc.,
707 * rather than just a single range.
708 */
709 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
710 GMA_PLL_INVALID("dot out of range");
711
712 return true;
713}
714
715bool gma_find_best_pll(const struct gma_limit_t *limit,
716 struct drm_crtc *crtc, int target, int refclk,
717 struct gma_clock_t *best_clock)
718{
719 struct drm_device *dev = crtc->dev;
720 const struct gma_clock_funcs *clock_funcs =
721 to_gma_crtc(crtc)->clock_funcs;
722 struct gma_clock_t clock;
723 int err = target;
724
725 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
726 (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
727 /*
728 * For LVDS, if the panel is on, just rely on its current
729 * settings for dual-channel. We haven't figured out how to
730 * reliably set up different single/dual channel state, if we
731 * even can.
732 */
733 if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
734 LVDS_CLKB_POWER_UP)
735 clock.p2 = limit->p2.p2_fast;
736 else
737 clock.p2 = limit->p2.p2_slow;
738 } else {
739 if (target < limit->p2.dot_limit)
740 clock.p2 = limit->p2.p2_slow;
741 else
742 clock.p2 = limit->p2.p2_fast;
743 }
744
745 memset(best_clock, 0, sizeof(*best_clock));
746
 747 /* m1 is always 0 on CDV so the outermost loop will run just once */
748 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
749 for (clock.m2 = limit->m2.min;
750 (clock.m2 < clock.m1 || clock.m1 == 0) &&
751 clock.m2 <= limit->m2.max; clock.m2++) {
752 for (clock.n = limit->n.min;
753 clock.n <= limit->n.max; clock.n++) {
754 for (clock.p1 = limit->p1.min;
755 clock.p1 <= limit->p1.max;
756 clock.p1++) {
757 int this_err;
758
759 clock_funcs->clock(refclk, &clock);
760
761 if (!clock_funcs->pll_is_valid(crtc,
762 limit, &clock))
763 continue;
764
765 this_err = abs(clock.dot - target);
766 if (this_err < err) {
767 *best_clock = clock;
768 err = this_err;
769 }
770 }
771 }
772 }
773 }
774
775 return err != target;
776}
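gma_find_best_pll() is a plain exhaustive search: enumerate every divider combination, let the per-chip clock_funcs->clock() derive the resulting dot clock, filter through pll_is_valid(), and keep the candidate closest to the target. Seeding err with the target itself means the function returns false exactly when no valid clock was found at all. The toy below mirrors that structure with a generic Intel-style formula (vco = refclk * m / n, dot = vco / p) and no validity filtering -- the real per-chip math and limits differ:

#include <stdio.h>
#include <stdlib.h>

struct clk { int n, m, p, dot; };

static int find_best(int refclk, int target, struct clk *best)
{
        struct clk c;
        int err = target;  /* worst case: "no clock at all" */

        for (c.n = 1; c.n <= 8; c.n++)
                for (c.m = 60; c.m <= 120; c.m++)
                        for (c.p = 1; c.p <= 16; c.p++) {
                                c.dot = refclk * c.m / c.n / c.p;
                                if (abs(c.dot - target) < err) {
                                        err = abs(c.dot - target);
                                        *best = c;
                                }
                        }
        return err != target;  /* same convention as gma_find_best_pll() */
}

int main(void)
{
        struct clk best = { 0 };

        if (find_best(96000, 65000, &best))  /* kHz */
                printf("n=%d m=%d p=%d dot=%d kHz\n",
                       best.n, best.m, best.p, best.dot);
        return 0;
}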
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
new file mode 100644
index 000000000000..78b9f986a6e5
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -0,0 +1,103 @@
1/*
2 * Copyright © 2006-2011 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 * Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
20 */
21
22#ifndef _GMA_DISPLAY_H_
23#define _GMA_DISPLAY_H_
24
25#include <linux/pm_runtime.h>
26
27struct gma_clock_t {
28 /* given values */
29 int n;
30 int m1, m2;
31 int p1, p2;
32 /* derived values */
33 int dot;
34 int vco;
35 int m;
36 int p;
37};
38
39struct gma_range_t {
40 int min, max;
41};
42
43struct gma_p2_t {
44 int dot_limit;
45 int p2_slow, p2_fast;
46};
47
48struct gma_limit_t {
49 struct gma_range_t dot, vco, n, m, m1, m2, p, p1;
50 struct gma_p2_t p2;
51 bool (*find_pll)(const struct gma_limit_t *, struct drm_crtc *,
52 int target, int refclk,
53 struct gma_clock_t *best_clock);
54};
55
56struct gma_clock_funcs {
57 void (*clock)(int refclk, struct gma_clock_t *clock);
58 const struct gma_limit_t *(*limit)(struct drm_crtc *crtc, int refclk);
59 bool (*pll_is_valid)(struct drm_crtc *crtc,
60 const struct gma_limit_t *limit,
61 struct gma_clock_t *clock);
62};
63
64/* Common pipe related functions */
65extern bool gma_pipe_has_type(struct drm_crtc *crtc, int type);
66extern void gma_wait_for_vblank(struct drm_device *dev);
67extern int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
68 struct drm_framebuffer *old_fb);
69extern int gma_crtc_cursor_set(struct drm_crtc *crtc,
70 struct drm_file *file_priv,
71 uint32_t handle,
72 uint32_t width, uint32_t height);
73extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
74extern void gma_crtc_load_lut(struct drm_crtc *crtc);
75extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
76 u16 *blue, u32 start, u32 size);
77extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
78extern bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
79 const struct drm_display_mode *mode,
80 struct drm_display_mode *adjusted_mode);
81extern void gma_crtc_prepare(struct drm_crtc *crtc);
82extern void gma_crtc_commit(struct drm_crtc *crtc);
83extern void gma_crtc_disable(struct drm_crtc *crtc);
84extern void gma_crtc_destroy(struct drm_crtc *crtc);
85extern int gma_crtc_set_config(struct drm_mode_set *set);
86
87extern void gma_crtc_save(struct drm_crtc *crtc);
88extern void gma_crtc_restore(struct drm_crtc *crtc);
89
90extern void gma_encoder_prepare(struct drm_encoder *encoder);
91extern void gma_encoder_commit(struct drm_encoder *encoder);
92extern void gma_encoder_destroy(struct drm_encoder *encoder);
93
94/* Common clock related functions */
95extern const struct gma_limit_t *gma_limit(struct drm_crtc *crtc, int refclk);
96extern void gma_clock(int refclk, struct gma_clock_t *clock);
97extern bool gma_pll_is_valid(struct drm_crtc *crtc,
98 const struct gma_limit_t *limit,
99 struct gma_clock_t *clock);
100extern bool gma_find_best_pll(const struct gma_limit_t *limit,
101 struct drm_crtc *crtc, int target, int refclk,
102 struct gma_clock_t *best_clock);
103#endif
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 1f82183536a3..92babac362ec 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -196,37 +196,17 @@ void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
196 */ 196 */
197static int psb_gtt_attach_pages(struct gtt_range *gt) 197static int psb_gtt_attach_pages(struct gtt_range *gt)
198{ 198{
199 struct inode *inode; 199 struct page **pages;
200 struct address_space *mapping;
201 int i;
202 struct page *p;
203 int pages = gt->gem.size / PAGE_SIZE;
204 200
205 WARN_ON(gt->pages); 201 WARN_ON(gt->pages);
206 202
207 /* This is the shared memory object that backs the GEM resource */ 203 pages = drm_gem_get_pages(&gt->gem, 0);
208 inode = file_inode(gt->gem.filp); 204 if (IS_ERR(pages))
209 mapping = inode->i_mapping; 205 return PTR_ERR(pages);
210 206
211 gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL); 207 gt->pages = pages;
212 if (gt->pages == NULL)
213 return -ENOMEM;
214 gt->npage = pages;
215 208
216 for (i = 0; i < pages; i++) {
217 p = shmem_read_mapping_page(mapping, i);
218 if (IS_ERR(p))
219 goto err;
220 gt->pages[i] = p;
221 }
222 return 0; 209 return 0;
223
224err:
225 while (i--)
226 page_cache_release(gt->pages[i]);
227 kfree(gt->pages);
228 gt->pages = NULL;
229 return PTR_ERR(p);
230} 210}
231 211
232/** 212/**
@@ -240,13 +220,7 @@ err:
240 */ 220 */
241static void psb_gtt_detach_pages(struct gtt_range *gt) 221static void psb_gtt_detach_pages(struct gtt_range *gt)
242{ 222{
243 int i; 223 drm_gem_put_pages(&gt->gem, gt->pages, true, false);
244 for (i = 0; i < gt->npage; i++) {
245 /* FIXME: do we need to force dirty */
246 set_page_dirty(gt->pages[i]);
247 page_cache_release(gt->pages[i]);
248 }
249 kfree(gt->pages);
250 gt->pages = NULL; 224 gt->pages = NULL;
251} 225}
252 226
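Both gtt.c hunks replace roughly thirty lines of open-coded shmem page pinning with drm_gem_get_pages()/drm_gem_put_pages(), and the error handling collapses to the kernel's ERR_PTR convention: the failure code travels inside the returned pointer itself, in the top few kilobytes of the address space. A userspace sketch of that convention:

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr) ((long)(ptr))
#define ERR_PTR(err) ((void *)(long)(err))

int main(void)
{
        void *pages = ERR_PTR(-12);  /* -ENOMEM */

        if (IS_ERR(pages))
                printf("get_pages failed: %ld\n", PTR_ERR(pages));
        return 0;
}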
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 3abf8315f57c..860a4ee9baaf 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -249,12 +249,11 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
249 struct drm_encoder *encoder = connector->encoder; 249 struct drm_encoder *encoder = connector->encoder;
250 250
251 if (!strcmp(property->name, "scaling mode") && encoder) { 251 if (!strcmp(property->name, "scaling mode") && encoder) {
252 struct psb_intel_crtc *psb_crtc = 252 struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
253 to_psb_intel_crtc(encoder->crtc);
254 bool centerechange; 253 bool centerechange;
255 uint64_t val; 254 uint64_t val;
256 255
257 if (!psb_crtc) 256 if (!gma_crtc)
258 goto set_prop_error; 257 goto set_prop_error;
259 258
260 switch (value) { 259 switch (value) {
@@ -281,11 +280,11 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
281 centerechange = (val == DRM_MODE_SCALE_NO_SCALE) || 280 centerechange = (val == DRM_MODE_SCALE_NO_SCALE) ||
282 (value == DRM_MODE_SCALE_NO_SCALE); 281 (value == DRM_MODE_SCALE_NO_SCALE);
283 282
284 if (psb_crtc->saved_mode.hdisplay != 0 && 283 if (gma_crtc->saved_mode.hdisplay != 0 &&
285 psb_crtc->saved_mode.vdisplay != 0) { 284 gma_crtc->saved_mode.vdisplay != 0) {
286 if (centerechange) { 285 if (centerechange) {
287 if (!drm_crtc_helper_set_mode(encoder->crtc, 286 if (!drm_crtc_helper_set_mode(encoder->crtc,
288 &psb_crtc->saved_mode, 287 &gma_crtc->saved_mode,
289 encoder->crtc->x, 288 encoder->crtc->x,
290 encoder->crtc->y, 289 encoder->crtc->y,
291 encoder->crtc->fb)) 290 encoder->crtc->fb))
@@ -294,8 +293,8 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
294 struct drm_encoder_helper_funcs *funcs = 293 struct drm_encoder_helper_funcs *funcs =
295 encoder->helper_private; 294 encoder->helper_private;
296 funcs->mode_set(encoder, 295 funcs->mode_set(encoder,
297 &psb_crtc->saved_mode, 296 &gma_crtc->saved_mode,
298 &psb_crtc->saved_adjusted_mode); 297 &gma_crtc->saved_adjusted_mode);
299 } 298 }
300 } 299 }
301 } else if (!strcmp(property->name, "backlight") && encoder) { 300 } else if (!strcmp(property->name, "backlight") && encoder) {
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
index 36eb0744841c..45d5af0546bf 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
@@ -227,7 +227,7 @@ enum {
227#define DSI_DPI_DISABLE_BTA BIT(3) 227#define DSI_DPI_DISABLE_BTA BIT(3)
228 228
229struct mdfld_dsi_connector { 229struct mdfld_dsi_connector {
230 struct psb_intel_connector base; 230 struct gma_connector base;
231 231
232 int pipe; 232 int pipe;
233 void *private; 233 void *private;
@@ -238,7 +238,7 @@ struct mdfld_dsi_connector {
238}; 238};
239 239
240struct mdfld_dsi_encoder { 240struct mdfld_dsi_encoder {
241 struct psb_intel_encoder base; 241 struct gma_encoder base;
242 void *private; 242 void *private;
243}; 243};
244 244
@@ -269,21 +269,21 @@ struct mdfld_dsi_config {
269static inline struct mdfld_dsi_connector *mdfld_dsi_connector( 269static inline struct mdfld_dsi_connector *mdfld_dsi_connector(
270 struct drm_connector *connector) 270 struct drm_connector *connector)
271{ 271{
272 struct psb_intel_connector *psb_connector; 272 struct gma_connector *gma_connector;
273 273
274 psb_connector = to_psb_intel_connector(connector); 274 gma_connector = to_gma_connector(connector);
275 275
276 return container_of(psb_connector, struct mdfld_dsi_connector, base); 276 return container_of(gma_connector, struct mdfld_dsi_connector, base);
277} 277}
278 278
279static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder( 279static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder(
280 struct drm_encoder *encoder) 280 struct drm_encoder *encoder)
281{ 281{
282 struct psb_intel_encoder *psb_encoder; 282 struct gma_encoder *gma_encoder;
283 283
284 psb_encoder = to_psb_intel_encoder(encoder); 284 gma_encoder = to_gma_encoder(encoder);
285 285
286 return container_of(psb_encoder, struct mdfld_dsi_encoder, base); 286 return container_of(gma_encoder, struct mdfld_dsi_encoder, base);
287} 287}
288 288
289static inline struct mdfld_dsi_config * 289static inline struct mdfld_dsi_config *
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 74485dc43945..321c00a944e9 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -23,7 +23,7 @@
23 23
24#include <drm/drmP.h> 24#include <drm/drmP.h>
25#include "psb_intel_reg.h" 25#include "psb_intel_reg.h"
26#include "psb_intel_display.h" 26#include "gma_display.h"
27#include "framebuffer.h" 27#include "framebuffer.h"
28#include "mdfld_output.h" 28#include "mdfld_output.h"
29#include "mdfld_dsi_output.h" 29#include "mdfld_dsi_output.h"
@@ -65,7 +65,7 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
65 } 65 }
66 66
67 /* FIXME JLIU7_PO */ 67 /* FIXME JLIU7_PO */
68 psb_intel_wait_for_vblank(dev); 68 gma_wait_for_vblank(dev);
69 return; 69 return;
70 70
 71 /* Wait for the pipe disable to take effect. */ 71
@@ -93,7 +93,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
93 } 93 }
94 94
95 /* FIXME JLIU7_PO */ 95 /* FIXME JLIU7_PO */
96 psb_intel_wait_for_vblank(dev); 96 gma_wait_for_vblank(dev);
97 return; 97 return;
98 98
 99 /* Wait for the pipe enable to take effect. */ 99
@@ -104,25 +104,6 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
104 } 104 }
105} 105}
106 106
107static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
108{
109 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
110 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
111}
112
113static void psb_intel_crtc_commit(struct drm_crtc *crtc)
114{
115 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
116 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
117}
118
119static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
120 const struct drm_display_mode *mode,
121 struct drm_display_mode *adjusted_mode)
122{
123 return true;
124}
125
126/** 107/**
127 * Return the pipe currently connected to the panel fitter, 108 * Return the pipe currently connected to the panel fitter,
128 * or -1 if the panel fitter is not present or not in use 109 * or -1 if the panel fitter is not present or not in use
@@ -184,9 +165,9 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
184{ 165{
185 struct drm_device *dev = crtc->dev; 166 struct drm_device *dev = crtc->dev;
186 struct drm_psb_private *dev_priv = dev->dev_private; 167 struct drm_psb_private *dev_priv = dev->dev_private;
187 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 168 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
188 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); 169 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
189 int pipe = psb_intel_crtc->pipe; 170 int pipe = gma_crtc->pipe;
190 const struct psb_offset *map = &dev_priv->regmap[pipe]; 171 const struct psb_offset *map = &dev_priv->regmap[pipe];
191 unsigned long start, offset; 172 unsigned long start, offset;
192 u32 dspcntr; 173 u32 dspcntr;
@@ -324,8 +305,8 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
324{ 305{
325 struct drm_device *dev = crtc->dev; 306 struct drm_device *dev = crtc->dev;
326 struct drm_psb_private *dev_priv = dev->dev_private; 307 struct drm_psb_private *dev_priv = dev->dev_private;
327 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 308 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
328 int pipe = psb_intel_crtc->pipe; 309 int pipe = gma_crtc->pipe;
329 const struct psb_offset *map = &dev_priv->regmap[pipe]; 310 const struct psb_offset *map = &dev_priv->regmap[pipe];
330 u32 pipeconf = dev_priv->pipeconf[pipe]; 311 u32 pipeconf = dev_priv->pipeconf[pipe];
331 u32 temp; 312 u32 temp;
@@ -436,7 +417,7 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
436 } 417 }
437 } 418 }
438 419
439 psb_intel_crtc_load_lut(crtc); 420 gma_crtc_load_lut(crtc);
440 421
441 /* Give the overlay scaler a chance to enable 422 /* Give the overlay scaler a chance to enable
442 if it's on this pipe */ 423 if it's on this pipe */
@@ -611,8 +592,8 @@ static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
611 struct drm_device *dev = crtc->dev; 592 struct drm_device *dev = crtc->dev;
612 struct drm_psb_private *dev_priv = dev->dev_private; 593 struct drm_psb_private *dev_priv = dev->dev_private;
613 594
614 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI) 595 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
615 || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) { 596 || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
616 if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) 597 if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
617 limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19]; 598 limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
618 else if (ksel == KSEL_BYPASS_25) 599 else if (ksel == KSEL_BYPASS_25)
@@ -624,7 +605,7 @@ static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
624 (dev_priv->core_freq == 100 || 605 (dev_priv->core_freq == 100 ||
625 dev_priv->core_freq == 200)) 606 dev_priv->core_freq == 200))
626 limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100]; 607 limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
627 } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) { 608 } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
628 if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) 609 if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
629 limit = &mdfld_limits[MDFLD_LIMT_DPLL_19]; 610 limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
630 else if (ksel == KSEL_BYPASS_25) 611 else if (ksel == KSEL_BYPASS_25)
@@ -688,9 +669,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
688 struct drm_framebuffer *old_fb) 669 struct drm_framebuffer *old_fb)
689{ 670{
690 struct drm_device *dev = crtc->dev; 671 struct drm_device *dev = crtc->dev;
691 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 672 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
692 struct drm_psb_private *dev_priv = dev->dev_private; 673 struct drm_psb_private *dev_priv = dev->dev_private;
693 int pipe = psb_intel_crtc->pipe; 674 int pipe = gma_crtc->pipe;
694 const struct psb_offset *map = &dev_priv->regmap[pipe]; 675 const struct psb_offset *map = &dev_priv->regmap[pipe];
695 int refclk = 0; 676 int refclk = 0;
696 int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0, 677 int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
@@ -700,7 +681,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
700 u32 dpll = 0, fp = 0; 681 u32 dpll = 0, fp = 0;
701 bool is_mipi = false, is_mipi2 = false, is_hdmi = false; 682 bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
702 struct drm_mode_config *mode_config = &dev->mode_config; 683 struct drm_mode_config *mode_config = &dev->mode_config;
703 struct psb_intel_encoder *psb_intel_encoder = NULL; 684 struct gma_encoder *gma_encoder = NULL;
704 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN; 685 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
705 struct drm_encoder *encoder; 686 struct drm_encoder *encoder;
706 struct drm_connector *connector; 687 struct drm_connector *connector;
@@ -749,9 +730,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
749 if (!gma_power_begin(dev, true)) 730 if (!gma_power_begin(dev, true))
750 return 0; 731 return 0;
751 732
752 memcpy(&psb_intel_crtc->saved_mode, mode, 733 memcpy(&gma_crtc->saved_mode, mode,
753 sizeof(struct drm_display_mode)); 734 sizeof(struct drm_display_mode));
754 memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode, 735 memcpy(&gma_crtc->saved_adjusted_mode, adjusted_mode,
755 sizeof(struct drm_display_mode)); 736 sizeof(struct drm_display_mode));
756 737
757 list_for_each_entry(connector, &mode_config->connector_list, head) { 738 list_for_each_entry(connector, &mode_config->connector_list, head) {
@@ -766,9 +747,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
766 if (encoder->crtc != crtc) 747 if (encoder->crtc != crtc)
767 continue; 748 continue;
768 749
769 psb_intel_encoder = psb_intel_attached_encoder(connector); 750 gma_encoder = gma_attached_encoder(connector);
770 751
771 switch (psb_intel_encoder->type) { 752 switch (gma_encoder->type) {
772 case INTEL_OUTPUT_MIPI: 753 case INTEL_OUTPUT_MIPI:
773 is_mipi = true; 754 is_mipi = true;
774 break; 755 break;
@@ -819,7 +800,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
819 800
820 REG_WRITE(map->pos, 0); 801 REG_WRITE(map->pos, 0);
821 802
822 if (psb_intel_encoder) 803 if (gma_encoder)
823 drm_object_property_get_value(&connector->base, 804 drm_object_property_get_value(&connector->base,
824 dev->mode_config.scaling_mode_property, &scalingType); 805 dev->mode_config.scaling_mode_property, &scalingType);
825 806
@@ -1034,7 +1015,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
1034 1015
 1035 /* Wait for the pipe enable to take effect. */ 1016 /* Wait for the pipe enable to take effect. */
1036 REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]); 1017 REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]);
1037 psb_intel_wait_for_vblank(dev); 1018 gma_wait_for_vblank(dev);
1038 1019
1039mrst_crtc_mode_set_exit: 1020mrst_crtc_mode_set_exit:
1040 1021
@@ -1045,10 +1026,10 @@ mrst_crtc_mode_set_exit:
1045 1026
1046const struct drm_crtc_helper_funcs mdfld_helper_funcs = { 1027const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
1047 .dpms = mdfld_crtc_dpms, 1028 .dpms = mdfld_crtc_dpms,
1048 .mode_fixup = psb_intel_crtc_mode_fixup, 1029 .mode_fixup = gma_crtc_mode_fixup,
1049 .mode_set = mdfld_crtc_mode_set, 1030 .mode_set = mdfld_crtc_mode_set,
1050 .mode_set_base = mdfld__intel_pipe_set_base, 1031 .mode_set_base = mdfld__intel_pipe_set_base,
1051 .prepare = psb_intel_crtc_prepare, 1032 .prepare = gma_crtc_prepare,
1052 .commit = psb_intel_crtc_commit, 1033 .commit = gma_crtc_commit,
1053}; 1034};
1054 1035
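The psb_intel_crtc_prepare/commit/mode_fixup helpers deleted above were identical in each chip driver, which is what makes swapping mdfld_helper_funcs over to the shared gma_* entry points safe. A sketch of the consolidated versions, assuming gma_display.c carries the removed bodies over unchanged:

	void gma_crtc_prepare(struct drm_crtc *crtc)
	{
		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
		crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
	}

	void gma_crtc_commit(struct drm_crtc *crtc)
	{
		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
		crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
	}

	bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
				 const struct drm_display_mode *mode,
				 struct drm_display_mode *adjusted_mode)
	{
		/* Nothing to adjust on these chips */
		return true;
	}

The same substitution repeats for oaktrail in the next file.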
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 3071526bc3c1..54c98962b73e 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -23,7 +23,7 @@
23#include "psb_drv.h" 23#include "psb_drv.h"
24#include "psb_intel_drv.h" 24#include "psb_intel_drv.h"
25#include "psb_intel_reg.h" 25#include "psb_intel_reg.h"
26#include "psb_intel_display.h" 26#include "gma_display.h"
27#include "power.h" 27#include "power.h"
28 28
29struct psb_intel_range_t { 29struct psb_intel_range_t {
@@ -88,8 +88,8 @@ static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
88 struct drm_device *dev = crtc->dev; 88 struct drm_device *dev = crtc->dev;
89 struct drm_psb_private *dev_priv = dev->dev_private; 89 struct drm_psb_private *dev_priv = dev->dev_private;
90 90
91 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) 91 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
92 || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) { 92 || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
93 switch (dev_priv->core_freq) { 93 switch (dev_priv->core_freq) {
94 case 100: 94 case 100:
95 limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L]; 95 limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
@@ -163,8 +163,8 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
163{ 163{
164 struct drm_device *dev = crtc->dev; 164 struct drm_device *dev = crtc->dev;
165 struct drm_psb_private *dev_priv = dev->dev_private; 165 struct drm_psb_private *dev_priv = dev->dev_private;
166 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 166 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
167 int pipe = psb_intel_crtc->pipe; 167 int pipe = gma_crtc->pipe;
168 const struct psb_offset *map = &dev_priv->regmap[pipe]; 168 const struct psb_offset *map = &dev_priv->regmap[pipe];
169 u32 temp; 169 u32 temp;
170 170
@@ -212,7 +212,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
212 REG_WRITE(map->base, REG_READ(map->base)); 212 REG_WRITE(map->base, REG_READ(map->base));
213 } 213 }
214 214
215 psb_intel_crtc_load_lut(crtc); 215 gma_crtc_load_lut(crtc);
216 216
217 /* Give the overlay scaler a chance to enable 217 /* Give the overlay scaler a chance to enable
218 if it's on this pipe */ 218 if it's on this pipe */
@@ -242,7 +242,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
242 REG_READ(map->conf); 242 REG_READ(map->conf);
243 } 243 }
 244 /* Wait for the pipe disable to take effect. */ 244 /* Wait for the pipe disable to take effect. */
245 psb_intel_wait_for_vblank(dev); 245 gma_wait_for_vblank(dev);
246 246
247 temp = REG_READ(map->dpll); 247 temp = REG_READ(map->dpll);
248 if ((temp & DPLL_VCO_ENABLE) != 0) { 248 if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -292,9 +292,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
292 struct drm_framebuffer *old_fb) 292 struct drm_framebuffer *old_fb)
293{ 293{
294 struct drm_device *dev = crtc->dev; 294 struct drm_device *dev = crtc->dev;
295 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 295 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
296 struct drm_psb_private *dev_priv = dev->dev_private; 296 struct drm_psb_private *dev_priv = dev->dev_private;
297 int pipe = psb_intel_crtc->pipe; 297 int pipe = gma_crtc->pipe;
298 const struct psb_offset *map = &dev_priv->regmap[pipe]; 298 const struct psb_offset *map = &dev_priv->regmap[pipe];
299 int refclk = 0; 299 int refclk = 0;
300 struct oaktrail_clock_t clock; 300 struct oaktrail_clock_t clock;
@@ -303,7 +303,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
303 bool is_lvds = false; 303 bool is_lvds = false;
304 bool is_mipi = false; 304 bool is_mipi = false;
305 struct drm_mode_config *mode_config = &dev->mode_config; 305 struct drm_mode_config *mode_config = &dev->mode_config;
306 struct psb_intel_encoder *psb_intel_encoder = NULL; 306 struct gma_encoder *gma_encoder = NULL;
307 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN; 307 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
308 struct drm_connector *connector; 308 struct drm_connector *connector;
309 309
@@ -313,10 +313,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
313 if (!gma_power_begin(dev, true)) 313 if (!gma_power_begin(dev, true))
314 return 0; 314 return 0;
315 315
316 memcpy(&psb_intel_crtc->saved_mode, 316 memcpy(&gma_crtc->saved_mode,
317 mode, 317 mode,
318 sizeof(struct drm_display_mode)); 318 sizeof(struct drm_display_mode));
319 memcpy(&psb_intel_crtc->saved_adjusted_mode, 319 memcpy(&gma_crtc->saved_adjusted_mode,
320 adjusted_mode, 320 adjusted_mode,
321 sizeof(struct drm_display_mode)); 321 sizeof(struct drm_display_mode));
322 322
@@ -324,9 +324,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
324 if (!connector->encoder || connector->encoder->crtc != crtc) 324 if (!connector->encoder || connector->encoder->crtc != crtc)
325 continue; 325 continue;
326 326
327 psb_intel_encoder = psb_intel_attached_encoder(connector); 327 gma_encoder = gma_attached_encoder(connector);
328 328
329 switch (psb_intel_encoder->type) { 329 switch (gma_encoder->type) {
330 case INTEL_OUTPUT_LVDS: 330 case INTEL_OUTPUT_LVDS:
331 is_lvds = true; 331 is_lvds = true;
332 break; 332 break;
@@ -350,7 +350,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
350 ((mode->crtc_hdisplay - 1) << 16) | 350 ((mode->crtc_hdisplay - 1) << 16) |
351 (mode->crtc_vdisplay - 1)); 351 (mode->crtc_vdisplay - 1));
352 352
353 if (psb_intel_encoder) 353 if (gma_encoder)
354 drm_object_property_get_value(&connector->base, 354 drm_object_property_get_value(&connector->base,
355 dev->mode_config.scaling_mode_property, &scalingType); 355 dev->mode_config.scaling_mode_property, &scalingType);
356 356
@@ -484,31 +484,24 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
484 484
485 REG_WRITE(map->conf, pipeconf); 485 REG_WRITE(map->conf, pipeconf);
486 REG_READ(map->conf); 486 REG_READ(map->conf);
487 psb_intel_wait_for_vblank(dev); 487 gma_wait_for_vblank(dev);
488 488
489 REG_WRITE(map->cntr, dspcntr); 489 REG_WRITE(map->cntr, dspcntr);
490 psb_intel_wait_for_vblank(dev); 490 gma_wait_for_vblank(dev);
491 491
492oaktrail_crtc_mode_set_exit: 492oaktrail_crtc_mode_set_exit:
493 gma_power_end(dev); 493 gma_power_end(dev);
494 return 0; 494 return 0;
495} 495}
496 496
497static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
498 const struct drm_display_mode *mode,
499 struct drm_display_mode *adjusted_mode)
500{
501 return true;
502}
503
504static int oaktrail_pipe_set_base(struct drm_crtc *crtc, 497static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
505 int x, int y, struct drm_framebuffer *old_fb) 498 int x, int y, struct drm_framebuffer *old_fb)
506{ 499{
507 struct drm_device *dev = crtc->dev; 500 struct drm_device *dev = crtc->dev;
508 struct drm_psb_private *dev_priv = dev->dev_private; 501 struct drm_psb_private *dev_priv = dev->dev_private;
509 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 502 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
510 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); 503 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
511 int pipe = psb_intel_crtc->pipe; 504 int pipe = gma_crtc->pipe;
512 const struct psb_offset *map = &dev_priv->regmap[pipe]; 505 const struct psb_offset *map = &dev_priv->regmap[pipe];
513 unsigned long start, offset; 506 unsigned long start, offset;
514 507
@@ -563,24 +556,12 @@ pipe_set_base_exit:
563 return ret; 556 return ret;
564} 557}
565 558
566static void oaktrail_crtc_prepare(struct drm_crtc *crtc)
567{
568 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
569 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
570}
571
572static void oaktrail_crtc_commit(struct drm_crtc *crtc)
573{
574 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
575 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
576}
577
578const struct drm_crtc_helper_funcs oaktrail_helper_funcs = { 559const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
579 .dpms = oaktrail_crtc_dpms, 560 .dpms = oaktrail_crtc_dpms,
580 .mode_fixup = oaktrail_crtc_mode_fixup, 561 .mode_fixup = gma_crtc_mode_fixup,
581 .mode_set = oaktrail_crtc_mode_set, 562 .mode_set = oaktrail_crtc_mode_set,
582 .mode_set_base = oaktrail_pipe_set_base, 563 .mode_set_base = oaktrail_pipe_set_base,
583 .prepare = oaktrail_crtc_prepare, 564 .prepare = gma_crtc_prepare,
584 .commit = oaktrail_crtc_commit, 565 .commit = gma_crtc_commit,
585}; 566};
586 567
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index f036f1fc161e..38153143ed8c 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -155,12 +155,6 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
155 HDMI_READ(HDMI_HCR); 155 HDMI_READ(HDMI_HCR);
156} 156}
157 157
158static void wait_for_vblank(struct drm_device *dev)
159{
 160 /* Wait for 20ms, i.e. one cycle at 50Hz. */
161 mdelay(20);
162}
163
164static unsigned int htotal_calculate(struct drm_display_mode *mode) 158static unsigned int htotal_calculate(struct drm_display_mode *mode)
165{ 159{
166 u32 htotal, new_crtc_htotal; 160 u32 htotal, new_crtc_htotal;
@@ -372,10 +366,10 @@ int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
372 366
373 REG_WRITE(PCH_PIPEBCONF, pipeconf); 367 REG_WRITE(PCH_PIPEBCONF, pipeconf);
374 REG_READ(PCH_PIPEBCONF); 368 REG_READ(PCH_PIPEBCONF);
375 wait_for_vblank(dev); 369 gma_wait_for_vblank(dev);
376 370
377 REG_WRITE(dspcntr_reg, dspcntr); 371 REG_WRITE(dspcntr_reg, dspcntr);
378 wait_for_vblank(dev); 372 gma_wait_for_vblank(dev);
379 373
380 gma_power_end(dev); 374 gma_power_end(dev);
381 375
@@ -459,7 +453,7 @@ void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
459 REG_READ(PCH_PIPEBCONF); 453 REG_READ(PCH_PIPEBCONF);
460 } 454 }
461 455
462 wait_for_vblank(dev); 456 gma_wait_for_vblank(dev);
463 457
464 /* Enable plane */ 458 /* Enable plane */
465 temp = REG_READ(DSPBCNTR); 459 temp = REG_READ(DSPBCNTR);
@@ -470,7 +464,7 @@ void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
470 REG_READ(DSPBSURF); 464 REG_READ(DSPBSURF);
471 } 465 }
472 466
473 psb_intel_crtc_load_lut(crtc); 467 gma_crtc_load_lut(crtc);
474 } 468 }
475 469
476 /* DSPARB */ 470 /* DSPARB */
@@ -615,16 +609,16 @@ static void oaktrail_hdmi_destroy(struct drm_connector *connector)
615static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = { 609static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
616 .dpms = oaktrail_hdmi_dpms, 610 .dpms = oaktrail_hdmi_dpms,
617 .mode_fixup = oaktrail_hdmi_mode_fixup, 611 .mode_fixup = oaktrail_hdmi_mode_fixup,
618 .prepare = psb_intel_encoder_prepare, 612 .prepare = gma_encoder_prepare,
619 .mode_set = oaktrail_hdmi_mode_set, 613 .mode_set = oaktrail_hdmi_mode_set,
620 .commit = psb_intel_encoder_commit, 614 .commit = gma_encoder_commit,
621}; 615};
622 616
623static const struct drm_connector_helper_funcs 617static const struct drm_connector_helper_funcs
624 oaktrail_hdmi_connector_helper_funcs = { 618 oaktrail_hdmi_connector_helper_funcs = {
625 .get_modes = oaktrail_hdmi_get_modes, 619 .get_modes = oaktrail_hdmi_get_modes,
626 .mode_valid = oaktrail_hdmi_mode_valid, 620 .mode_valid = oaktrail_hdmi_mode_valid,
627 .best_encoder = psb_intel_best_encoder, 621 .best_encoder = gma_best_encoder,
628}; 622};
629 623
630static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = { 624static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
@@ -646,21 +640,21 @@ static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
646void oaktrail_hdmi_init(struct drm_device *dev, 640void oaktrail_hdmi_init(struct drm_device *dev,
647 struct psb_intel_mode_device *mode_dev) 641 struct psb_intel_mode_device *mode_dev)
648{ 642{
649 struct psb_intel_encoder *psb_intel_encoder; 643 struct gma_encoder *gma_encoder;
650 struct psb_intel_connector *psb_intel_connector; 644 struct gma_connector *gma_connector;
651 struct drm_connector *connector; 645 struct drm_connector *connector;
652 struct drm_encoder *encoder; 646 struct drm_encoder *encoder;
653 647
654 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); 648 gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
655 if (!psb_intel_encoder) 649 if (!gma_encoder)
656 return; 650 return;
657 651
658 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); 652 gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
659 if (!psb_intel_connector) 653 if (!gma_connector)
660 goto failed_connector; 654 goto failed_connector;
661 655
662 connector = &psb_intel_connector->base; 656 connector = &gma_connector->base;
663 encoder = &psb_intel_encoder->base; 657 encoder = &gma_encoder->base;
664 drm_connector_init(dev, connector, 658 drm_connector_init(dev, connector,
665 &oaktrail_hdmi_connector_funcs, 659 &oaktrail_hdmi_connector_funcs,
666 DRM_MODE_CONNECTOR_DVID); 660 DRM_MODE_CONNECTOR_DVID);
@@ -669,10 +663,9 @@ void oaktrail_hdmi_init(struct drm_device *dev,
669 &oaktrail_hdmi_enc_funcs, 663 &oaktrail_hdmi_enc_funcs,
670 DRM_MODE_ENCODER_TMDS); 664 DRM_MODE_ENCODER_TMDS);
671 665
672 psb_intel_connector_attach_encoder(psb_intel_connector, 666 gma_connector_attach_encoder(gma_connector, gma_encoder);
673 psb_intel_encoder);
674 667
675 psb_intel_encoder->type = INTEL_OUTPUT_HDMI; 668 gma_encoder->type = INTEL_OUTPUT_HDMI;
676 drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs); 669 drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs);
677 drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs); 670 drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs);
678 671
@@ -685,7 +678,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
685 return; 678 return;
686 679
687failed_connector: 680failed_connector:
688 kfree(psb_intel_encoder); 681 kfree(gma_encoder);
689} 682}
690 683
691static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = { 684static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
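The driver-local wait_for_vblank() deleted above and the psb_intel_wait_for_vblank() deleted later in this diff were both the same fixed 20ms delay, so routing every caller through one helper loses nothing. A sketch of the shared replacement, assuming it keeps the removed busy-wait rather than synchronizing on a real vblank event:

	void gma_wait_for_vblank(struct drm_device *dev)
	{
		/* Wait for 20ms, i.e. one cycle at 50Hz */
		mdelay(20);
	}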
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 325013a9c48c..e77d7214fca4 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -43,7 +43,7 @@
43 * Sets the power state for the panel. 43 * Sets the power state for the panel.
44 */ 44 */
45static void oaktrail_lvds_set_power(struct drm_device *dev, 45static void oaktrail_lvds_set_power(struct drm_device *dev,
46 struct psb_intel_encoder *psb_intel_encoder, 46 struct gma_encoder *gma_encoder,
47 bool on) 47 bool on)
48{ 48{
49 u32 pp_status; 49 u32 pp_status;
@@ -78,13 +78,12 @@ static void oaktrail_lvds_set_power(struct drm_device *dev,
78static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode) 78static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode)
79{ 79{
80 struct drm_device *dev = encoder->dev; 80 struct drm_device *dev = encoder->dev;
81 struct psb_intel_encoder *psb_intel_encoder = 81 struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
82 to_psb_intel_encoder(encoder);
83 82
84 if (mode == DRM_MODE_DPMS_ON) 83 if (mode == DRM_MODE_DPMS_ON)
85 oaktrail_lvds_set_power(dev, psb_intel_encoder, true); 84 oaktrail_lvds_set_power(dev, gma_encoder, true);
86 else 85 else
87 oaktrail_lvds_set_power(dev, psb_intel_encoder, false); 86 oaktrail_lvds_set_power(dev, gma_encoder, false);
88 87
89 /* XXX: We never power down the LVDS pairs. */ 88 /* XXX: We never power down the LVDS pairs. */
90} 89}
@@ -166,8 +165,7 @@ static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
166{ 165{
167 struct drm_device *dev = encoder->dev; 166 struct drm_device *dev = encoder->dev;
168 struct drm_psb_private *dev_priv = dev->dev_private; 167 struct drm_psb_private *dev_priv = dev->dev_private;
169 struct psb_intel_encoder *psb_intel_encoder = 168 struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
170 to_psb_intel_encoder(encoder);
171 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; 169 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
172 170
173 if (!gma_power_begin(dev, true)) 171 if (!gma_power_begin(dev, true))
@@ -176,7 +174,7 @@ static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
176 mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL); 174 mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
177 mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL & 175 mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
178 BACKLIGHT_DUTY_CYCLE_MASK); 176 BACKLIGHT_DUTY_CYCLE_MASK);
179 oaktrail_lvds_set_power(dev, psb_intel_encoder, false); 177 oaktrail_lvds_set_power(dev, gma_encoder, false);
180 gma_power_end(dev); 178 gma_power_end(dev);
181} 179}
182 180
@@ -203,14 +201,13 @@ static void oaktrail_lvds_commit(struct drm_encoder *encoder)
203{ 201{
204 struct drm_device *dev = encoder->dev; 202 struct drm_device *dev = encoder->dev;
205 struct drm_psb_private *dev_priv = dev->dev_private; 203 struct drm_psb_private *dev_priv = dev->dev_private;
206 struct psb_intel_encoder *psb_intel_encoder = 204 struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
207 to_psb_intel_encoder(encoder);
208 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; 205 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
209 206
210 if (mode_dev->backlight_duty_cycle == 0) 207 if (mode_dev->backlight_duty_cycle == 0)
211 mode_dev->backlight_duty_cycle = 208 mode_dev->backlight_duty_cycle =
212 oaktrail_lvds_get_max_backlight(dev); 209 oaktrail_lvds_get_max_backlight(dev);
213 oaktrail_lvds_set_power(dev, psb_intel_encoder, true); 210 oaktrail_lvds_set_power(dev, gma_encoder, true);
214} 211}
215 212
216static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = { 213static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
@@ -325,8 +322,8 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
325void oaktrail_lvds_init(struct drm_device *dev, 322void oaktrail_lvds_init(struct drm_device *dev,
326 struct psb_intel_mode_device *mode_dev) 323 struct psb_intel_mode_device *mode_dev)
327{ 324{
328 struct psb_intel_encoder *psb_intel_encoder; 325 struct gma_encoder *gma_encoder;
329 struct psb_intel_connector *psb_intel_connector; 326 struct gma_connector *gma_connector;
330 struct drm_connector *connector; 327 struct drm_connector *connector;
331 struct drm_encoder *encoder; 328 struct drm_encoder *encoder;
332 struct drm_psb_private *dev_priv = dev->dev_private; 329 struct drm_psb_private *dev_priv = dev->dev_private;
@@ -334,16 +331,16 @@ void oaktrail_lvds_init(struct drm_device *dev,
334 struct i2c_adapter *i2c_adap; 331 struct i2c_adapter *i2c_adap;
335 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 332 struct drm_display_mode *scan; /* *modes, *bios_mode; */
336 333
337 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); 334 gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
338 if (!psb_intel_encoder) 335 if (!gma_encoder)
339 return; 336 return;
340 337
341 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); 338 gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
342 if (!psb_intel_connector) 339 if (!gma_connector)
343 goto failed_connector; 340 goto failed_connector;
344 341
345 connector = &psb_intel_connector->base; 342 connector = &gma_connector->base;
346 encoder = &psb_intel_encoder->base; 343 encoder = &gma_encoder->base;
347 dev_priv->is_lvds_on = true; 344 dev_priv->is_lvds_on = true;
348 drm_connector_init(dev, connector, 345 drm_connector_init(dev, connector,
349 &psb_intel_lvds_connector_funcs, 346 &psb_intel_lvds_connector_funcs,
@@ -352,9 +349,8 @@ void oaktrail_lvds_init(struct drm_device *dev,
352 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, 349 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
353 DRM_MODE_ENCODER_LVDS); 350 DRM_MODE_ENCODER_LVDS);
354 351
355 psb_intel_connector_attach_encoder(psb_intel_connector, 352 gma_connector_attach_encoder(gma_connector, gma_encoder);
356 psb_intel_encoder); 353 gma_encoder->type = INTEL_OUTPUT_LVDS;
357 psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
358 354
359 drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs); 355 drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs);
360 drm_connector_helper_add(connector, 356 drm_connector_helper_add(connector,
@@ -434,15 +430,15 @@ out:
434 430
435failed_find: 431failed_find:
436 dev_dbg(dev->dev, "No LVDS modes found, disabling.\n"); 432 dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
437 if (psb_intel_encoder->ddc_bus) 433 if (gma_encoder->ddc_bus)
438 psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus); 434 psb_intel_i2c_destroy(gma_encoder->ddc_bus);
439 435
440/* failed_ddc: */ 436/* failed_ddc: */
441 437
442 drm_encoder_cleanup(encoder); 438 drm_encoder_cleanup(encoder);
443 drm_connector_cleanup(connector); 439 drm_connector_cleanup(connector);
444 kfree(psb_intel_connector); 440 kfree(gma_connector);
445failed_connector: 441failed_connector:
446 kfree(psb_intel_encoder); 442 kfree(gma_encoder);
447} 443}
448 444
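The renamed oaktrail_lvds_init() and oaktrail_hdmi_init() keep their two-allocation unwind intact across the type change. The skeleton both follow, with the DRM registration elided for brevity (example_output_init is a hypothetical name used only to show the pattern):

	static void example_output_init(struct drm_device *dev)
	{
		struct gma_encoder *gma_encoder;
		struct gma_connector *gma_connector;

		gma_encoder = kzalloc(sizeof(*gma_encoder), GFP_KERNEL);
		if (!gma_encoder)
			return;

		gma_connector = kzalloc(sizeof(*gma_connector), GFP_KERNEL);
		if (!gma_connector)
			goto failed_connector;

		/* ... register encoder and connector with DRM; later probe
		 * failures free both objects before returning ... */
		return;

	failed_connector:
		kfree(gma_encoder);
	}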
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index f6f534b4197e..697678619bd1 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -25,7 +25,7 @@
25#include "psb_reg.h" 25#include "psb_reg.h"
26#include "psb_intel_reg.h" 26#include "psb_intel_reg.h"
27#include "intel_bios.h" 27#include "intel_bios.h"
28 28#include "psb_device.h"
29 29
30static int psb_output_init(struct drm_device *dev) 30static int psb_output_init(struct drm_device *dev)
31{ 31{
@@ -380,6 +380,7 @@ const struct psb_ops psb_chip_ops = {
380 380
381 .crtc_helper = &psb_intel_helper_funcs, 381 .crtc_helper = &psb_intel_helper_funcs,
382 .crtc_funcs = &psb_intel_crtc_funcs, 382 .crtc_funcs = &psb_intel_crtc_funcs,
383 .clock_funcs = &psb_clock_funcs,
383 384
384 .output_init = psb_output_init, 385 .output_init = psb_output_init,
385 386
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.h b/drivers/gpu/drm/gma500/psb_device.h
index 3724b971e91c..35e304c7f85a 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.h
+++ b/drivers/gpu/drm/gma500/psb_device.h
@@ -1,4 +1,6 @@
1/* copyright (c) 2008, Intel Corporation 1/*
2 * Copyright © 2013 Patrik Jakobsson
3 * Copyright © 2011 Intel Corporation
2 * 4 *
3 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
4 * under the terms and conditions of the GNU General Public License, 6 * under the terms and conditions of the GNU General Public License,
@@ -12,14 +14,11 @@
12 * You should have received a copy of the GNU General Public License along with 14 * You should have received a copy of the GNU General Public License along with
13 * this program; if not, write to the Free Software Foundation, Inc., 15 * this program; if not, write to the Free Software Foundation, Inc.,
14 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
15 *
16 * Authors:
17 * Eric Anholt <eric@anholt.net>
18 */ 17 */
19 18
20#ifndef _INTEL_DISPLAY_H_ 19#ifndef _PSB_DEVICE_H_
21#define _INTEL_DISPLAY_H_ 20#define _PSB_DEVICE_H_
22 21
23bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type); 22extern const struct gma_clock_funcs psb_clock_funcs;
24 23
25#endif 24#endif
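psb_device.h now exports psb_clock_funcs, and the psb_device.c hunk above wires it into psb_chip_ops as .clock_funcs, so per-chip PLL code is reached through an ops table rather than direct calls. The real definition lives in gma_display.h, which is outside this excerpt; judging from the call sites later in this diff (gma_crtc->clock_funcs->limit(crtc, refclk) and limit->find_pll(...)), a plausible shape is:

	struct gma_clock_funcs {
		/* Derive dot, vco, m and p from the raw divisor values */
		void (*clock)(int refclk, struct gma_clock_t *clock);
		/* Pick the divisor limits for this crtc at this refclk */
		const struct gma_limit_t *(*limit)(struct drm_crtc *crtc,
						   int refclk);
		/* Check a candidate divisor set against those limits */
		bool (*pll_is_valid)(struct drm_crtc *crtc,
				     const struct gma_limit_t *limit,
				     struct gma_clock_t *clock);
	};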
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index bddea5807442..fcb4e9ff1f20 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -131,7 +131,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
131static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data, 131static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
132 struct drm_file *file_priv); 132 struct drm_file *file_priv);
133 133
134static struct drm_ioctl_desc psb_ioctls[] = { 134static const struct drm_ioctl_desc psb_ioctls[] = {
135 DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH), 135 DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH),
136 DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl, 136 DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl,
137 DRM_AUTH), 137 DRM_AUTH),
@@ -270,7 +270,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
270 unsigned long irqflags; 270 unsigned long irqflags;
271 int ret = -ENOMEM; 271 int ret = -ENOMEM;
272 struct drm_connector *connector; 272 struct drm_connector *connector;
273 struct psb_intel_encoder *psb_intel_encoder; 273 struct gma_encoder *gma_encoder;
274 274
275 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 275 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
276 if (dev_priv == NULL) 276 if (dev_priv == NULL)
@@ -372,9 +372,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
372 /* Only add backlight support if we have LVDS output */ 372 /* Only add backlight support if we have LVDS output */
373 list_for_each_entry(connector, &dev->mode_config.connector_list, 373 list_for_each_entry(connector, &dev->mode_config.connector_list,
374 head) { 374 head) {
375 psb_intel_encoder = psb_intel_attached_encoder(connector); 375 gma_encoder = gma_attached_encoder(connector);
376 376
377 switch (psb_intel_encoder->type) { 377 switch (gma_encoder->type) {
378 case INTEL_OUTPUT_LVDS: 378 case INTEL_OUTPUT_LVDS:
379 case INTEL_OUTPUT_MIPI: 379 case INTEL_OUTPUT_MIPI:
380 ret = gma_backlight_init(dev); 380 ret = gma_backlight_init(dev);
@@ -441,7 +441,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
441 struct drm_mode_object *obj; 441 struct drm_mode_object *obj;
442 struct drm_crtc *crtc; 442 struct drm_crtc *crtc;
443 struct drm_connector *connector; 443 struct drm_connector *connector;
444 struct psb_intel_crtc *psb_intel_crtc; 444 struct gma_crtc *gma_crtc;
445 int i = 0; 445 int i = 0;
446 int32_t obj_id; 446 int32_t obj_id;
447 447
@@ -454,12 +454,12 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
454 454
455 connector = obj_to_connector(obj); 455 connector = obj_to_connector(obj);
456 crtc = connector->encoder->crtc; 456 crtc = connector->encoder->crtc;
457 psb_intel_crtc = to_psb_intel_crtc(crtc); 457 gma_crtc = to_gma_crtc(crtc);
458 458
459 for (i = 0; i < 256; i++) 459 for (i = 0; i < 256; i++)
460 psb_intel_crtc->lut_adj[i] = lut_arg->lut[i]; 460 gma_crtc->lut_adj[i] = lut_arg->lut[i];
461 461
462 psb_intel_crtc_load_lut(crtc); 462 gma_crtc_load_lut(crtc);
463 463
464 return 0; 464 return 0;
465} 465}
@@ -622,13 +622,12 @@ static const struct file_operations psb_gem_fops = {
622 .unlocked_ioctl = psb_unlocked_ioctl, 622 .unlocked_ioctl = psb_unlocked_ioctl,
623 .mmap = drm_gem_mmap, 623 .mmap = drm_gem_mmap,
624 .poll = drm_poll, 624 .poll = drm_poll,
625 .fasync = drm_fasync,
626 .read = drm_read, 625 .read = drm_read,
627}; 626};
628 627
629static struct drm_driver driver = { 628static struct drm_driver driver = {
630 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \ 629 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
631 DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM , 630 DRIVER_MODESET | DRIVER_GEM ,
632 .load = psb_driver_load, 631 .load = psb_driver_load,
633 .unload = psb_driver_unload, 632 .unload = psb_driver_unload,
634 633
@@ -652,7 +651,7 @@ static struct drm_driver driver = {
652 .gem_vm_ops = &psb_gem_vm_ops, 651 .gem_vm_ops = &psb_gem_vm_ops,
653 .dumb_create = psb_gem_dumb_create, 652 .dumb_create = psb_gem_dumb_create,
654 .dumb_map_offset = psb_gem_dumb_map_gtt, 653 .dumb_map_offset = psb_gem_dumb_map_gtt,
655 .dumb_destroy = psb_gem_dumb_destroy, 654 .dumb_destroy = drm_gem_dumb_destroy,
656 .fops = &psb_gem_fops, 655 .fops = &psb_gem_fops,
657 .name = DRIVER_NAME, 656 .name = DRIVER_NAME,
658 .desc = DRIVER_DESC, 657 .desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 6053b8abcd12..4535ac7708f8 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -27,6 +27,7 @@
27#include <drm/gma_drm.h> 27#include <drm/gma_drm.h>
28#include "psb_reg.h" 28#include "psb_reg.h"
29#include "psb_intel_drv.h" 29#include "psb_intel_drv.h"
30#include "gma_display.h"
30#include "intel_bios.h" 31#include "intel_bios.h"
31#include "gtt.h" 32#include "gtt.h"
32#include "power.h" 33#include "power.h"
@@ -46,6 +47,7 @@ enum {
46#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108) 47#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
47#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100) 48#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
48#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130) 49#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
50#define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0)
49 51
50/* 52/*
51 * Driver definitions 53 * Driver definitions
@@ -675,6 +677,7 @@ struct psb_ops {
675 /* Sub functions */ 677 /* Sub functions */
676 struct drm_crtc_helper_funcs const *crtc_helper; 678 struct drm_crtc_helper_funcs const *crtc_helper;
677 struct drm_crtc_funcs const *crtc_funcs; 679 struct drm_crtc_funcs const *crtc_funcs;
680 const struct gma_clock_funcs *clock_funcs;
678 681
679 /* Setup hooks */ 682 /* Setup hooks */
680 int (*chip_setup)(struct drm_device *dev); 683 int (*chip_setup)(struct drm_device *dev);
@@ -692,6 +695,8 @@ struct psb_ops {
692 int (*restore_regs)(struct drm_device *dev); 695 int (*restore_regs)(struct drm_device *dev);
693 int (*power_up)(struct drm_device *dev); 696 int (*power_up)(struct drm_device *dev);
694 int (*power_down)(struct drm_device *dev); 697 int (*power_down)(struct drm_device *dev);
698 void (*update_wm)(struct drm_device *dev, struct drm_crtc *crtc);
699 void (*disable_sr)(struct drm_device *dev);
695 700
696 void (*lvds_bl_power)(struct drm_device *dev, bool on); 701 void (*lvds_bl_power)(struct drm_device *dev, bool on);
697#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 702#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
@@ -838,8 +843,6 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
838 struct drm_file *file); 843 struct drm_file *file);
839extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 844extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
840 struct drm_mode_create_dumb *args); 845 struct drm_mode_create_dumb *args);
841extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
842 uint32_t handle);
843extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, 846extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
844 uint32_t handle, uint64_t *offset); 847 uint32_t handle, uint64_t *offset);
845extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 848extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
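The new update_wm and disable_sr hooks in struct psb_ops give the core a chip-neutral way to reprogram FIFO watermarks and self-refresh around mode changes, replacing hard-coded writes such as the REG_WRITE(DSPARB, 0x3F3E) removed further down. A hedged sketch of a call site (a hypothetical helper, not part of this diff, assuming dev_priv keeps its usual ops pointer):

	static void gma_update_watermarks(struct drm_device *dev,
					  struct drm_crtc *crtc)
	{
		struct drm_psb_private *dev_priv = dev->dev_private;

		/* The hooks are optional; chips may leave them NULL */
		if (dev_priv->ops->update_wm)
			dev_priv->ops->update_wm(dev, crtc);
	}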
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 6666493789d1..97f8a03fee43 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -19,46 +19,19 @@
19 */ 19 */
20 20
21#include <linux/i2c.h> 21#include <linux/i2c.h>
22#include <linux/pm_runtime.h>
23 22
24#include <drm/drmP.h> 23#include <drm/drmP.h>
25#include "framebuffer.h" 24#include "framebuffer.h"
26#include "psb_drv.h" 25#include "psb_drv.h"
27#include "psb_intel_drv.h" 26#include "psb_intel_drv.h"
28#include "psb_intel_reg.h" 27#include "psb_intel_reg.h"
29#include "psb_intel_display.h" 28#include "gma_display.h"
30#include "power.h" 29#include "power.h"
31 30
32struct psb_intel_clock_t {
33 /* given values */
34 int n;
35 int m1, m2;
36 int p1, p2;
37 /* derived values */
38 int dot;
39 int vco;
40 int m;
41 int p;
42};
43
44struct psb_intel_range_t {
45 int min, max;
46};
47
48struct psb_intel_p2_t {
49 int dot_limit;
50 int p2_slow, p2_fast;
51};
52
53struct psb_intel_limit_t {
54 struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
55 struct psb_intel_p2_t p2;
56};
57
58#define INTEL_LIMIT_I9XX_SDVO_DAC 0 31#define INTEL_LIMIT_I9XX_SDVO_DAC 0
59#define INTEL_LIMIT_I9XX_LVDS 1 32#define INTEL_LIMIT_I9XX_LVDS 1
60 33
61static const struct psb_intel_limit_t psb_intel_limits[] = { 34static const struct gma_limit_t psb_intel_limits[] = {
62 { /* INTEL_LIMIT_I9XX_SDVO_DAC */ 35 { /* INTEL_LIMIT_I9XX_SDVO_DAC */
63 .dot = {.min = 20000, .max = 400000}, 36 .dot = {.min = 20000, .max = 400000},
64 .vco = {.min = 1400000, .max = 2800000}, 37 .vco = {.min = 1400000, .max = 2800000},
@@ -68,8 +41,8 @@ static const struct psb_intel_limit_t psb_intel_limits[] = {
68 .m2 = {.min = 3, .max = 7}, 41 .m2 = {.min = 3, .max = 7},
69 .p = {.min = 5, .max = 80}, 42 .p = {.min = 5, .max = 80},
70 .p1 = {.min = 1, .max = 8}, 43 .p1 = {.min = 1, .max = 8},
71 .p2 = {.dot_limit = 200000, 44 .p2 = {.dot_limit = 200000, .p2_slow = 10, .p2_fast = 5},
72 .p2_slow = 10, .p2_fast = 5}, 45 .find_pll = gma_find_best_pll,
73 }, 46 },
74 { /* INTEL_LIMIT_I9XX_LVDS */ 47 { /* INTEL_LIMIT_I9XX_LVDS */
75 .dot = {.min = 20000, .max = 400000}, 48 .dot = {.min = 20000, .max = 400000},
@@ -83,23 +56,24 @@ static const struct psb_intel_limit_t psb_intel_limits[] = {
83 /* The single-channel range is 25-112Mhz, and dual-channel 56 /* The single-channel range is 25-112Mhz, and dual-channel
84 * is 80-224Mhz. Prefer single channel as much as possible. 57 * is 80-224Mhz. Prefer single channel as much as possible.
85 */ 58 */
86 .p2 = {.dot_limit = 112000, 59 .p2 = {.dot_limit = 112000, .p2_slow = 14, .p2_fast = 7},
87 .p2_slow = 14, .p2_fast = 7}, 60 .find_pll = gma_find_best_pll,
88 }, 61 },
89}; 62};
90 63
91static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc) 64static const struct gma_limit_t *psb_intel_limit(struct drm_crtc *crtc,
65 int refclk)
92{ 66{
93 const struct psb_intel_limit_t *limit; 67 const struct gma_limit_t *limit;
94 68
95 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 69 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
96 limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS]; 70 limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
97 else 71 else
98 limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; 72 limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
99 return limit; 73 return limit;
100} 74}
101 75
102static void psb_intel_clock(int refclk, struct psb_intel_clock_t *clock) 76static void psb_intel_clock(int refclk, struct gma_clock_t *clock)
103{ 77{
104 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 78 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
105 clock->p = clock->p1 * clock->p2; 79 clock->p = clock->p1 * clock->p2;
@@ -108,353 +82,6 @@ static void psb_intel_clock(int refclk, struct psb_intel_clock_t *clock)
108} 82}
109 83
110/** 84/**
111 * Returns whether any output on the specified pipe is of the specified type
112 */
113bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
114{
115 struct drm_device *dev = crtc->dev;
116 struct drm_mode_config *mode_config = &dev->mode_config;
117 struct drm_connector *l_entry;
118
119 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
120 if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
121 struct psb_intel_encoder *psb_intel_encoder =
122 psb_intel_attached_encoder(l_entry);
123 if (psb_intel_encoder->type == type)
124 return true;
125 }
126 }
127 return false;
128}
129
130#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
131/**
132 * Returns whether the given set of divisors are valid for a given refclk with
133 * the given connectors.
134 */
135
136static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
137 struct psb_intel_clock_t *clock)
138{
139 const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
140
141 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
142 INTELPllInvalid("p1 out of range\n");
143 if (clock->p < limit->p.min || limit->p.max < clock->p)
144 INTELPllInvalid("p out of range\n");
145 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
146 INTELPllInvalid("m2 out of range\n");
147 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
148 INTELPllInvalid("m1 out of range\n");
149 if (clock->m1 <= clock->m2)
150 INTELPllInvalid("m1 <= m2\n");
151 if (clock->m < limit->m.min || limit->m.max < clock->m)
152 INTELPllInvalid("m out of range\n");
153 if (clock->n < limit->n.min || limit->n.max < clock->n)
154 INTELPllInvalid("n out of range\n");
155 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
156 INTELPllInvalid("vco out of range\n");
157 /* XXX: We may need to be checking "Dot clock"
158 * depending on the multiplier, connector, etc.,
159 * rather than just a single range.
160 */
161 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
162 INTELPllInvalid("dot out of range\n");
163
164 return true;
165}
166
167/**
168 * Returns a set of divisors for the desired target clock with the given
169 * refclk, or FALSE. The returned values represent the clock equation:
 170 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
171 */
172static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
173 int refclk,
174 struct psb_intel_clock_t *best_clock)
175{
176 struct drm_device *dev = crtc->dev;
177 struct psb_intel_clock_t clock;
178 const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
179 int err = target;
180
181 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
182 (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
183 /*
184 * For LVDS, if the panel is on, just rely on its current
185 * settings for dual-channel. We haven't figured out how to
186 * reliably set up different single/dual channel state, if we
187 * even can.
188 */
189 if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
190 LVDS_CLKB_POWER_UP)
191 clock.p2 = limit->p2.p2_fast;
192 else
193 clock.p2 = limit->p2.p2_slow;
194 } else {
195 if (target < limit->p2.dot_limit)
196 clock.p2 = limit->p2.p2_slow;
197 else
198 clock.p2 = limit->p2.p2_fast;
199 }
200
201 memset(best_clock, 0, sizeof(*best_clock));
202
203 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
204 clock.m1++) {
205 for (clock.m2 = limit->m2.min;
206 clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
207 clock.m2++) {
208 for (clock.n = limit->n.min;
209 clock.n <= limit->n.max; clock.n++) {
210 for (clock.p1 = limit->p1.min;
211 clock.p1 <= limit->p1.max;
212 clock.p1++) {
213 int this_err;
214
215 psb_intel_clock(refclk, &clock);
216
217 if (!psb_intel_PLL_is_valid
218 (crtc, &clock))
219 continue;
220
221 this_err = abs(clock.dot - target);
222 if (this_err < err) {
223 *best_clock = clock;
224 err = this_err;
225 }
226 }
227 }
228 }
229 }
230
231 return err != target;
232}
233
234void psb_intel_wait_for_vblank(struct drm_device *dev)
235{
236 /* Wait for 20ms, i.e. one cycle at 50hz. */
237 mdelay(20);
238}
239
240static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
241 int x, int y, struct drm_framebuffer *old_fb)
242{
243 struct drm_device *dev = crtc->dev;
244 struct drm_psb_private *dev_priv = dev->dev_private;
245 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
246 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
247 int pipe = psb_intel_crtc->pipe;
248 const struct psb_offset *map = &dev_priv->regmap[pipe];
249 unsigned long start, offset;
250 u32 dspcntr;
251 int ret = 0;
252
253 if (!gma_power_begin(dev, true))
254 return 0;
255
256 /* no fb bound */
257 if (!crtc->fb) {
258 dev_dbg(dev->dev, "No FB bound\n");
259 goto psb_intel_pipe_cleaner;
260 }
261
262 /* We are displaying this buffer, make sure it is actually loaded
263 into the GTT */
264 ret = psb_gtt_pin(psbfb->gtt);
265 if (ret < 0)
266 goto psb_intel_pipe_set_base_exit;
267 start = psbfb->gtt->offset;
268
269 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
270
271 REG_WRITE(map->stride, crtc->fb->pitches[0]);
272
273 dspcntr = REG_READ(map->cntr);
274 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
275
276 switch (crtc->fb->bits_per_pixel) {
277 case 8:
278 dspcntr |= DISPPLANE_8BPP;
279 break;
280 case 16:
281 if (crtc->fb->depth == 15)
282 dspcntr |= DISPPLANE_15_16BPP;
283 else
284 dspcntr |= DISPPLANE_16BPP;
285 break;
286 case 24:
287 case 32:
288 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
289 break;
290 default:
291 dev_err(dev->dev, "Unknown color depth\n");
292 ret = -EINVAL;
293 psb_gtt_unpin(psbfb->gtt);
294 goto psb_intel_pipe_set_base_exit;
295 }
296 REG_WRITE(map->cntr, dspcntr);
297
298 REG_WRITE(map->base, start + offset);
299 REG_READ(map->base);
300
301psb_intel_pipe_cleaner:
302 /* If there was a previous display we can now unpin it */
303 if (old_fb)
304 psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
305
306psb_intel_pipe_set_base_exit:
307 gma_power_end(dev);
308 return ret;
309}
310
311/**
312 * Sets the power management mode of the pipe and plane.
313 *
314 * This code should probably grow support for turning the cursor off and back
315 * on appropriately at the same time as we're turning the pipe off/on.
316 */
317static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
318{
319 struct drm_device *dev = crtc->dev;
320 struct drm_psb_private *dev_priv = dev->dev_private;
321 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
322 int pipe = psb_intel_crtc->pipe;
323 const struct psb_offset *map = &dev_priv->regmap[pipe];
324 u32 temp;
325
326 /* XXX: When our outputs are all unaware of DPMS modes other than off
327 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
328 */
329 switch (mode) {
330 case DRM_MODE_DPMS_ON:
331 case DRM_MODE_DPMS_STANDBY:
332 case DRM_MODE_DPMS_SUSPEND:
333 /* Enable the DPLL */
334 temp = REG_READ(map->dpll);
335 if ((temp & DPLL_VCO_ENABLE) == 0) {
336 REG_WRITE(map->dpll, temp);
337 REG_READ(map->dpll);
338 /* Wait for the clocks to stabilize. */
339 udelay(150);
340 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
341 REG_READ(map->dpll);
342 /* Wait for the clocks to stabilize. */
343 udelay(150);
344 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
345 REG_READ(map->dpll);
346 /* Wait for the clocks to stabilize. */
347 udelay(150);
348 }
349
350 /* Enable the pipe */
351 temp = REG_READ(map->conf);
352 if ((temp & PIPEACONF_ENABLE) == 0)
353 REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
354
355 /* Enable the plane */
356 temp = REG_READ(map->cntr);
357 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
358 REG_WRITE(map->cntr,
359 temp | DISPLAY_PLANE_ENABLE);
360 /* Flush the plane changes */
361 REG_WRITE(map->base, REG_READ(map->base));
362 }
363
364 psb_intel_crtc_load_lut(crtc);
365
366 /* Give the overlay scaler a chance to enable
367 * if it's on this pipe */
368 /* psb_intel_crtc_dpms_video(crtc, true); TODO */
369 break;
370 case DRM_MODE_DPMS_OFF:
371 /* Give the overlay scaler a chance to disable
372 * if it's on this pipe */
373 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
374
375 /* Disable the VGA plane that we never use */
376 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
377
378 /* Disable display plane */
379 temp = REG_READ(map->cntr);
380 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
381 REG_WRITE(map->cntr,
382 temp & ~DISPLAY_PLANE_ENABLE);
383 /* Flush the plane changes */
384 REG_WRITE(map->base, REG_READ(map->base));
385 REG_READ(map->base);
386 }
387
388 /* Next, disable display pipes */
389 temp = REG_READ(map->conf);
390 if ((temp & PIPEACONF_ENABLE) != 0) {
391 REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
392 REG_READ(map->conf);
393 }
394
395 /* Wait for vblank for the disable to take effect. */
396 psb_intel_wait_for_vblank(dev);
397
398 temp = REG_READ(map->dpll);
399 if ((temp & DPLL_VCO_ENABLE) != 0) {
400 REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
401 REG_READ(map->dpll);
402 }
403
404 /* Wait for the clocks to turn off. */
405 udelay(150);
406 break;
407 }
408
 409 /* Set FIFO watermarks */
410 REG_WRITE(DSPARB, 0x3F3E);
411}
412
413static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
414{
415 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
416 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
417}
418
419static void psb_intel_crtc_commit(struct drm_crtc *crtc)
420{
421 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
422 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
423}
424
425void psb_intel_encoder_prepare(struct drm_encoder *encoder)
426{
427 struct drm_encoder_helper_funcs *encoder_funcs =
428 encoder->helper_private;
 429 /* lvds has its own version of prepare, see psb_intel_lvds_prepare */
430 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
431}
432
433void psb_intel_encoder_commit(struct drm_encoder *encoder)
434{
435 struct drm_encoder_helper_funcs *encoder_funcs =
436 encoder->helper_private;
 437 /* lvds has its own version of commit, see psb_intel_lvds_commit */
438 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
439}
440
441void psb_intel_encoder_destroy(struct drm_encoder *encoder)
442{
443 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
444
445 drm_encoder_cleanup(encoder);
446 kfree(intel_encoder);
447}
448
449static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
450 const struct drm_display_mode *mode,
451 struct drm_display_mode *adjusted_mode)
452{
453 return true;
454}
455
456
457/**
458 * Return the pipe currently connected to the panel fitter, 85 * Return the pipe currently connected to the panel fitter,
459 * or -1 if the panel fitter is not present or not in use 86 * or -1 if the panel fitter is not present or not in use
460 */ 87 */
@@ -479,17 +106,18 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
479{ 106{
480 struct drm_device *dev = crtc->dev; 107 struct drm_device *dev = crtc->dev;
481 struct drm_psb_private *dev_priv = dev->dev_private; 108 struct drm_psb_private *dev_priv = dev->dev_private;
482 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 109 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
483 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 110 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
484 int pipe = psb_intel_crtc->pipe; 111 int pipe = gma_crtc->pipe;
485 const struct psb_offset *map = &dev_priv->regmap[pipe]; 112 const struct psb_offset *map = &dev_priv->regmap[pipe];
486 int refclk; 113 int refclk;
487 struct psb_intel_clock_t clock; 114 struct gma_clock_t clock;
488 u32 dpll = 0, fp = 0, dspcntr, pipeconf; 115 u32 dpll = 0, fp = 0, dspcntr, pipeconf;
489 bool ok, is_sdvo = false; 116 bool ok, is_sdvo = false;
490 bool is_lvds = false, is_tv = false; 117 bool is_lvds = false, is_tv = false;
491 struct drm_mode_config *mode_config = &dev->mode_config; 118 struct drm_mode_config *mode_config = &dev->mode_config;
492 struct drm_connector *connector; 119 struct drm_connector *connector;
120 const struct gma_limit_t *limit;
493 121
494 /* No scan out no play */ 122 /* No scan out no play */
495 if (crtc->fb == NULL) { 123 if (crtc->fb == NULL) {
@@ -498,14 +126,13 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
498 } 126 }
499 127
500 list_for_each_entry(connector, &mode_config->connector_list, head) { 128 list_for_each_entry(connector, &mode_config->connector_list, head) {
501 struct psb_intel_encoder *psb_intel_encoder = 129 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
502 psb_intel_attached_encoder(connector);
503 130
504 if (!connector->encoder 131 if (!connector->encoder
505 || connector->encoder->crtc != crtc) 132 || connector->encoder->crtc != crtc)
506 continue; 133 continue;
507 134
508 switch (psb_intel_encoder->type) { 135 switch (gma_encoder->type) {
509 case INTEL_OUTPUT_LVDS: 136 case INTEL_OUTPUT_LVDS:
510 is_lvds = true; 137 is_lvds = true;
511 break; 138 break;
@@ -520,10 +147,13 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
520 147
521 refclk = 96000; 148 refclk = 96000;
522 149
523 ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, 150 limit = gma_crtc->clock_funcs->limit(crtc, refclk);
151
152 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
524 &clock); 153 &clock);
525 if (!ok) { 154 if (!ok) {
526 dev_err(dev->dev, "Couldn't find PLL settings for mode!\n"); 155 DRM_ERROR("Couldn't find PLL settings for mode! target: %d, actual: %d",
156 adjusted_mode->clock, clock.dot);
527 return 0; 157 return 0;
528 } 158 }
529 159
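The mode-set path now asks the crtc's clock ops for the divisor limits and lets the limit table supply its own PLL search through .find_pll. Since the psb limit tables above name gma_find_best_pll, the shared version presumably keeps the exhaustive scan of psb_intel_find_best_PLL (deleted below), generalized to take the limits and the chip's clock callbacks as parameters; roughly, under that assumption:

	bool gma_find_best_pll(const struct gma_limit_t *limit,
			       struct drm_crtc *crtc, int target, int refclk,
			       struct gma_clock_t *best_clock)
	{
		struct drm_device *dev = crtc->dev;
		const struct gma_clock_funcs *clock_funcs =
					to_gma_crtc(crtc)->clock_funcs;
		struct gma_clock_t clock;
		int err = target;

		/* LVDS p2 selection, unchanged from the deleted search */
		if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
		    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
			if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
			    LVDS_CLKB_POWER_UP)
				clock.p2 = limit->p2.p2_fast;
			else
				clock.p2 = limit->p2.p2_slow;
		} else {
			if (target < limit->p2.dot_limit)
				clock.p2 = limit->p2.p2_slow;
			else
				clock.p2 = limit->p2.p2_fast;
		}

		memset(best_clock, 0, sizeof(*best_clock));

		/* Exhaustive scan, keeping the candidate whose dot clock
		 * lands closest to the target */
		for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
		     clock.m1++) {
			for (clock.m2 = limit->m2.min;
			     clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
			     clock.m2++) {
				for (clock.n = limit->n.min;
				     clock.n <= limit->n.max; clock.n++) {
					for (clock.p1 = limit->p1.min;
					     clock.p1 <= limit->p1.max;
					     clock.p1++) {
						int this_err;

						clock_funcs->clock(refclk, &clock);
						if (!clock_funcs->pll_is_valid(crtc,
								limit, &clock))
							continue;

						this_err = abs(clock.dot - target);
						if (this_err < err) {
							*best_clock = clock;
							err = this_err;
						}
					}
				}
			}
		}

		return err != target;
	}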
@@ -661,368 +291,29 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
661 REG_WRITE(map->conf, pipeconf); 291 REG_WRITE(map->conf, pipeconf);
662 REG_READ(map->conf); 292 REG_READ(map->conf);
663 293
664 psb_intel_wait_for_vblank(dev); 294 gma_wait_for_vblank(dev);
665 295
666 REG_WRITE(map->cntr, dspcntr); 296 REG_WRITE(map->cntr, dspcntr);
667 297
668 /* Flush the plane changes */ 298 /* Flush the plane changes */
669 crtc_funcs->mode_set_base(crtc, x, y, old_fb); 299 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
670 300
671 psb_intel_wait_for_vblank(dev); 301 gma_wait_for_vblank(dev);
672
673 return 0;
674}
675
676/** Loads the palette/gamma unit for the CRTC with the prepared values */
677void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
678{
679 struct drm_device *dev = crtc->dev;
680 struct drm_psb_private *dev_priv = dev->dev_private;
681 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
682 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
683 int palreg = map->palette;
684 int i;
685
686 /* The clocks have to be on to load the palette. */
687 if (!crtc->enabled)
688 return;
689
690 switch (psb_intel_crtc->pipe) {
691 case 0:
692 case 1:
693 break;
694 default:
695 dev_err(dev->dev, "Illegal Pipe Number.\n");
696 return;
697 }
698
699 if (gma_power_begin(dev, false)) {
700 for (i = 0; i < 256; i++) {
701 REG_WRITE(palreg + 4 * i,
702 ((psb_intel_crtc->lut_r[i] +
703 psb_intel_crtc->lut_adj[i]) << 16) |
704 ((psb_intel_crtc->lut_g[i] +
705 psb_intel_crtc->lut_adj[i]) << 8) |
706 (psb_intel_crtc->lut_b[i] +
707 psb_intel_crtc->lut_adj[i]));
708 }
709 gma_power_end(dev);
710 } else {
711 for (i = 0; i < 256; i++) {
712 dev_priv->regs.pipe[0].palette[i] =
713 ((psb_intel_crtc->lut_r[i] +
714 psb_intel_crtc->lut_adj[i]) << 16) |
715 ((psb_intel_crtc->lut_g[i] +
716 psb_intel_crtc->lut_adj[i]) << 8) |
717 (psb_intel_crtc->lut_b[i] +
718 psb_intel_crtc->lut_adj[i]);
719 }
720
721 }
722}
723
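/* A small helper restating the packing used by the loops above: each
 * of the 256 palette entries is one 32-bit register holding R:G:B at
 * 8 bits per channel, with lut_adj[] added to all three channels (the
 * driver assumes value + adjustment still fits in 8 bits). */
static inline u32 gma_pack_lut_entry(u8 r, u8 g, u8 b, u8 adj)
{
	return ((u32)(r + adj) << 16) | ((u32)(g + adj) << 8) | (u32)(b + adj);
}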
724/**
 725 * Save HW states of the given crtc
726 */
727static void psb_intel_crtc_save(struct drm_crtc *crtc)
728{
729 struct drm_device *dev = crtc->dev;
730 struct drm_psb_private *dev_priv = dev->dev_private;
731 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
732 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
733 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
734 uint32_t paletteReg;
735 int i;
736
737 if (!crtc_state) {
738 dev_err(dev->dev, "No CRTC state found\n");
739 return;
740 }
741
742 crtc_state->saveDSPCNTR = REG_READ(map->cntr);
743 crtc_state->savePIPECONF = REG_READ(map->conf);
744 crtc_state->savePIPESRC = REG_READ(map->src);
745 crtc_state->saveFP0 = REG_READ(map->fp0);
746 crtc_state->saveFP1 = REG_READ(map->fp1);
747 crtc_state->saveDPLL = REG_READ(map->dpll);
748 crtc_state->saveHTOTAL = REG_READ(map->htotal);
749 crtc_state->saveHBLANK = REG_READ(map->hblank);
750 crtc_state->saveHSYNC = REG_READ(map->hsync);
751 crtc_state->saveVTOTAL = REG_READ(map->vtotal);
752 crtc_state->saveVBLANK = REG_READ(map->vblank);
753 crtc_state->saveVSYNC = REG_READ(map->vsync);
754 crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
755
756 /*NOTE: DSPSIZE DSPPOS only for psb*/
757 crtc_state->saveDSPSIZE = REG_READ(map->size);
758 crtc_state->saveDSPPOS = REG_READ(map->pos);
759
760 crtc_state->saveDSPBASE = REG_READ(map->base);
761
762 paletteReg = map->palette;
763 for (i = 0; i < 256; ++i)
764 crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
765}
766
767/**
 768 * Restore HW states of the given crtc
769 */
770static void psb_intel_crtc_restore(struct drm_crtc *crtc)
771{
772 struct drm_device *dev = crtc->dev;
773 struct drm_psb_private *dev_priv = dev->dev_private;
774 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
775 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
776 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
777 uint32_t paletteReg;
778 int i;
779
780 if (!crtc_state) {
781 dev_err(dev->dev, "No crtc state\n");
782 return;
783 }
784
785 if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
786 REG_WRITE(map->dpll,
787 crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
788 REG_READ(map->dpll);
789 udelay(150);
790 }
791
792 REG_WRITE(map->fp0, crtc_state->saveFP0);
793 REG_READ(map->fp0);
794
795 REG_WRITE(map->fp1, crtc_state->saveFP1);
796 REG_READ(map->fp1);
797
798 REG_WRITE(map->dpll, crtc_state->saveDPLL);
799 REG_READ(map->dpll);
800 udelay(150);
801
802 REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
803 REG_WRITE(map->hblank, crtc_state->saveHBLANK);
804 REG_WRITE(map->hsync, crtc_state->saveHSYNC);
805 REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
806 REG_WRITE(map->vblank, crtc_state->saveVBLANK);
807 REG_WRITE(map->vsync, crtc_state->saveVSYNC);
808 REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
809
810 REG_WRITE(map->size, crtc_state->saveDSPSIZE);
811 REG_WRITE(map->pos, crtc_state->saveDSPPOS);
812
813 REG_WRITE(map->src, crtc_state->savePIPESRC);
814 REG_WRITE(map->base, crtc_state->saveDSPBASE);
815 REG_WRITE(map->conf, crtc_state->savePIPECONF);
816
817 psb_intel_wait_for_vblank(dev);
818
819 REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
820 REG_WRITE(map->base, crtc_state->saveDSPBASE);
821
822 psb_intel_wait_for_vblank(dev);
823
824 paletteReg = map->palette;
825 for (i = 0; i < 256; ++i)
826 REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
827}
828
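/* Summary of the restore ordering above (a description of this code,
 * not a documented hardware contract): gate the VCO first if the
 * saved DPLL had it enabled, reprogram the FP0/FP1 dividers, rewrite
 * DPLL and give it ~150us to lock, then bring back timings, pipe and
 * plane state with vblank waits between the plane writes. */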
829static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
830 struct drm_file *file_priv,
831 uint32_t handle,
832 uint32_t width, uint32_t height)
833{
834 struct drm_device *dev = crtc->dev;
835 struct drm_psb_private *dev_priv = dev->dev_private;
836 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
837 int pipe = psb_intel_crtc->pipe;
838 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
839 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
840 uint32_t temp;
841 size_t addr = 0;
842 struct gtt_range *gt;
843 struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt;
844 struct drm_gem_object *obj;
845 void *tmp_dst, *tmp_src;
846 int ret = 0, i, cursor_pages;
847
 848 /* if we want to turn off the cursor, ignore width and height */
849 if (!handle) {
850 /* turn off the cursor */
851 temp = CURSOR_MODE_DISABLE;
852
853 if (gma_power_begin(dev, false)) {
854 REG_WRITE(control, temp);
855 REG_WRITE(base, 0);
856 gma_power_end(dev);
857 }
858
859 /* Unpin the old GEM object */
860 if (psb_intel_crtc->cursor_obj) {
861 gt = container_of(psb_intel_crtc->cursor_obj,
862 struct gtt_range, gem);
863 psb_gtt_unpin(gt);
864 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
865 psb_intel_crtc->cursor_obj = NULL;
866 }
867
868 return 0;
869 }
870
871 /* Currently we only support 64x64 cursors */
872 if (width != 64 || height != 64) {
873 dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
874 return -EINVAL;
875 }
876
877 obj = drm_gem_object_lookup(dev, file_priv, handle);
878 if (!obj)
879 return -ENOENT;
880
881 if (obj->size < width * height * 4) {
 882 dev_dbg(dev->dev, "buffer is too small\n");
883 ret = -ENOMEM;
884 goto unref_cursor;
885 }
886
887 gt = container_of(obj, struct gtt_range, gem);
888
889 /* Pin the memory into the GTT */
890 ret = psb_gtt_pin(gt);
891 if (ret) {
892 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
893 goto unref_cursor;
894 }
895
896 if (dev_priv->ops->cursor_needs_phys) {
897 if (cursor_gt == NULL) {
898 dev_err(dev->dev, "No hardware cursor mem available");
899 ret = -ENOMEM;
900 goto unref_cursor;
901 }
902
903 /* Prevent overflow */
904 if (gt->npage > 4)
905 cursor_pages = 4;
906 else
907 cursor_pages = gt->npage;
908
909 /* Copy the cursor to cursor mem */
910 tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
911 for (i = 0; i < cursor_pages; i++) {
912 tmp_src = kmap(gt->pages[i]);
913 memcpy(tmp_dst, tmp_src, PAGE_SIZE);
914 kunmap(gt->pages[i]);
915 tmp_dst += PAGE_SIZE;
916 }
917
918 addr = psb_intel_crtc->cursor_addr;
919 } else {
920 addr = gt->offset; /* Or resource.start ??? */
921 psb_intel_crtc->cursor_addr = addr;
922 }
923
924 temp = 0;
925 /* set the pipe for the cursor */
926 temp |= (pipe << 28);
927 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
928
929 if (gma_power_begin(dev, false)) {
930 REG_WRITE(control, temp);
931 REG_WRITE(base, addr);
932 gma_power_end(dev);
933 }
934
935 /* unpin the old bo */
936 if (psb_intel_crtc->cursor_obj) {
937 gt = container_of(psb_intel_crtc->cursor_obj,
938 struct gtt_range, gem);
939 psb_gtt_unpin(gt);
940 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
941 }
942
943 psb_intel_crtc->cursor_obj = obj;
944 return ret;
945
946unref_cursor:
947 drm_gem_object_unreference(obj);
948 return ret;
949}
950
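/* A sketch of the phys-cursor path in cursor_set above: on chips
 * where the cursor must live in stolen memory, at most four pages are
 * copied, matching the 4 * PAGE_SIZE cursor_gt reservation made in
 * psb_intel_cursor_init() below. Hypothetical helper, for
 * illustration only: */
static void gma_copy_cursor_pages(void *dst, struct page **pages, int npages)
{
	int i;

	if (npages > 4)		/* cap at the 4-page reservation */
		npages = 4;
	for (i = 0; i < npages; i++) {
		void *src = kmap(pages[i]);

		memcpy(dst, src, PAGE_SIZE);
		kunmap(pages[i]);
		dst += PAGE_SIZE;	/* void * arithmetic, as in the driver */
	}
}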
951static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
952{
953 struct drm_device *dev = crtc->dev;
954 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
955 int pipe = psb_intel_crtc->pipe;
956 uint32_t temp = 0;
957 uint32_t addr;
958
959
960 if (x < 0) {
961 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
962 x = -x;
963 }
964 if (y < 0) {
965 temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
966 y = -y;
967 }
968
969 temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
970 temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
971
972 addr = psb_intel_crtc->cursor_addr;
973 302
974 if (gma_power_begin(dev, false)) {
975 REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
976 REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
977 gma_power_end(dev);
978 }
979 return 0; 303 return 0;
980} 304}
981 305
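/* Note on cursor_move above: CURxPOS takes sign-magnitude coordinates
 * rather than two's complement. A worked example, x = -5 and y = 10:
 *   temp = ((CURSOR_POS_SIGN | 5) << CURSOR_X_SHIFT) |
 *          ((10 & CURSOR_POS_MASK) << CURSOR_Y_SHIFT)
 * i.e. the sign travels as a flag bit alongside the magnitude. */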
982static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
983 u16 *green, u16 *blue, uint32_t type, uint32_t size)
984{
985 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
986 int i;
987
988 if (size != 256)
989 return;
990
991 for (i = 0; i < 256; i++) {
992 psb_intel_crtc->lut_r[i] = red[i] >> 8;
993 psb_intel_crtc->lut_g[i] = green[i] >> 8;
994 psb_intel_crtc->lut_b[i] = blue[i] >> 8;
995 }
996
997 psb_intel_crtc_load_lut(crtc);
998}
999
1000static int psb_crtc_set_config(struct drm_mode_set *set)
1001{
1002 int ret;
1003 struct drm_device *dev = set->crtc->dev;
1004 struct drm_psb_private *dev_priv = dev->dev_private;
1005
1006 if (!dev_priv->rpm_enabled)
1007 return drm_crtc_helper_set_config(set);
1008
1009 pm_runtime_forbid(&dev->pdev->dev);
1010 ret = drm_crtc_helper_set_config(set);
1011 pm_runtime_allow(&dev->pdev->dev);
1012 return ret;
1013}
1014
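/* Note on the set_config wrapper above: pm_runtime_forbid() raises
 * the device's usage count (resuming it if necessary), so the GPU
 * cannot runtime-suspend in the middle of the modeset;
 * pm_runtime_allow() drops that count again once
 * drm_crtc_helper_set_config() has returned. */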
1015/* Returns the clock of the currently programmed mode of the given pipe. */ 306/* Returns the clock of the currently programmed mode of the given pipe. */
1016static int psb_intel_crtc_clock_get(struct drm_device *dev, 307static int psb_intel_crtc_clock_get(struct drm_device *dev,
1017 struct drm_crtc *crtc) 308 struct drm_crtc *crtc)
1018{ 309{
1019 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 310 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
1020 struct drm_psb_private *dev_priv = dev->dev_private; 311 struct drm_psb_private *dev_priv = dev->dev_private;
1021 int pipe = psb_intel_crtc->pipe; 312 int pipe = gma_crtc->pipe;
1022 const struct psb_offset *map = &dev_priv->regmap[pipe]; 313 const struct psb_offset *map = &dev_priv->regmap[pipe];
1023 u32 dpll; 314 u32 dpll;
1024 u32 fp; 315 u32 fp;
1025 struct psb_intel_clock_t clock; 316 struct gma_clock_t clock;
1026 bool is_lvds; 317 bool is_lvds;
1027 struct psb_pipe *p = &dev_priv->regs.pipe[pipe]; 318 struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
1028 319
@@ -1092,8 +383,8 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
1092struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, 383struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
1093 struct drm_crtc *crtc) 384 struct drm_crtc *crtc)
1094{ 385{
1095 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 386 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
1096 int pipe = psb_intel_crtc->pipe; 387 int pipe = gma_crtc->pipe;
1097 struct drm_display_mode *mode; 388 struct drm_display_mode *mode;
1098 int htot; 389 int htot;
1099 int hsync; 390 int hsync;
@@ -1136,58 +427,30 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
1136 return mode; 427 return mode;
1137} 428}
1138 429
1139static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
1140{
1141 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1142 struct gtt_range *gt;
1143
1144 /* Unpin the old GEM object */
1145 if (psb_intel_crtc->cursor_obj) {
1146 gt = container_of(psb_intel_crtc->cursor_obj,
1147 struct gtt_range, gem);
1148 psb_gtt_unpin(gt);
1149 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1150 psb_intel_crtc->cursor_obj = NULL;
1151 }
1152
1153 if (psb_intel_crtc->cursor_gt != NULL)
1154 psb_gtt_free_range(crtc->dev, psb_intel_crtc->cursor_gt);
1155 kfree(psb_intel_crtc->crtc_state);
1156 drm_crtc_cleanup(crtc);
1157 kfree(psb_intel_crtc);
1158}
1159
1160static void psb_intel_crtc_disable(struct drm_crtc *crtc)
1161{
1162 struct gtt_range *gt;
1163 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1164
1165 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1166
1167 if (crtc->fb) {
1168 gt = to_psb_fb(crtc->fb)->gtt;
1169 psb_gtt_unpin(gt);
1170 }
1171}
1172
1173const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { 430const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
1174 .dpms = psb_intel_crtc_dpms, 431 .dpms = gma_crtc_dpms,
1175 .mode_fixup = psb_intel_crtc_mode_fixup, 432 .mode_fixup = gma_crtc_mode_fixup,
1176 .mode_set = psb_intel_crtc_mode_set, 433 .mode_set = psb_intel_crtc_mode_set,
1177 .mode_set_base = psb_intel_pipe_set_base, 434 .mode_set_base = gma_pipe_set_base,
1178 .prepare = psb_intel_crtc_prepare, 435 .prepare = gma_crtc_prepare,
1179 .commit = psb_intel_crtc_commit, 436 .commit = gma_crtc_commit,
1180 .disable = psb_intel_crtc_disable, 437 .disable = gma_crtc_disable,
1181}; 438};
1182 439
1183const struct drm_crtc_funcs psb_intel_crtc_funcs = { 440const struct drm_crtc_funcs psb_intel_crtc_funcs = {
1184 .save = psb_intel_crtc_save, 441 .save = gma_crtc_save,
1185 .restore = psb_intel_crtc_restore, 442 .restore = gma_crtc_restore,
1186 .cursor_set = psb_intel_crtc_cursor_set, 443 .cursor_set = gma_crtc_cursor_set,
1187 .cursor_move = psb_intel_crtc_cursor_move, 444 .cursor_move = gma_crtc_cursor_move,
1188 .gamma_set = psb_intel_crtc_gamma_set, 445 .gamma_set = gma_crtc_gamma_set,
1189 .set_config = psb_crtc_set_config, 446 .set_config = gma_crtc_set_config,
1190 .destroy = psb_intel_crtc_destroy, 447 .destroy = gma_crtc_destroy,
448};
449
450const struct gma_clock_funcs psb_clock_funcs = {
451 .clock = psb_intel_clock,
452 .limit = psb_intel_limit,
453 .pll_is_valid = gma_pll_is_valid,
1191}; 454};
1192 455
1193/* 456/*
@@ -1195,7 +458,7 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
 1195 * to zero. This is a workaround for a h/w defect on Oaktrail 458 * to zero. This is a workaround for a h/w defect on Oaktrail
1196 */ 459 */
1197static void psb_intel_cursor_init(struct drm_device *dev, 460static void psb_intel_cursor_init(struct drm_device *dev,
1198 struct psb_intel_crtc *psb_intel_crtc) 461 struct gma_crtc *gma_crtc)
1199{ 462{
1200 struct drm_psb_private *dev_priv = dev->dev_private; 463 struct drm_psb_private *dev_priv = dev->dev_private;
1201 u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR }; 464 u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
@@ -1208,88 +471,87 @@ static void psb_intel_cursor_init(struct drm_device *dev,
1208 */ 471 */
1209 cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1); 472 cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1);
1210 if (!cursor_gt) { 473 if (!cursor_gt) {
1211 psb_intel_crtc->cursor_gt = NULL; 474 gma_crtc->cursor_gt = NULL;
1212 goto out; 475 goto out;
1213 } 476 }
1214 psb_intel_crtc->cursor_gt = cursor_gt; 477 gma_crtc->cursor_gt = cursor_gt;
1215 psb_intel_crtc->cursor_addr = dev_priv->stolen_base + 478 gma_crtc->cursor_addr = dev_priv->stolen_base +
1216 cursor_gt->offset; 479 cursor_gt->offset;
1217 } else { 480 } else {
1218 psb_intel_crtc->cursor_gt = NULL; 481 gma_crtc->cursor_gt = NULL;
1219 } 482 }
1220 483
1221out: 484out:
1222 REG_WRITE(control[psb_intel_crtc->pipe], 0); 485 REG_WRITE(control[gma_crtc->pipe], 0);
1223 REG_WRITE(base[psb_intel_crtc->pipe], 0); 486 REG_WRITE(base[gma_crtc->pipe], 0);
1224} 487}
1225 488
1226void psb_intel_crtc_init(struct drm_device *dev, int pipe, 489void psb_intel_crtc_init(struct drm_device *dev, int pipe,
1227 struct psb_intel_mode_device *mode_dev) 490 struct psb_intel_mode_device *mode_dev)
1228{ 491{
1229 struct drm_psb_private *dev_priv = dev->dev_private; 492 struct drm_psb_private *dev_priv = dev->dev_private;
1230 struct psb_intel_crtc *psb_intel_crtc; 493 struct gma_crtc *gma_crtc;
1231 int i; 494 int i;
1232 uint16_t *r_base, *g_base, *b_base; 495 uint16_t *r_base, *g_base, *b_base;
1233 496
 1234 /* We allocate an extra array of drm_connector pointers 497 /* We allocate an extra array of drm_connector pointers
1235 * for fbdev after the crtc */ 498 * for fbdev after the crtc */
1236 psb_intel_crtc = 499 gma_crtc = kzalloc(sizeof(struct gma_crtc) +
1237 kzalloc(sizeof(struct psb_intel_crtc) + 500 (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
1238 (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), 501 GFP_KERNEL);
1239 GFP_KERNEL); 502 if (gma_crtc == NULL)
1240 if (psb_intel_crtc == NULL)
1241 return; 503 return;
1242 504
1243 psb_intel_crtc->crtc_state = 505 gma_crtc->crtc_state =
1244 kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL); 506 kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
1245 if (!psb_intel_crtc->crtc_state) { 507 if (!gma_crtc->crtc_state) {
1246 dev_err(dev->dev, "Crtc state error: No memory\n"); 508 dev_err(dev->dev, "Crtc state error: No memory\n");
1247 kfree(psb_intel_crtc); 509 kfree(gma_crtc);
1248 return; 510 return;
1249 } 511 }
1250 512
1251 /* Set the CRTC operations from the chip specific data */ 513 /* Set the CRTC operations from the chip specific data */
1252 drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs); 514 drm_crtc_init(dev, &gma_crtc->base, dev_priv->ops->crtc_funcs);
1253 515
1254 drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256); 516 /* Set the CRTC clock functions from chip specific data */
1255 psb_intel_crtc->pipe = pipe; 517 gma_crtc->clock_funcs = dev_priv->ops->clock_funcs;
1256 psb_intel_crtc->plane = pipe;
1257 518
1258 r_base = psb_intel_crtc->base.gamma_store; 519 drm_mode_crtc_set_gamma_size(&gma_crtc->base, 256);
520 gma_crtc->pipe = pipe;
521 gma_crtc->plane = pipe;
522
523 r_base = gma_crtc->base.gamma_store;
1259 g_base = r_base + 256; 524 g_base = r_base + 256;
1260 b_base = g_base + 256; 525 b_base = g_base + 256;
1261 for (i = 0; i < 256; i++) { 526 for (i = 0; i < 256; i++) {
1262 psb_intel_crtc->lut_r[i] = i; 527 gma_crtc->lut_r[i] = i;
1263 psb_intel_crtc->lut_g[i] = i; 528 gma_crtc->lut_g[i] = i;
1264 psb_intel_crtc->lut_b[i] = i; 529 gma_crtc->lut_b[i] = i;
1265 r_base[i] = i << 8; 530 r_base[i] = i << 8;
1266 g_base[i] = i << 8; 531 g_base[i] = i << 8;
1267 b_base[i] = i << 8; 532 b_base[i] = i << 8;
1268 533
1269 psb_intel_crtc->lut_adj[i] = 0; 534 gma_crtc->lut_adj[i] = 0;
1270 } 535 }
1271 536
1272 psb_intel_crtc->mode_dev = mode_dev; 537 gma_crtc->mode_dev = mode_dev;
1273 psb_intel_crtc->cursor_addr = 0; 538 gma_crtc->cursor_addr = 0;
1274 539
1275 drm_crtc_helper_add(&psb_intel_crtc->base, 540 drm_crtc_helper_add(&gma_crtc->base,
1276 dev_priv->ops->crtc_helper); 541 dev_priv->ops->crtc_helper);
1277 542
 1278 /* Set up the array of drm_connector pointers */ 543 /* Set up the array of drm_connector pointers */
1279 psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base; 544 gma_crtc->mode_set.crtc = &gma_crtc->base;
1280 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 545 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
1281 dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL); 546 dev_priv->plane_to_crtc_mapping[gma_crtc->plane] != NULL);
1282 dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] = 547 dev_priv->plane_to_crtc_mapping[gma_crtc->plane] = &gma_crtc->base;
1283 &psb_intel_crtc->base; 548 dev_priv->pipe_to_crtc_mapping[gma_crtc->pipe] = &gma_crtc->base;
1284 dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] = 549 gma_crtc->mode_set.connectors = (struct drm_connector **)(gma_crtc + 1);
1285 &psb_intel_crtc->base; 550 gma_crtc->mode_set.num_connectors = 0;
1286 psb_intel_crtc->mode_set.connectors = 551 psb_intel_cursor_init(dev, gma_crtc);
1287 (struct drm_connector **) (psb_intel_crtc + 1);
1288 psb_intel_crtc->mode_set.num_connectors = 0;
1289 psb_intel_cursor_init(dev, psb_intel_crtc);
1290 552
1291 /* Set to true so that the pipe is forced off on initial config. */ 553 /* Set to true so that the pipe is forced off on initial config. */
1292 psb_intel_crtc->active = true; 554 gma_crtc->active = true;
1293} 555}
1294 556
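/* Note on the single allocation in psb_intel_crtc_init() above: one
 * kzalloc() carves out
 *
 *   [ struct gma_crtc ][ struct drm_connector *[INTELFB_CONN_LIMIT] ]
 *   ^ gma_crtc          ^ (struct drm_connector **)(gma_crtc + 1)
 *
 * so `gma_crtc + 1` (pointer arithmetic in units of sizeof(struct
 * gma_crtc)) is exactly the start of the trailing connector-pointer
 * array that mode_set.connectors is wired to. */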
1295int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 557int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -1298,7 +560,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
1298 struct drm_psb_private *dev_priv = dev->dev_private; 560 struct drm_psb_private *dev_priv = dev->dev_private;
1299 struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data; 561 struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
1300 struct drm_mode_object *drmmode_obj; 562 struct drm_mode_object *drmmode_obj;
1301 struct psb_intel_crtc *crtc; 563 struct gma_crtc *crtc;
1302 564
1303 if (!dev_priv) { 565 if (!dev_priv) {
1304 dev_err(dev->dev, "called with no initialization\n"); 566 dev_err(dev->dev, "called with no initialization\n");
@@ -1313,7 +575,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
1313 return -EINVAL; 575 return -EINVAL;
1314 } 576 }
1315 577
1316 crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj)); 578 crtc = to_gma_crtc(obj_to_crtc(drmmode_obj));
1317 pipe_from_crtc_id->pipe = crtc->pipe; 579 pipe_from_crtc_id->pipe = crtc->pipe;
1318 580
1319 return 0; 581 return 0;
@@ -1324,14 +586,14 @@ struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
1324 struct drm_crtc *crtc = NULL; 586 struct drm_crtc *crtc = NULL;
1325 587
1326 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 588 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1327 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 589 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
1328 if (psb_intel_crtc->pipe == pipe) 590 if (gma_crtc->pipe == pipe)
1329 break; 591 break;
1330 } 592 }
1331 return crtc; 593 return crtc;
1332} 594}
1333 595
1334int psb_intel_connector_clones(struct drm_device *dev, int type_mask) 596int gma_connector_clones(struct drm_device *dev, int type_mask)
1335{ 597{
1336 int index_mask = 0; 598 int index_mask = 0;
1337 struct drm_connector *connector; 599 struct drm_connector *connector;
@@ -1339,30 +601,10 @@ int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
1339 601
1340 list_for_each_entry(connector, &dev->mode_config.connector_list, 602 list_for_each_entry(connector, &dev->mode_config.connector_list,
1341 head) { 603 head) {
1342 struct psb_intel_encoder *psb_intel_encoder = 604 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
1343 psb_intel_attached_encoder(connector); 605 if (type_mask & (1 << gma_encoder->type))
1344 if (type_mask & (1 << psb_intel_encoder->type))
1345 index_mask |= (1 << entry); 606 index_mask |= (1 << entry);
1346 entry++; 607 entry++;
1347 } 608 }
1348 return index_mask; 609 return index_mask;
1349} 610}
1350
 1351/* the current intel driver doesn't take advantage of encoders;
 1352 always give back the encoder for the connector
1353*/
1354struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
1355{
1356 struct psb_intel_encoder *psb_intel_encoder =
1357 psb_intel_attached_encoder(connector);
1358
1359 return &psb_intel_encoder->base;
1360}
1361
1362void psb_intel_connector_attach_encoder(struct psb_intel_connector *connector,
1363 struct psb_intel_encoder *encoder)
1364{
1365 connector->encoder = encoder;
1366 drm_mode_connector_attach_encoder(&connector->base,
1367 &encoder->base);
1368}
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 4dcae421a58d..bde27fdb41bf 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -24,6 +24,7 @@
24#include <drm/drm_crtc.h> 24#include <drm/drm_crtc.h>
25#include <drm/drm_crtc_helper.h> 25#include <drm/drm_crtc_helper.h>
26#include <linux/gpio.h> 26#include <linux/gpio.h>
27#include "gma_display.h"
27 28
28/* 29/*
29 * Display related stuff 30 * Display related stuff
@@ -116,11 +117,11 @@ struct psb_intel_i2c_chan {
116 u8 slave_addr; 117 u8 slave_addr;
117}; 118};
118 119
119struct psb_intel_encoder { 120struct gma_encoder {
120 struct drm_encoder base; 121 struct drm_encoder base;
121 int type; 122 int type;
122 bool needs_tv_clock; 123 bool needs_tv_clock;
123 void (*hot_plug)(struct psb_intel_encoder *); 124 void (*hot_plug)(struct gma_encoder *);
124 int crtc_mask; 125 int crtc_mask;
125 int clone_mask; 126 int clone_mask;
126 u32 ddi_select; /* Channel info */ 127 u32 ddi_select; /* Channel info */
@@ -136,9 +137,9 @@ struct psb_intel_encoder {
136 struct psb_intel_i2c_chan *ddc_bus; 137 struct psb_intel_i2c_chan *ddc_bus;
137}; 138};
138 139
139struct psb_intel_connector { 140struct gma_connector {
140 struct drm_connector base; 141 struct drm_connector base;
141 struct psb_intel_encoder *encoder; 142 struct gma_encoder *encoder;
142}; 143};
143 144
144struct psb_intel_crtc_state { 145struct psb_intel_crtc_state {
@@ -161,7 +162,7 @@ struct psb_intel_crtc_state {
161 uint32_t savePalette[256]; 162 uint32_t savePalette[256];
162}; 163};
163 164
164struct psb_intel_crtc { 165struct gma_crtc {
165 struct drm_crtc base; 166 struct drm_crtc base;
166 int pipe; 167 int pipe;
167 int plane; 168 int plane;
@@ -188,14 +189,16 @@ struct psb_intel_crtc {
188 189
189 /* Saved Crtc HW states */ 190 /* Saved Crtc HW states */
190 struct psb_intel_crtc_state *crtc_state; 191 struct psb_intel_crtc_state *crtc_state;
192
193 const struct gma_clock_funcs *clock_funcs;
191}; 194};
192 195
193#define to_psb_intel_crtc(x) \ 196#define to_gma_crtc(x) \
194 container_of(x, struct psb_intel_crtc, base) 197 container_of(x, struct gma_crtc, base)
195#define to_psb_intel_connector(x) \ 198#define to_gma_connector(x) \
196 container_of(x, struct psb_intel_connector, base) 199 container_of(x, struct gma_connector, base)
197#define to_psb_intel_encoder(x) \ 200#define to_gma_encoder(x) \
198 container_of(x, struct psb_intel_encoder, base) 201 container_of(x, struct gma_encoder, base)
199#define to_psb_intel_framebuffer(x) \ 202#define to_psb_intel_framebuffer(x) \
200 container_of(x, struct psb_intel_framebuffer, base) 203 container_of(x, struct psb_intel_framebuffer, base)
201 204
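/* A sketch of what the to_gma_*() casts above expand to: each wrapper
 * embeds the DRM core object, and container_of() subtracts the member
 * offset to recover the wrapper from a pointer to the embedded base.
 * The struct below is a hypothetical example, not a gma500 type: */
struct example_wrapper {
	struct drm_crtc base;	/* embedded core object */
	int pipe;		/* driver-private state */
};

static inline struct example_wrapper *to_example(struct drm_crtc *crtc)
{
	return container_of(crtc, struct example_wrapper, base);
}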
@@ -223,27 +226,18 @@ extern void oaktrail_dsi_init(struct drm_device *dev,
223extern void mid_dsi_init(struct drm_device *dev, 226extern void mid_dsi_init(struct drm_device *dev,
224 struct psb_intel_mode_device *mode_dev, int dsi_num); 227 struct psb_intel_mode_device *mode_dev, int dsi_num);
225 228
226extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc); 229extern struct drm_encoder *gma_best_encoder(struct drm_connector *connector);
227extern void psb_intel_encoder_prepare(struct drm_encoder *encoder); 230extern void gma_connector_attach_encoder(struct gma_connector *connector,
228extern void psb_intel_encoder_commit(struct drm_encoder *encoder); 231 struct gma_encoder *encoder);
229extern void psb_intel_encoder_destroy(struct drm_encoder *encoder);
230 232
231static inline struct psb_intel_encoder *psb_intel_attached_encoder( 233static inline struct gma_encoder *gma_attached_encoder(
232 struct drm_connector *connector) 234 struct drm_connector *connector)
233{ 235{
234 return to_psb_intel_connector(connector)->encoder; 236 return to_gma_connector(connector)->encoder;
235} 237}
236 238
237extern void psb_intel_connector_attach_encoder(
238 struct psb_intel_connector *connector,
239 struct psb_intel_encoder *encoder);
240
241extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
242 *connector);
243
244extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, 239extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
245 struct drm_crtc *crtc); 240 struct drm_crtc *crtc);
246extern void psb_intel_wait_for_vblank(struct drm_device *dev);
247extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 241extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
248 struct drm_file *file_priv); 242 struct drm_file *file_priv);
249extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, 243extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 9fa5fa2e6192..32342f6990d9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -267,10 +267,9 @@ static void psb_intel_lvds_save(struct drm_connector *connector)
267 struct drm_device *dev = connector->dev; 267 struct drm_device *dev = connector->dev;
268 struct drm_psb_private *dev_priv = 268 struct drm_psb_private *dev_priv =
269 (struct drm_psb_private *)dev->dev_private; 269 (struct drm_psb_private *)dev->dev_private;
270 struct psb_intel_encoder *psb_intel_encoder = 270 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
271 psb_intel_attached_encoder(connector);
272 struct psb_intel_lvds_priv *lvds_priv = 271 struct psb_intel_lvds_priv *lvds_priv =
273 (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv; 272 (struct psb_intel_lvds_priv *)gma_encoder->dev_priv;
274 273
275 lvds_priv->savePP_ON = REG_READ(LVDSPP_ON); 274 lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
276 lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF); 275 lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
@@ -307,10 +306,9 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
307{ 306{
308 struct drm_device *dev = connector->dev; 307 struct drm_device *dev = connector->dev;
309 u32 pp_status; 308 u32 pp_status;
310 struct psb_intel_encoder *psb_intel_encoder = 309 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
311 psb_intel_attached_encoder(connector);
312 struct psb_intel_lvds_priv *lvds_priv = 310 struct psb_intel_lvds_priv *lvds_priv =
313 (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv; 311 (struct psb_intel_lvds_priv *)gma_encoder->dev_priv;
314 312
315 dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", 313 dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
316 lvds_priv->savePP_ON, 314 lvds_priv->savePP_ON,
@@ -349,12 +347,11 @@ int psb_intel_lvds_mode_valid(struct drm_connector *connector,
349 struct drm_display_mode *mode) 347 struct drm_display_mode *mode)
350{ 348{
351 struct drm_psb_private *dev_priv = connector->dev->dev_private; 349 struct drm_psb_private *dev_priv = connector->dev->dev_private;
352 struct psb_intel_encoder *psb_intel_encoder = 350 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
353 psb_intel_attached_encoder(connector);
354 struct drm_display_mode *fixed_mode = 351 struct drm_display_mode *fixed_mode =
355 dev_priv->mode_dev.panel_fixed_mode; 352 dev_priv->mode_dev.panel_fixed_mode;
356 353
357 if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2) 354 if (gma_encoder->type == INTEL_OUTPUT_MIPI2)
358 fixed_mode = dev_priv->mode_dev.panel_fixed_mode2; 355 fixed_mode = dev_priv->mode_dev.panel_fixed_mode2;
359 356
360 /* just in case */ 357 /* just in case */
@@ -381,22 +378,20 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
381 struct drm_device *dev = encoder->dev; 378 struct drm_device *dev = encoder->dev;
382 struct drm_psb_private *dev_priv = dev->dev_private; 379 struct drm_psb_private *dev_priv = dev->dev_private;
383 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; 380 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
384 struct psb_intel_crtc *psb_intel_crtc = 381 struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
385 to_psb_intel_crtc(encoder->crtc);
386 struct drm_encoder *tmp_encoder; 382 struct drm_encoder *tmp_encoder;
387 struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode; 383 struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
388 struct psb_intel_encoder *psb_intel_encoder = 384 struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
389 to_psb_intel_encoder(encoder);
390 385
391 if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2) 386 if (gma_encoder->type == INTEL_OUTPUT_MIPI2)
392 panel_fixed_mode = mode_dev->panel_fixed_mode2; 387 panel_fixed_mode = mode_dev->panel_fixed_mode2;
393 388
394 /* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */ 389 /* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */
395 if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) { 390 if (!IS_MRST(dev) && gma_crtc->pipe == 0) {
396 printk(KERN_ERR "Can't support LVDS on pipe A\n"); 391 printk(KERN_ERR "Can't support LVDS on pipe A\n");
397 return false; 392 return false;
398 } 393 }
399 if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) { 394 if (IS_MRST(dev) && gma_crtc->pipe != 0) {
400 printk(KERN_ERR "Must use PIPE A\n"); 395 printk(KERN_ERR "Must use PIPE A\n");
401 return false; 396 return false;
402 } 397 }
@@ -525,9 +520,8 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
525 struct drm_device *dev = connector->dev; 520 struct drm_device *dev = connector->dev;
526 struct drm_psb_private *dev_priv = dev->dev_private; 521 struct drm_psb_private *dev_priv = dev->dev_private;
527 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; 522 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
528 struct psb_intel_encoder *psb_intel_encoder = 523 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
529 psb_intel_attached_encoder(connector); 524 struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
530 struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
531 int ret = 0; 525 int ret = 0;
532 526
533 if (!IS_MRST(dev)) 527 if (!IS_MRST(dev))
@@ -564,9 +558,8 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
564 */ 558 */
565void psb_intel_lvds_destroy(struct drm_connector *connector) 559void psb_intel_lvds_destroy(struct drm_connector *connector)
566{ 560{
567 struct psb_intel_encoder *psb_intel_encoder = 561 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
568 psb_intel_attached_encoder(connector); 562 struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
569 struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
570 563
571 if (lvds_priv->ddc_bus) 564 if (lvds_priv->ddc_bus)
572 psb_intel_i2c_destroy(lvds_priv->ddc_bus); 565 psb_intel_i2c_destroy(lvds_priv->ddc_bus);
@@ -585,8 +578,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
585 return -1; 578 return -1;
586 579
587 if (!strcmp(property->name, "scaling mode")) { 580 if (!strcmp(property->name, "scaling mode")) {
588 struct psb_intel_crtc *crtc = 581 struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
589 to_psb_intel_crtc(encoder->crtc);
590 uint64_t curval; 582 uint64_t curval;
591 583
592 if (!crtc) 584 if (!crtc)
@@ -656,7 +648,7 @@ const struct drm_connector_helper_funcs
656 psb_intel_lvds_connector_helper_funcs = { 648 psb_intel_lvds_connector_helper_funcs = {
657 .get_modes = psb_intel_lvds_get_modes, 649 .get_modes = psb_intel_lvds_get_modes,
658 .mode_valid = psb_intel_lvds_mode_valid, 650 .mode_valid = psb_intel_lvds_mode_valid,
659 .best_encoder = psb_intel_best_encoder, 651 .best_encoder = gma_best_encoder,
660}; 652};
661 653
662const struct drm_connector_funcs psb_intel_lvds_connector_funcs = { 654const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
@@ -691,8 +683,8 @@ const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
691void psb_intel_lvds_init(struct drm_device *dev, 683void psb_intel_lvds_init(struct drm_device *dev,
692 struct psb_intel_mode_device *mode_dev) 684 struct psb_intel_mode_device *mode_dev)
693{ 685{
694 struct psb_intel_encoder *psb_intel_encoder; 686 struct gma_encoder *gma_encoder;
695 struct psb_intel_connector *psb_intel_connector; 687 struct gma_connector *gma_connector;
696 struct psb_intel_lvds_priv *lvds_priv; 688 struct psb_intel_lvds_priv *lvds_priv;
697 struct drm_connector *connector; 689 struct drm_connector *connector;
698 struct drm_encoder *encoder; 690 struct drm_encoder *encoder;
@@ -702,17 +694,15 @@ void psb_intel_lvds_init(struct drm_device *dev,
702 u32 lvds; 694 u32 lvds;
703 int pipe; 695 int pipe;
704 696
705 psb_intel_encoder = 697 gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
706 kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); 698 if (!gma_encoder) {
707 if (!psb_intel_encoder) { 699 dev_err(dev->dev, "gma_encoder allocation error\n");
708 dev_err(dev->dev, "psb_intel_encoder allocation error\n");
709 return; 700 return;
710 } 701 }
711 702
712 psb_intel_connector = 703 gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
713 kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); 704 if (!gma_connector) {
714 if (!psb_intel_connector) { 705 dev_err(dev->dev, "gma_connector allocation error\n");
715 dev_err(dev->dev, "psb_intel_connector allocation error\n");
716 goto failed_encoder; 706 goto failed_encoder;
717 } 707 }
718 708
@@ -722,10 +712,10 @@ void psb_intel_lvds_init(struct drm_device *dev,
722 goto failed_connector; 712 goto failed_connector;
723 } 713 }
724 714
725 psb_intel_encoder->dev_priv = lvds_priv; 715 gma_encoder->dev_priv = lvds_priv;
726 716
727 connector = &psb_intel_connector->base; 717 connector = &gma_connector->base;
728 encoder = &psb_intel_encoder->base; 718 encoder = &gma_encoder->base;
729 drm_connector_init(dev, connector, 719 drm_connector_init(dev, connector,
730 &psb_intel_lvds_connector_funcs, 720 &psb_intel_lvds_connector_funcs,
731 DRM_MODE_CONNECTOR_LVDS); 721 DRM_MODE_CONNECTOR_LVDS);
@@ -734,9 +724,8 @@ void psb_intel_lvds_init(struct drm_device *dev,
734 &psb_intel_lvds_enc_funcs, 724 &psb_intel_lvds_enc_funcs,
735 DRM_MODE_ENCODER_LVDS); 725 DRM_MODE_ENCODER_LVDS);
736 726
737 psb_intel_connector_attach_encoder(psb_intel_connector, 727 gma_connector_attach_encoder(gma_connector, gma_encoder);
738 psb_intel_encoder); 728 gma_encoder->type = INTEL_OUTPUT_LVDS;
739 psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
740 729
741 drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs); 730 drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
742 drm_connector_helper_add(connector, 731 drm_connector_helper_add(connector,
@@ -851,8 +840,8 @@ failed_blc_i2c:
851 drm_encoder_cleanup(encoder); 840 drm_encoder_cleanup(encoder);
852 drm_connector_cleanup(connector); 841 drm_connector_cleanup(connector);
853failed_connector: 842failed_connector:
854 kfree(psb_intel_connector); 843 kfree(gma_connector);
855failed_encoder: 844failed_encoder:
856 kfree(psb_intel_encoder); 845 kfree(gma_encoder);
857} 846}
858 847
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 19e36603b23b..6f01cdf5e125 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -65,7 +65,7 @@ static const char *tv_format_names[] = {
65#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names)) 65#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
66 66
67struct psb_intel_sdvo { 67struct psb_intel_sdvo {
68 struct psb_intel_encoder base; 68 struct gma_encoder base;
69 69
70 struct i2c_adapter *i2c; 70 struct i2c_adapter *i2c;
71 u8 slave_addr; 71 u8 slave_addr;
@@ -140,7 +140,7 @@ struct psb_intel_sdvo {
140}; 140};
141 141
142struct psb_intel_sdvo_connector { 142struct psb_intel_sdvo_connector {
143 struct psb_intel_connector base; 143 struct gma_connector base;
144 144
145 /* Mark the type of connector */ 145 /* Mark the type of connector */
146 uint16_t output_flag; 146 uint16_t output_flag;
@@ -200,13 +200,13 @@ static struct psb_intel_sdvo *to_psb_intel_sdvo(struct drm_encoder *encoder)
200 200
201static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector) 201static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
202{ 202{
203 return container_of(psb_intel_attached_encoder(connector), 203 return container_of(gma_attached_encoder(connector),
204 struct psb_intel_sdvo, base); 204 struct psb_intel_sdvo, base);
205} 205}
206 206
207static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector) 207static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector)
208{ 208{
209 return container_of(to_psb_intel_connector(connector), struct psb_intel_sdvo_connector, base); 209 return container_of(to_gma_connector(connector), struct psb_intel_sdvo_connector, base);
210} 210}
211 211
212static bool 212static bool
@@ -500,7 +500,8 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
500 &status)) 500 &status))
501 goto log_fail; 501 goto log_fail;
502 502
503 while (status == SDVO_CMD_STATUS_PENDING && retry--) { 503 while ((status == SDVO_CMD_STATUS_PENDING ||
504 status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) {
504 udelay(15); 505 udelay(15);
505 if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, 506 if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
506 SDVO_I2C_CMD_STATUS, 507 SDVO_I2C_CMD_STATUS,
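/* Note on the widened loop condition above:
 * SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED is now treated like
 * SDVO_CMD_STATUS_PENDING, i.e. as a transient state worth another
 * 15us poll, instead of ending the wait (and failing the read) the
 * first time it shows up. */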
@@ -987,7 +988,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
987{ 988{
988 struct drm_device *dev = encoder->dev; 989 struct drm_device *dev = encoder->dev;
989 struct drm_crtc *crtc = encoder->crtc; 990 struct drm_crtc *crtc = encoder->crtc;
990 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 991 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
991 struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder); 992 struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
992 u32 sdvox; 993 u32 sdvox;
993 struct psb_intel_sdvo_in_out_map in_out; 994 struct psb_intel_sdvo_in_out_map in_out;
@@ -1070,7 +1071,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
1070 } 1071 }
1071 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; 1072 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
1072 1073
1073 if (psb_intel_crtc->pipe == 1) 1074 if (gma_crtc->pipe == 1)
1074 sdvox |= SDVO_PIPE_B_SELECT; 1075 sdvox |= SDVO_PIPE_B_SELECT;
1075 if (psb_intel_sdvo->has_hdmi_audio) 1076 if (psb_intel_sdvo->has_hdmi_audio)
1076 sdvox |= SDVO_AUDIO_ENABLE; 1077 sdvox |= SDVO_AUDIO_ENABLE;
@@ -1121,7 +1122,7 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1121 if ((temp & SDVO_ENABLE) == 0) 1122 if ((temp & SDVO_ENABLE) == 0)
1122 psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE); 1123 psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
1123 for (i = 0; i < 2; i++) 1124 for (i = 0; i < 2; i++)
1124 psb_intel_wait_for_vblank(dev); 1125 gma_wait_for_vblank(dev);
1125 1126
1126 status = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2); 1127 status = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2);
1127 /* Warn if the device reported failure to sync. 1128 /* Warn if the device reported failure to sync.
@@ -1836,10 +1837,8 @@ done:
1836static void psb_intel_sdvo_save(struct drm_connector *connector) 1837static void psb_intel_sdvo_save(struct drm_connector *connector)
1837{ 1838{
1838 struct drm_device *dev = connector->dev; 1839 struct drm_device *dev = connector->dev;
1839 struct psb_intel_encoder *psb_intel_encoder = 1840 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
1840 psb_intel_attached_encoder(connector); 1841 struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(&gma_encoder->base);
1841 struct psb_intel_sdvo *sdvo =
1842 to_psb_intel_sdvo(&psb_intel_encoder->base);
1843 1842
1844 sdvo->saveSDVO = REG_READ(sdvo->sdvo_reg); 1843 sdvo->saveSDVO = REG_READ(sdvo->sdvo_reg);
1845} 1844}
@@ -1847,8 +1846,7 @@ static void psb_intel_sdvo_save(struct drm_connector *connector)
1847static void psb_intel_sdvo_restore(struct drm_connector *connector) 1846static void psb_intel_sdvo_restore(struct drm_connector *connector)
1848{ 1847{
1849 struct drm_device *dev = connector->dev; 1848 struct drm_device *dev = connector->dev;
1850 struct drm_encoder *encoder = 1849 struct drm_encoder *encoder = &gma_attached_encoder(connector)->base;
1851 &psb_intel_attached_encoder(connector)->base;
1852 struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder); 1850 struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder);
1853 struct drm_crtc *crtc = encoder->crtc; 1851 struct drm_crtc *crtc = encoder->crtc;
1854 1852
@@ -1864,9 +1862,9 @@ static void psb_intel_sdvo_restore(struct drm_connector *connector)
1864static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = { 1862static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
1865 .dpms = psb_intel_sdvo_dpms, 1863 .dpms = psb_intel_sdvo_dpms,
1866 .mode_fixup = psb_intel_sdvo_mode_fixup, 1864 .mode_fixup = psb_intel_sdvo_mode_fixup,
1867 .prepare = psb_intel_encoder_prepare, 1865 .prepare = gma_encoder_prepare,
1868 .mode_set = psb_intel_sdvo_mode_set, 1866 .mode_set = psb_intel_sdvo_mode_set,
1869 .commit = psb_intel_encoder_commit, 1867 .commit = gma_encoder_commit,
1870}; 1868};
1871 1869
1872static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = { 1870static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
@@ -1882,7 +1880,7 @@ static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
1882static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = { 1880static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = {
1883 .get_modes = psb_intel_sdvo_get_modes, 1881 .get_modes = psb_intel_sdvo_get_modes,
1884 .mode_valid = psb_intel_sdvo_mode_valid, 1882 .mode_valid = psb_intel_sdvo_mode_valid,
1885 .best_encoder = psb_intel_best_encoder, 1883 .best_encoder = gma_best_encoder,
1886}; 1884};
1887 1885
1888static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder) 1886static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
@@ -1894,7 +1892,7 @@ static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
1894 psb_intel_sdvo->sdvo_lvds_fixed_mode); 1892 psb_intel_sdvo->sdvo_lvds_fixed_mode);
1895 1893
1896 i2c_del_adapter(&psb_intel_sdvo->ddc); 1894 i2c_del_adapter(&psb_intel_sdvo->ddc);
1897 psb_intel_encoder_destroy(encoder); 1895 gma_encoder_destroy(encoder);
1898} 1896}
1899 1897
1900static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = { 1898static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
@@ -2055,7 +2053,7 @@ psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
2055 connector->base.base.doublescan_allowed = 0; 2053 connector->base.base.doublescan_allowed = 0;
2056 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; 2054 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
2057 2055
2058 psb_intel_connector_attach_encoder(&connector->base, &encoder->base); 2056 gma_connector_attach_encoder(&connector->base, &encoder->base);
2059 drm_sysfs_connector_add(&connector->base.base); 2057 drm_sysfs_connector_add(&connector->base.base);
2060} 2058}
2061 2059
@@ -2075,7 +2073,7 @@ psb_intel_sdvo_dvi_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
2075{ 2073{
2076 struct drm_encoder *encoder = &psb_intel_sdvo->base.base; 2074 struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
2077 struct drm_connector *connector; 2075 struct drm_connector *connector;
2078 struct psb_intel_connector *intel_connector; 2076 struct gma_connector *intel_connector;
2079 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; 2077 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
2080 2078
2081 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL); 2079 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2115,7 +2113,7 @@ psb_intel_sdvo_tv_init(struct psb_intel_sdvo *psb_intel_sdvo, int type)
2115{ 2113{
2116 struct drm_encoder *encoder = &psb_intel_sdvo->base.base; 2114 struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
2117 struct drm_connector *connector; 2115 struct drm_connector *connector;
2118 struct psb_intel_connector *intel_connector; 2116 struct gma_connector *intel_connector;
2119 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; 2117 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
2120 2118
2121 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL); 2119 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2154,7 +2152,7 @@ psb_intel_sdvo_analog_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
2154{ 2152{
2155 struct drm_encoder *encoder = &psb_intel_sdvo->base.base; 2153 struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
2156 struct drm_connector *connector; 2154 struct drm_connector *connector;
2157 struct psb_intel_connector *intel_connector; 2155 struct gma_connector *intel_connector;
2158 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; 2156 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
2159 2157
2160 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL); 2158 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2188,7 +2186,7 @@ psb_intel_sdvo_lvds_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
2188{ 2186{
2189 struct drm_encoder *encoder = &psb_intel_sdvo->base.base; 2187 struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
2190 struct drm_connector *connector; 2188 struct drm_connector *connector;
2191 struct psb_intel_connector *intel_connector; 2189 struct gma_connector *intel_connector;
2192 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; 2190 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
2193 2191
2194 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL); 2192 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2540,7 +2538,7 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
2540bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg) 2538bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2541{ 2539{
2542 struct drm_psb_private *dev_priv = dev->dev_private; 2540 struct drm_psb_private *dev_priv = dev->dev_private;
2543 struct psb_intel_encoder *psb_intel_encoder; 2541 struct gma_encoder *gma_encoder;
2544 struct psb_intel_sdvo *psb_intel_sdvo; 2542 struct psb_intel_sdvo *psb_intel_sdvo;
2545 int i; 2543 int i;
2546 2544
@@ -2557,9 +2555,9 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2557 } 2555 }
2558 2556
2559 /* encoder type will be decided later */ 2557 /* encoder type will be decided later */
2560 psb_intel_encoder = &psb_intel_sdvo->base; 2558 gma_encoder = &psb_intel_sdvo->base;
2561 psb_intel_encoder->type = INTEL_OUTPUT_SDVO; 2559 gma_encoder->type = INTEL_OUTPUT_SDVO;
2562 drm_encoder_init(dev, &psb_intel_encoder->base, &psb_intel_sdvo_enc_funcs, 0); 2560 drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
2563 2561
2564 /* Read the regs to test if we can talk to the device */ 2562 /* Read the regs to test if we can talk to the device */
2565 for (i = 0; i < 0x40; i++) { 2563 for (i = 0; i < 0x40; i++) {
@@ -2577,7 +2575,7 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2577 else 2575 else
2578 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; 2576 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
2579 2577
2580 drm_encoder_helper_add(&psb_intel_encoder->base, &psb_intel_sdvo_helper_funcs); 2578 drm_encoder_helper_add(&gma_encoder->base, &psb_intel_sdvo_helper_funcs);
2581 2579
2582 /* In default case sdvo lvds is false */ 2580 /* In default case sdvo lvds is false */
2583 if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps)) 2581 if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps))
@@ -2620,7 +2618,7 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2620 return true; 2618 return true;
2621 2619
2622err: 2620err:
2623 drm_encoder_cleanup(&psb_intel_encoder->base); 2621 drm_encoder_cleanup(&gma_encoder->base);
2624 i2c_del_adapter(&psb_intel_sdvo->ddc); 2622 i2c_del_adapter(&psb_intel_sdvo->ddc);
2625 kfree(psb_intel_sdvo); 2623 kfree(psb_intel_sdvo);
2626 2624
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index e68b58a1aaf9..c2bd711e86e9 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -23,7 +23,7 @@
23#include <drm/drm_crtc_helper.h> 23#include <drm/drm_crtc_helper.h>
24#include <drm/drm_encoder_slave.h> 24#include <drm/drm_encoder_slave.h>
25#include <drm/drm_edid.h> 25#include <drm/drm_edid.h>
26 26#include <drm/i2c/tda998x.h>
27 27
28#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) 28#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
29 29
@@ -32,6 +32,11 @@ struct tda998x_priv {
32 uint16_t rev; 32 uint16_t rev;
33 uint8_t current_page; 33 uint8_t current_page;
34 int dpms; 34 int dpms;
35 bool is_hdmi_sink;
36 u8 vip_cntrl_0;
37 u8 vip_cntrl_1;
38 u8 vip_cntrl_2;
39 struct tda998x_encoder_params params;
35}; 40};
36 41
37#define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv) 42#define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv)
@@ -68,10 +73,13 @@ struct tda998x_priv {
68# define I2C_MASTER_DIS_MM (1 << 0) 73# define I2C_MASTER_DIS_MM (1 << 0)
69# define I2C_MASTER_DIS_FILT (1 << 1) 74# define I2C_MASTER_DIS_FILT (1 << 1)
70# define I2C_MASTER_APP_STRT_LAT (1 << 2) 75# define I2C_MASTER_APP_STRT_LAT (1 << 2)
76#define REG_FEAT_POWERDOWN REG(0x00, 0x0e) /* read/write */
77# define FEAT_POWERDOWN_SPDIF (1 << 3)
71#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */ 78#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */
72#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */ 79#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */
73#define REG_INT_FLAGS_2 REG(0x00, 0x11) /* read/write */ 80#define REG_INT_FLAGS_2 REG(0x00, 0x11) /* read/write */
74# define INT_FLAGS_2_EDID_BLK_RD (1 << 1) 81# define INT_FLAGS_2_EDID_BLK_RD (1 << 1)
82#define REG_ENA_ACLK REG(0x00, 0x16) /* read/write */
75#define REG_ENA_VP_0 REG(0x00, 0x18) /* read/write */ 83#define REG_ENA_VP_0 REG(0x00, 0x18) /* read/write */
76#define REG_ENA_VP_1 REG(0x00, 0x19) /* read/write */ 84#define REG_ENA_VP_1 REG(0x00, 0x19) /* read/write */
77#define REG_ENA_VP_2 REG(0x00, 0x1a) /* read/write */ 85#define REG_ENA_VP_2 REG(0x00, 0x1a) /* read/write */
@@ -110,6 +118,8 @@ struct tda998x_priv {
110#define REG_VIP_CNTRL_5 REG(0x00, 0x25) /* write */ 118#define REG_VIP_CNTRL_5 REG(0x00, 0x25) /* write */
111# define VIP_CNTRL_5_CKCASE (1 << 0) 119# define VIP_CNTRL_5_CKCASE (1 << 0)
112# define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1) 120# define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1)
121#define REG_MUX_AP REG(0x00, 0x26) /* read/write */
122#define REG_MUX_VP_VIP_OUT REG(0x00, 0x27) /* read/write */
113#define REG_MAT_CONTRL REG(0x00, 0x80) /* write */ 123#define REG_MAT_CONTRL REG(0x00, 0x80) /* write */
114# define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0) 124# define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0)
115# define MAT_CONTRL_MAT_BP (1 << 2) 125# define MAT_CONTRL_MAT_BP (1 << 2)
@@ -130,8 +140,12 @@ struct tda998x_priv {
130#define REG_VS_LINE_END_1_LSB REG(0x00, 0xae) /* write */ 140#define REG_VS_LINE_END_1_LSB REG(0x00, 0xae) /* write */
131#define REG_VS_PIX_END_1_MSB REG(0x00, 0xaf) /* write */ 141#define REG_VS_PIX_END_1_MSB REG(0x00, 0xaf) /* write */
132#define REG_VS_PIX_END_1_LSB REG(0x00, 0xb0) /* write */ 142#define REG_VS_PIX_END_1_LSB REG(0x00, 0xb0) /* write */
143#define REG_VS_LINE_STRT_2_MSB REG(0x00, 0xb1) /* write */
144#define REG_VS_LINE_STRT_2_LSB REG(0x00, 0xb2) /* write */
133#define REG_VS_PIX_STRT_2_MSB REG(0x00, 0xb3) /* write */ 145#define REG_VS_PIX_STRT_2_MSB REG(0x00, 0xb3) /* write */
134#define REG_VS_PIX_STRT_2_LSB REG(0x00, 0xb4) /* write */ 146#define REG_VS_PIX_STRT_2_LSB REG(0x00, 0xb4) /* write */
147#define REG_VS_LINE_END_2_MSB REG(0x00, 0xb5) /* write */
148#define REG_VS_LINE_END_2_LSB REG(0x00, 0xb6) /* write */
135#define REG_VS_PIX_END_2_MSB REG(0x00, 0xb7) /* write */ 149#define REG_VS_PIX_END_2_MSB REG(0x00, 0xb7) /* write */
136#define REG_VS_PIX_END_2_LSB REG(0x00, 0xb8) /* write */ 150#define REG_VS_PIX_END_2_LSB REG(0x00, 0xb8) /* write */
137#define REG_HS_PIX_START_MSB REG(0x00, 0xb9) /* write */ 151#define REG_HS_PIX_START_MSB REG(0x00, 0xb9) /* write */
@@ -142,21 +156,29 @@ struct tda998x_priv {
142#define REG_VWIN_START_1_LSB REG(0x00, 0xbe) /* write */ 156#define REG_VWIN_START_1_LSB REG(0x00, 0xbe) /* write */
143#define REG_VWIN_END_1_MSB REG(0x00, 0xbf) /* write */ 157#define REG_VWIN_END_1_MSB REG(0x00, 0xbf) /* write */
144#define REG_VWIN_END_1_LSB REG(0x00, 0xc0) /* write */ 158#define REG_VWIN_END_1_LSB REG(0x00, 0xc0) /* write */
159#define REG_VWIN_START_2_MSB REG(0x00, 0xc1) /* write */
160#define REG_VWIN_START_2_LSB REG(0x00, 0xc2) /* write */
161#define REG_VWIN_END_2_MSB REG(0x00, 0xc3) /* write */
162#define REG_VWIN_END_2_LSB REG(0x00, 0xc4) /* write */
145#define REG_DE_START_MSB REG(0x00, 0xc5) /* write */ 163#define REG_DE_START_MSB REG(0x00, 0xc5) /* write */
146#define REG_DE_START_LSB REG(0x00, 0xc6) /* write */ 164#define REG_DE_START_LSB REG(0x00, 0xc6) /* write */
147#define REG_DE_STOP_MSB REG(0x00, 0xc7) /* write */ 165#define REG_DE_STOP_MSB REG(0x00, 0xc7) /* write */
148#define REG_DE_STOP_LSB REG(0x00, 0xc8) /* write */ 166#define REG_DE_STOP_LSB REG(0x00, 0xc8) /* write */
149#define REG_TBG_CNTRL_0 REG(0x00, 0xca) /* write */ 167#define REG_TBG_CNTRL_0 REG(0x00, 0xca) /* write */
168# define TBG_CNTRL_0_TOP_TGL (1 << 0)
169# define TBG_CNTRL_0_TOP_SEL (1 << 1)
170# define TBG_CNTRL_0_DE_EXT (1 << 2)
171# define TBG_CNTRL_0_TOP_EXT (1 << 3)
150# define TBG_CNTRL_0_FRAME_DIS (1 << 5) 172# define TBG_CNTRL_0_FRAME_DIS (1 << 5)
151# define TBG_CNTRL_0_SYNC_MTHD (1 << 6) 173# define TBG_CNTRL_0_SYNC_MTHD (1 << 6)
152# define TBG_CNTRL_0_SYNC_ONCE (1 << 7) 174# define TBG_CNTRL_0_SYNC_ONCE (1 << 7)
153#define REG_TBG_CNTRL_1 REG(0x00, 0xcb) /* write */ 175#define REG_TBG_CNTRL_1 REG(0x00, 0xcb) /* write */
154# define TBG_CNTRL_1_VH_TGL_0 (1 << 0) 176# define TBG_CNTRL_1_H_TGL (1 << 0)
155# define TBG_CNTRL_1_VH_TGL_1 (1 << 1) 177# define TBG_CNTRL_1_V_TGL (1 << 1)
156# define TBG_CNTRL_1_VH_TGL_2 (1 << 2) 178# define TBG_CNTRL_1_TGL_EN (1 << 2)
157# define TBG_CNTRL_1_VHX_EXT_DE (1 << 3) 179# define TBG_CNTRL_1_X_EXT (1 << 3)
158# define TBG_CNTRL_1_VHX_EXT_HS (1 << 4) 180# define TBG_CNTRL_1_H_EXT (1 << 4)
159# define TBG_CNTRL_1_VHX_EXT_VS (1 << 5) 181# define TBG_CNTRL_1_V_EXT (1 << 5)
160# define TBG_CNTRL_1_DWIN_DIS (1 << 6) 182# define TBG_CNTRL_1_DWIN_DIS (1 << 6)
161#define REG_ENABLE_SPACE REG(0x00, 0xd6) /* write */ 183#define REG_ENABLE_SPACE REG(0x00, 0xd6) /* write */
162#define REG_HVF_CNTRL_0 REG(0x00, 0xe4) /* write */ 184#define REG_HVF_CNTRL_0 REG(0x00, 0xe4) /* write */
@@ -171,6 +193,12 @@ struct tda998x_priv {
171# define HVF_CNTRL_1_PAD(x) (((x) & 3) << 4) 193# define HVF_CNTRL_1_PAD(x) (((x) & 3) << 4)
172# define HVF_CNTRL_1_SEMI_PLANAR (1 << 6) 194# define HVF_CNTRL_1_SEMI_PLANAR (1 << 6)
173#define REG_RPT_CNTRL REG(0x00, 0xf0) /* write */ 195#define REG_RPT_CNTRL REG(0x00, 0xf0) /* write */
196#define REG_I2S_FORMAT REG(0x00, 0xfc) /* read/write */
197# define I2S_FORMAT(x) (((x) & 3) << 0)
198#define REG_AIP_CLKSEL REG(0x00, 0xfd) /* write */
199# define AIP_CLKSEL_FS(x) (((x) & 3) << 0)
200# define AIP_CLKSEL_CLK_POL(x) (((x) & 1) << 2)
201# define AIP_CLKSEL_AIP(x) (((x) & 7) << 3)
174 202
175 203
176/* Page 02h: PLL settings */ 204/* Page 02h: PLL settings */
@@ -194,6 +222,12 @@ struct tda998x_priv {
194#define REG_PLL_SCGR1 REG(0x02, 0x09) /* read/write */ 222#define REG_PLL_SCGR1 REG(0x02, 0x09) /* read/write */
195#define REG_PLL_SCGR2 REG(0x02, 0x0a) /* read/write */ 223#define REG_PLL_SCGR2 REG(0x02, 0x0a) /* read/write */
196#define REG_AUDIO_DIV REG(0x02, 0x0e) /* read/write */ 224#define REG_AUDIO_DIV REG(0x02, 0x0e) /* read/write */
225# define AUDIO_DIV_SERCLK_1 0
226# define AUDIO_DIV_SERCLK_2 1
227# define AUDIO_DIV_SERCLK_4 2
228# define AUDIO_DIV_SERCLK_8 3
229# define AUDIO_DIV_SERCLK_16 4
230# define AUDIO_DIV_SERCLK_32 5
197#define REG_SEL_CLK REG(0x02, 0x11) /* read/write */ 231#define REG_SEL_CLK REG(0x02, 0x11) /* read/write */
198# define SEL_CLK_SEL_CLK1 (1 << 0) 232# define SEL_CLK_SEL_CLK1 (1 << 0)
199# define SEL_CLK_SEL_VRF_CLK(x) (((x) & 3) << 1) 233# define SEL_CLK_SEL_VRF_CLK(x) (((x) & 3) << 1)
@@ -212,6 +246,11 @@ struct tda998x_priv {
212 246
213 247
214/* Page 10h: information frames and packets */ 248/* Page 10h: information frames and packets */
249#define REG_IF1_HB0 REG(0x10, 0x20) /* read/write */
250#define REG_IF2_HB0 REG(0x10, 0x40) /* read/write */
251#define REG_IF3_HB0 REG(0x10, 0x60) /* read/write */
252#define REG_IF4_HB0 REG(0x10, 0x80) /* read/write */
253#define REG_IF5_HB0 REG(0x10, 0xa0) /* read/write */
215 254
216 255
217/* Page 11h: audio settings and content info packets */ 256/* Page 11h: audio settings and content info packets */
@@ -221,14 +260,39 @@ struct tda998x_priv {
221# define AIP_CNTRL_0_LAYOUT (1 << 2) 260# define AIP_CNTRL_0_LAYOUT (1 << 2)
222# define AIP_CNTRL_0_ACR_MAN (1 << 5) 261# define AIP_CNTRL_0_ACR_MAN (1 << 5)
223# define AIP_CNTRL_0_RST_CTS (1 << 6) 262# define AIP_CNTRL_0_RST_CTS (1 << 6)
263#define REG_CA_I2S REG(0x11, 0x01) /* read/write */
264# define CA_I2S_CA_I2S(x) (((x) & 31) << 0)
265# define CA_I2S_HBR_CHSTAT (1 << 6)
266#define REG_LATENCY_RD REG(0x11, 0x04) /* read/write */
267#define REG_ACR_CTS_0 REG(0x11, 0x05) /* read/write */
268#define REG_ACR_CTS_1 REG(0x11, 0x06) /* read/write */
269#define REG_ACR_CTS_2 REG(0x11, 0x07) /* read/write */
270#define REG_ACR_N_0 REG(0x11, 0x08) /* read/write */
271#define REG_ACR_N_1 REG(0x11, 0x09) /* read/write */
272#define REG_ACR_N_2 REG(0x11, 0x0a) /* read/write */
273#define REG_CTS_N REG(0x11, 0x0c) /* read/write */
274# define CTS_N_K(x) (((x) & 7) << 0)
275# define CTS_N_M(x) (((x) & 3) << 4)
224#define REG_ENC_CNTRL REG(0x11, 0x0d) /* read/write */ 276#define REG_ENC_CNTRL REG(0x11, 0x0d) /* read/write */
225# define ENC_CNTRL_RST_ENC (1 << 0) 277# define ENC_CNTRL_RST_ENC (1 << 0)
226# define ENC_CNTRL_RST_SEL (1 << 1) 278# define ENC_CNTRL_RST_SEL (1 << 1)
227# define ENC_CNTRL_CTL_CODE(x) (((x) & 3) << 2) 279# define ENC_CNTRL_CTL_CODE(x) (((x) & 3) << 2)
280#define REG_DIP_FLAGS REG(0x11, 0x0e) /* read/write */
281# define DIP_FLAGS_ACR (1 << 0)
282# define DIP_FLAGS_GC (1 << 1)
283#define REG_DIP_IF_FLAGS REG(0x11, 0x0f) /* read/write */
284# define DIP_IF_FLAGS_IF1 (1 << 1)
285# define DIP_IF_FLAGS_IF2 (1 << 2)
286# define DIP_IF_FLAGS_IF3 (1 << 3)
287# define DIP_IF_FLAGS_IF4 (1 << 4)
288# define DIP_IF_FLAGS_IF5 (1 << 5)
289#define REG_CH_STAT_B(x) REG(0x11, 0x14 + (x)) /* read/write */
228 290
229 291
230/* Page 12h: HDCP and OTP */ 292/* Page 12h: HDCP and OTP */
231#define REG_TX3 REG(0x12, 0x9a) /* read/write */ 293#define REG_TX3 REG(0x12, 0x9a) /* read/write */
294#define REG_TX4 REG(0x12, 0x9b) /* read/write */
295# define TX4_PD_RAM (1 << 1)
232#define REG_TX33 REG(0x12, 0xb8) /* read/write */ 296#define REG_TX33 REG(0x12, 0xb8) /* read/write */
233# define TX33_HDMI (1 << 1) 297# define TX33_HDMI (1 << 1)
234 298
@@ -338,6 +402,23 @@ fail:
338 return ret; 402 return ret;
339} 403}
340 404
405static void
406reg_write_range(struct drm_encoder *encoder, uint16_t reg, uint8_t *p, int cnt)
407{
408 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
409 uint8_t buf[cnt+1];
410 int ret;
411
412 buf[0] = REG2ADDR(reg);
413 memcpy(&buf[1], p, cnt);
414
415 set_page(encoder, reg);
416
417 ret = i2c_master_send(client, buf, cnt + 1);
418 if (ret < 0)
419 dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
420}
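/*
 * Editor's note (illustrative, not part of the commit): reg_write_range()
 * mirrors reg_write() but sends a whole payload in one I2C transfer --
 * buf[0] carries the register offset within the currently selected page,
 * followed by cnt data bytes. A hypothetical caller:
 *
 *	uint8_t acr[3] = { 0x44, 0x42, 0x01 };
 *	reg_write_range(encoder, REG_ACR_CTS_0, acr, 3);
 *
 * Note buf[] is a C99 variable-length array; callers in this patch pass
 * at most 17 bytes (a full AVI infoframe), so stack use stays small.
 */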
421
341static uint8_t 422static uint8_t
342reg_read(struct drm_encoder *encoder, uint16_t reg) 423reg_read(struct drm_encoder *encoder, uint16_t reg)
343{ 424{
@@ -406,13 +487,172 @@ tda998x_reset(struct drm_encoder *encoder)
406 reg_write(encoder, REG_SERIALIZER, 0x00); 487 reg_write(encoder, REG_SERIALIZER, 0x00);
407 reg_write(encoder, REG_BUFFER_OUT, 0x00); 488 reg_write(encoder, REG_BUFFER_OUT, 0x00);
408 reg_write(encoder, REG_PLL_SCG1, 0x00); 489 reg_write(encoder, REG_PLL_SCG1, 0x00);
409 reg_write(encoder, REG_AUDIO_DIV, 0x03); 490 reg_write(encoder, REG_AUDIO_DIV, AUDIO_DIV_SERCLK_8);
410 reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK); 491 reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
411 reg_write(encoder, REG_PLL_SCGN1, 0xfa); 492 reg_write(encoder, REG_PLL_SCGN1, 0xfa);
412 reg_write(encoder, REG_PLL_SCGN2, 0x00); 493 reg_write(encoder, REG_PLL_SCGN2, 0x00);
413 reg_write(encoder, REG_PLL_SCGR1, 0x5b); 494 reg_write(encoder, REG_PLL_SCGR1, 0x5b);
414 reg_write(encoder, REG_PLL_SCGR2, 0x00); 495 reg_write(encoder, REG_PLL_SCGR2, 0x00);
415 reg_write(encoder, REG_PLL_SCG2, 0x10); 496 reg_write(encoder, REG_PLL_SCG2, 0x10);
497
 498        /* Write the default value to the MUX register */
499 reg_write(encoder, REG_MUX_VP_VIP_OUT, 0x24);
500}
501
502static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes)
503{
504 uint8_t sum = 0;
505
506 while (bytes--)
507 sum += *buf++;
508 return (255 - sum) + 1;
509}
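/*
 * Editor's note (illustrative, not part of the commit): the checksum is
 * chosen so that all bytes of the infoframe, checksum included, sum to
 * zero modulo 256 -- (255 - sum) + 1 is just (256 - sum) in 8 bits:
 *
 *	uint8_t sum = 0;
 *	size_t i;
 *	for (i = 0; i < size; i++)
 *		sum += buf[i];	// buf[PB(0)] holds the stored checksum
 *	// sum == 0 for a correctly checksummed infoframe
 */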
510
511#define HB(x) (x)
512#define PB(x) (HB(2) + 1 + (x))
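/*
 * Editor's note: HB(x) and PB(x) index one flat buffer holding an HDMI
 * infoframe -- a three-byte header HB(0)..HB(2) (type, version, length),
 * then PB(0) for the checksum, so PB(x) = 3 + x. A buffer declared as
 * buf[PB(13) + 1] is therefore 17 bytes, a full AVI infoframe.
 */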
513
514static void
515tda998x_write_if(struct drm_encoder *encoder, uint8_t bit, uint16_t addr,
516 uint8_t *buf, size_t size)
517{
518 buf[PB(0)] = tda998x_cksum(buf, size);
519
520 reg_clear(encoder, REG_DIP_IF_FLAGS, bit);
521 reg_write_range(encoder, addr, buf, size);
522 reg_set(encoder, REG_DIP_IF_FLAGS, bit);
523}
524
525static void
526tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
527{
528 uint8_t buf[PB(5) + 1];
529
530 buf[HB(0)] = 0x84;
531 buf[HB(1)] = 0x01;
532 buf[HB(2)] = 10;
533 buf[PB(0)] = 0;
534 buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
535 buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
536 buf[PB(4)] = p->audio_frame[4];
537 buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
538
539 tda998x_write_if(encoder, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
540 sizeof(buf));
541}
542
543static void
544tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode)
545{
546 uint8_t buf[PB(13) + 1];
547
548 memset(buf, 0, sizeof(buf));
549 buf[HB(0)] = 0x82;
550 buf[HB(1)] = 0x02;
551 buf[HB(2)] = 13;
552 buf[PB(4)] = drm_match_cea_mode(mode);
553
554 tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
555 sizeof(buf));
556}
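/*
 * Editor's note: PB(4) of the AVI infoframe carries the CEA Video
 * Identification Code; drm_match_cea_mode() returns the VIC for a CEA
 * mode (e.g. 4 for 1280x720p60, 16 for 1920x1080p60) or 0 when the mode
 * has no CEA equivalent.
 */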
557
558static void tda998x_audio_mute(struct drm_encoder *encoder, bool on)
559{
560 if (on) {
561 reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
562 reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
563 reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
564 } else {
565 reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
566 }
567}
568
569static void
570tda998x_configure_audio(struct drm_encoder *encoder,
571 struct drm_display_mode *mode, struct tda998x_encoder_params *p)
572{
573 uint8_t buf[6], clksel_aip, clksel_fs, ca_i2s, cts_n, adiv;
574 uint32_t n;
575
576 /* Enable audio ports */
577 reg_write(encoder, REG_ENA_AP, p->audio_cfg);
578 reg_write(encoder, REG_ENA_ACLK, p->audio_clk_cfg);
579
580 /* Set audio input source */
581 switch (p->audio_format) {
582 case AFMT_SPDIF:
583 reg_write(encoder, REG_MUX_AP, 0x40);
584 clksel_aip = AIP_CLKSEL_AIP(0);
585 /* FS64SPDIF */
586 clksel_fs = AIP_CLKSEL_FS(2);
587 cts_n = CTS_N_M(3) | CTS_N_K(3);
588 ca_i2s = 0;
589 break;
590
591 case AFMT_I2S:
592 reg_write(encoder, REG_MUX_AP, 0x64);
593 clksel_aip = AIP_CLKSEL_AIP(1);
594 /* ACLK */
595 clksel_fs = AIP_CLKSEL_FS(0);
596 cts_n = CTS_N_M(3) | CTS_N_K(3);
597 ca_i2s = CA_I2S_CA_I2S(0);
598 break;
599 }
600
601 reg_write(encoder, REG_AIP_CLKSEL, clksel_aip);
602 reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_LAYOUT);
603
604 /* Enable automatic CTS generation */
605 reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_ACR_MAN);
606 reg_write(encoder, REG_CTS_N, cts_n);
607
608 /*
609 * Audio input somehow depends on HDMI line rate which is
610 * related to pixclk. Testing showed that modes with pixclk
611 * >100MHz need a larger divider while <40MHz need the default.
612 * There is no detailed info in the datasheet, so we just
 613	 * assume pixel clocks above 100MHz need the larger divider.
614 */
615 if (mode->clock > 100000)
616 adiv = AUDIO_DIV_SERCLK_16;
617 else
618 adiv = AUDIO_DIV_SERCLK_8;
619 reg_write(encoder, REG_AUDIO_DIV, adiv);
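/*
 * Worked example (editorial): 1080p60 has mode->clock == 148500 kHz, so
 * adiv = AUDIO_DIV_SERCLK_16; 720p60 at 74250 kHz falls through to the
 * default AUDIO_DIV_SERCLK_8.
 */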
620
621 /*
622 * This is the approximate value of N, which happens to be
 623	 * the recommended value for non-coherent clocks.
624 */
625 n = 128 * p->audio_sample_rate / 1000;
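/*
 * Worked example (editorial): 48 kHz gives n = 128 * 48000 / 1000 = 6144
 * and 32 kHz gives 4096, both exactly the HDMI-recommended N values;
 * 44.1 kHz gives 5644 instead of the recommended 6272, hence
 * "approximate" above.
 */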
626
627 /* Write the CTS and N values */
628 buf[0] = 0x44;
629 buf[1] = 0x42;
630 buf[2] = 0x01;
631 buf[3] = n;
632 buf[4] = n >> 8;
633 buf[5] = n >> 16;
634 reg_write_range(encoder, REG_ACR_CTS_0, buf, 6);
635
636 /* Set CTS clock reference */
637 reg_write(encoder, REG_AIP_CLKSEL, clksel_aip | clksel_fs);
638
639 /* Reset CTS generator */
640 reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
641 reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
642
643 /* Write the channel status */
644 buf[0] = 0x04;
645 buf[1] = 0x00;
646 buf[2] = 0x00;
647 buf[3] = 0xf1;
648 reg_write_range(encoder, REG_CH_STAT_B(0), buf, 4);
649
650 tda998x_audio_mute(encoder, true);
651 mdelay(20);
652 tda998x_audio_mute(encoder, false);
653
654 /* Write the audio information packet */
655 tda998x_write_aif(encoder, p);
416} 656}
417 657
418/* DRM encoder functions */ 658/* DRM encoder functions */
@@ -420,6 +660,23 @@ tda998x_reset(struct drm_encoder *encoder)
420static void 660static void
421tda998x_encoder_set_config(struct drm_encoder *encoder, void *params) 661tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
422{ 662{
663 struct tda998x_priv *priv = to_tda998x_priv(encoder);
664 struct tda998x_encoder_params *p = params;
665
666 priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
667 (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
668 VIP_CNTRL_0_SWAP_B(p->swap_b) |
669 (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
670 priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
671 (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
672 VIP_CNTRL_1_SWAP_D(p->swap_d) |
673 (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
674 priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
675 (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
676 VIP_CNTRL_2_SWAP_F(p->swap_f) |
677 (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
678
679 priv->params = *p;
423} 680}
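/*
 * Editor's note: a board with straight-through port wiring would pass
 * swap_a=2, swap_b=3, swap_c=0, swap_d=1, swap_e=4, swap_f=5 and no
 * mirroring -- exactly the defaults that tda998x_encoder_init() now
 * programs before any set_config() call.
 */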
424 681
425static void 682static void
@@ -436,18 +693,14 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
436 693
437 switch (mode) { 694 switch (mode) {
438 case DRM_MODE_DPMS_ON: 695 case DRM_MODE_DPMS_ON:
 439		/* enable audio and video ports */              696		/* enable video ports; audio will be enabled later */
440 reg_write(encoder, REG_ENA_AP, 0xff);
441 reg_write(encoder, REG_ENA_VP_0, 0xff); 697 reg_write(encoder, REG_ENA_VP_0, 0xff);
442 reg_write(encoder, REG_ENA_VP_1, 0xff); 698 reg_write(encoder, REG_ENA_VP_1, 0xff);
443 reg_write(encoder, REG_ENA_VP_2, 0xff); 699 reg_write(encoder, REG_ENA_VP_2, 0xff);
444 /* set muxing after enabling ports: */ 700 /* set muxing after enabling ports: */
445 reg_write(encoder, REG_VIP_CNTRL_0, 701 reg_write(encoder, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
446 VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3)); 702 reg_write(encoder, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
447 reg_write(encoder, REG_VIP_CNTRL_1, 703 reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
448 VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1));
449 reg_write(encoder, REG_VIP_CNTRL_2,
450 VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5));
451 break; 704 break;
452 case DRM_MODE_DPMS_OFF: 705 case DRM_MODE_DPMS_OFF:
453 /* disable audio and video ports */ 706 /* disable audio and video ports */
@@ -494,43 +747,78 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
494 struct drm_display_mode *adjusted_mode) 747 struct drm_display_mode *adjusted_mode)
495{ 748{
496 struct tda998x_priv *priv = to_tda998x_priv(encoder); 749 struct tda998x_priv *priv = to_tda998x_priv(encoder);
497 uint16_t hs_start, hs_end, line_start, line_end; 750 uint16_t ref_pix, ref_line, n_pix, n_line;
498 uint16_t vwin_start, vwin_end, de_start, de_end; 751 uint16_t hs_pix_s, hs_pix_e;
499 uint16_t ref_pix, ref_line, pix_start2; 752 uint16_t vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
753 uint16_t vs2_pix_s, vs2_pix_e, vs2_line_s, vs2_line_e;
754 uint16_t vwin1_line_s, vwin1_line_e;
755 uint16_t vwin2_line_s, vwin2_line_e;
756 uint16_t de_pix_s, de_pix_e;
500 uint8_t reg, div, rep; 757 uint8_t reg, div, rep;
501 758
502 hs_start = mode->hsync_start - mode->hdisplay; 759 /*
503 hs_end = mode->hsync_end - mode->hdisplay; 760 * Internally TDA998x is using ITU-R BT.656 style sync but
504 line_start = 1; 761 * we get VESA style sync. TDA998x is using a reference pixel
505 line_end = 1 + mode->vsync_end - mode->vsync_start; 762 * relative to ITU to sync to the input frame and for output
506 vwin_start = mode->vtotal - mode->vsync_start; 763 * sync generation. Currently, we are using reference detection
507 vwin_end = vwin_start + mode->vdisplay; 764 * from HS/VS, i.e. REFPIX/REFLINE denote frame start sync point
 508	de_start = mode->htotal - mode->hdisplay;       765	 * which is the position of the rising VS with coincident rising HS.
509 de_end = mode->htotal; 766 *
 510                                                    767	 * Now there are some issues to take care of:
511 pix_start2 = 0; 768 * - HDMI data islands require sync-before-active
512 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 769 * - TDA998x register values must be > 0 to be enabled
513 pix_start2 = (mode->htotal / 2) + hs_start; 770 * - REFLINE needs an additional offset of +1
 514                                                    771	 * - REFPIX needs an additional offset of +1 for UYVY and +3 for RGB
515 /* TODO how is this value calculated? It is 2 for all common 772 *
516 * formats in the tables in out of tree nxp driver (assuming 773 * So we add +1 to all horizontal and vertical register values,
517 * I've properly deciphered their byzantine table system) 774 * plus an additional +3 for REFPIX as we are using RGB input only.
518 */ 775 */
519 ref_line = 2; 776 n_pix = mode->htotal;
520 777 n_line = mode->vtotal;
521 /* this might changes for other color formats from the CRTC: */ 778
522 ref_pix = 3 + hs_start; 779 hs_pix_e = mode->hsync_end - mode->hdisplay;
780 hs_pix_s = mode->hsync_start - mode->hdisplay;
781 de_pix_e = mode->htotal;
782 de_pix_s = mode->htotal - mode->hdisplay;
783 ref_pix = 3 + hs_pix_s;
784
785 /*
786 * Attached LCD controllers may generate broken sync. Allow
787 * those to adjust the position of the rising VS edge by adding
788 * HSKEW to ref_pix.
789 */
790 if (adjusted_mode->flags & DRM_MODE_FLAG_HSKEW)
791 ref_pix += adjusted_mode->hskew;
792
793 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0) {
794 ref_line = 1 + mode->vsync_start - mode->vdisplay;
795 vwin1_line_s = mode->vtotal - mode->vdisplay - 1;
796 vwin1_line_e = vwin1_line_s + mode->vdisplay;
797 vs1_pix_s = vs1_pix_e = hs_pix_s;
798 vs1_line_s = mode->vsync_start - mode->vdisplay;
799 vs1_line_e = vs1_line_s +
800 mode->vsync_end - mode->vsync_start;
801 vwin2_line_s = vwin2_line_e = 0;
802 vs2_pix_s = vs2_pix_e = 0;
803 vs2_line_s = vs2_line_e = 0;
804 } else {
805 ref_line = 1 + (mode->vsync_start - mode->vdisplay)/2;
806 vwin1_line_s = (mode->vtotal - mode->vdisplay)/2;
807 vwin1_line_e = vwin1_line_s + mode->vdisplay/2;
808 vs1_pix_s = vs1_pix_e = hs_pix_s;
809 vs1_line_s = (mode->vsync_start - mode->vdisplay)/2;
810 vs1_line_e = vs1_line_s +
811 (mode->vsync_end - mode->vsync_start)/2;
812 vwin2_line_s = vwin1_line_s + mode->vtotal/2;
813 vwin2_line_e = vwin2_line_s + mode->vdisplay/2;
814 vs2_pix_s = vs2_pix_e = hs_pix_s + mode->htotal/2;
 815		vs2_line_s = vs1_line_s + mode->vtotal/2;
816 vs2_line_e = vs2_line_s +
817 (mode->vsync_end - mode->vsync_start)/2;
818 }
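/*
 * Worked example (editorial, CEA 720p60 timings: hdisplay 1280, hsync
 * 1390-1430, htotal 1650; vdisplay 720, vsync 725-730, vtotal 750):
 *	hs_pix_s = 110, hs_pix_e = 150, de_pix_s = 370, de_pix_e = 1650,
 *	ref_pix = 113, ref_line = 6, vs1_line_s = 5, vs1_line_e = 10,
 *	vwin1_line_s = 29, vwin1_line_e = 749;
 * the VS2/VWIN2 values all stay 0 because the mode is progressive.
 */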
523 819
524 div = 148500 / mode->clock; 820 div = 148500 / mode->clock;
525 821
526 DBG("clock=%d, div=%u", mode->clock, div);
527 DBG("hs_start=%u, hs_end=%u, line_start=%u, line_end=%u",
528 hs_start, hs_end, line_start, line_end);
529 DBG("vwin_start=%u, vwin_end=%u, de_start=%u, de_end=%u",
530 vwin_start, vwin_end, de_start, de_end);
531 DBG("ref_line=%u, ref_pix=%u, pix_start2=%u",
532 ref_line, ref_pix, pix_start2);
533
534 /* mute the audio FIFO: */ 822 /* mute the audio FIFO: */
535 reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); 823 reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
536 824
@@ -561,9 +849,6 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
561 reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) | 849 reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
562 PLL_SERIAL_2_SRL_PR(rep)); 850 PLL_SERIAL_2_SRL_PR(rep));
563 851
564 reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, pix_start2);
565 reg_write16(encoder, REG_VS_PIX_END_2_MSB, pix_start2);
566
567 /* set color matrix bypass flag: */ 852 /* set color matrix bypass flag: */
568 reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP); 853 reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP);
569 854
@@ -572,47 +857,75 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
572 857
573 reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD); 858 reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD);
574 859
860 /*
861 * Sync on rising HSYNC/VSYNC
862 */
575 reg_write(encoder, REG_VIP_CNTRL_3, 0); 863 reg_write(encoder, REG_VIP_CNTRL_3, 0);
576 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS); 864 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS);
865
866 /*
 867	 * The TDA19988 requires high-active sync at the input stage,
 868	 * so invert the low-active sync provided by the master encoder here
869 */
870 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
871 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
577 if (mode->flags & DRM_MODE_FLAG_NVSYNC) 872 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
578 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL); 873 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL);
579 874
875 /*
 876	 * Always generate the sync polarity relative to the input sync and
 877	 * revert any sync toggled at the input stage back at the output stage
878 */
879 reg = TBG_CNTRL_1_TGL_EN;
580 if (mode->flags & DRM_MODE_FLAG_NHSYNC) 880 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
581 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL); 881 reg |= TBG_CNTRL_1_H_TGL;
882 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
883 reg |= TBG_CNTRL_1_V_TGL;
884 reg_write(encoder, REG_TBG_CNTRL_1, reg);
582 885
583 reg_write(encoder, REG_VIDFORMAT, 0x00); 886 reg_write(encoder, REG_VIDFORMAT, 0x00);
584 reg_write16(encoder, REG_NPIX_MSB, mode->hdisplay - 1); 887 reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
585 reg_write16(encoder, REG_NLINE_MSB, mode->vdisplay - 1); 888 reg_write16(encoder, REG_REFLINE_MSB, ref_line);
586 reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, line_start); 889 reg_write16(encoder, REG_NPIX_MSB, n_pix);
587 reg_write16(encoder, REG_VS_LINE_END_1_MSB, line_end); 890 reg_write16(encoder, REG_NLINE_MSB, n_line);
588 reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, hs_start); 891 reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, vs1_line_s);
589 reg_write16(encoder, REG_VS_PIX_END_1_MSB, hs_start); 892 reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, vs1_pix_s);
590 reg_write16(encoder, REG_HS_PIX_START_MSB, hs_start); 893 reg_write16(encoder, REG_VS_LINE_END_1_MSB, vs1_line_e);
591 reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_end); 894 reg_write16(encoder, REG_VS_PIX_END_1_MSB, vs1_pix_e);
592 reg_write16(encoder, REG_VWIN_START_1_MSB, vwin_start); 895 reg_write16(encoder, REG_VS_LINE_STRT_2_MSB, vs2_line_s);
593 reg_write16(encoder, REG_VWIN_END_1_MSB, vwin_end); 896 reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, vs2_pix_s);
594 reg_write16(encoder, REG_DE_START_MSB, de_start); 897 reg_write16(encoder, REG_VS_LINE_END_2_MSB, vs2_line_e);
595 reg_write16(encoder, REG_DE_STOP_MSB, de_end); 898 reg_write16(encoder, REG_VS_PIX_END_2_MSB, vs2_pix_e);
899 reg_write16(encoder, REG_HS_PIX_START_MSB, hs_pix_s);
900 reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_pix_e);
901 reg_write16(encoder, REG_VWIN_START_1_MSB, vwin1_line_s);
902 reg_write16(encoder, REG_VWIN_END_1_MSB, vwin1_line_e);
903 reg_write16(encoder, REG_VWIN_START_2_MSB, vwin2_line_s);
904 reg_write16(encoder, REG_VWIN_END_2_MSB, vwin2_line_e);
905 reg_write16(encoder, REG_DE_START_MSB, de_pix_s);
906 reg_write16(encoder, REG_DE_STOP_MSB, de_pix_e);
596 907
597 if (priv->rev == TDA19988) { 908 if (priv->rev == TDA19988) {
598 /* let incoming pixels fill the active space (if any) */ 909 /* let incoming pixels fill the active space (if any) */
599 reg_write(encoder, REG_ENABLE_SPACE, 0x01); 910 reg_write(encoder, REG_ENABLE_SPACE, 0x01);
600 } 911 }
601 912
602 reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
603 reg_write16(encoder, REG_REFLINE_MSB, ref_line);
604
605 reg = TBG_CNTRL_1_VHX_EXT_DE |
606 TBG_CNTRL_1_VHX_EXT_HS |
607 TBG_CNTRL_1_VHX_EXT_VS |
608 TBG_CNTRL_1_DWIN_DIS | /* HDCP off */
609 TBG_CNTRL_1_VH_TGL_2;
610 if (mode->flags & (DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC))
611 reg |= TBG_CNTRL_1_VH_TGL_0;
612 reg_set(encoder, REG_TBG_CNTRL_1, reg);
613
614 /* must be last register set: */ 913 /* must be last register set: */
615 reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE); 914 reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE);
915
 916	/* Only set up the info frames if the sink is HDMI */
917 if (priv->is_hdmi_sink) {
918 /* We need to turn HDMI HDCP stuff on to get audio through */
919 reg_clear(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
920 reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(1));
921 reg_set(encoder, REG_TX33, TX33_HDMI);
922
923 tda998x_write_avi(encoder, adjusted_mode);
924
925 if (priv->params.audio_cfg)
926 tda998x_configure_audio(encoder, adjusted_mode,
927 &priv->params);
928 }
616} 929}
617 930
618static enum drm_connector_status 931static enum drm_connector_status
@@ -673,6 +986,7 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
673static uint8_t * 986static uint8_t *
674do_get_edid(struct drm_encoder *encoder) 987do_get_edid(struct drm_encoder *encoder)
675{ 988{
989 struct tda998x_priv *priv = to_tda998x_priv(encoder);
676 int j = 0, valid_extensions = 0; 990 int j = 0, valid_extensions = 0;
677 uint8_t *block, *new; 991 uint8_t *block, *new;
678 bool print_bad_edid = drm_debug & DRM_UT_KMS; 992 bool print_bad_edid = drm_debug & DRM_UT_KMS;
@@ -680,6 +994,9 @@ do_get_edid(struct drm_encoder *encoder)
680 if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) 994 if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
681 return NULL; 995 return NULL;
682 996
997 if (priv->rev == TDA19988)
998 reg_clear(encoder, REG_TX4, TX4_PD_RAM);
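/*
 * Editor's note: TX4_PD_RAM powers down the TDA19988's EDID block RAM;
 * it is cleared here so the DDC block can fetch EDID, and set again in
 * the done/fail paths below to save power.
 */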
999
683 /* base block fetch */ 1000 /* base block fetch */
684 if (read_edid_block(encoder, block, 0)) 1001 if (read_edid_block(encoder, block, 0))
685 goto fail; 1002 goto fail;
@@ -689,7 +1006,7 @@ do_get_edid(struct drm_encoder *encoder)
689 1006
690 /* if there's no extensions, we're done */ 1007 /* if there's no extensions, we're done */
691 if (block[0x7e] == 0) 1008 if (block[0x7e] == 0)
692 return block; 1009 goto done;
693 1010
694 new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL); 1011 new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
695 if (!new) 1012 if (!new)
@@ -716,9 +1033,15 @@ do_get_edid(struct drm_encoder *encoder)
716 block = new; 1033 block = new;
717 } 1034 }
718 1035
1036done:
1037 if (priv->rev == TDA19988)
1038 reg_set(encoder, REG_TX4, TX4_PD_RAM);
1039
719 return block; 1040 return block;
720 1041
721fail: 1042fail:
1043 if (priv->rev == TDA19988)
1044 reg_set(encoder, REG_TX4, TX4_PD_RAM);
722 dev_warn(encoder->dev->dev, "failed to read EDID\n"); 1045 dev_warn(encoder->dev->dev, "failed to read EDID\n");
723 kfree(block); 1046 kfree(block);
724 return NULL; 1047 return NULL;
@@ -728,12 +1051,14 @@ static int
728tda998x_encoder_get_modes(struct drm_encoder *encoder, 1051tda998x_encoder_get_modes(struct drm_encoder *encoder,
729 struct drm_connector *connector) 1052 struct drm_connector *connector)
730{ 1053{
1054 struct tda998x_priv *priv = to_tda998x_priv(encoder);
731 struct edid *edid = (struct edid *)do_get_edid(encoder); 1055 struct edid *edid = (struct edid *)do_get_edid(encoder);
732 int n = 0; 1056 int n = 0;
733 1057
734 if (edid) { 1058 if (edid) {
735 drm_mode_connector_update_edid_property(connector, edid); 1059 drm_mode_connector_update_edid_property(connector, edid);
736 n = drm_add_edid_modes(connector, edid); 1060 n = drm_add_edid_modes(connector, edid);
1061 priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
737 kfree(edid); 1062 kfree(edid);
738 } 1063 }
739 1064
@@ -807,6 +1132,10 @@ tda998x_encoder_init(struct i2c_client *client,
807 if (!priv) 1132 if (!priv)
808 return -ENOMEM; 1133 return -ENOMEM;
809 1134
1135 priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
1136 priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
1137 priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
1138
810 priv->current_page = 0; 1139 priv->current_page = 0;
811 priv->cec = i2c_new_dummy(client->adapter, 0x34); 1140 priv->cec = i2c_new_dummy(client->adapter, 0x34);
812 priv->dpms = DRM_MODE_DPMS_OFF; 1141 priv->dpms = DRM_MODE_DPMS_OFF;
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index ada49eda489f..ab1892eb1074 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -113,7 +113,6 @@ static const struct file_operations i810_buffer_fops = {
113 .release = drm_release, 113 .release = drm_release,
114 .unlocked_ioctl = drm_ioctl, 114 .unlocked_ioctl = drm_ioctl,
115 .mmap = i810_mmap_buffers, 115 .mmap = i810_mmap_buffers,
116 .fasync = drm_fasync,
117#ifdef CONFIG_COMPAT 116#ifdef CONFIG_COMPAT
118 .compat_ioctl = drm_compat_ioctl, 117 .compat_ioctl = drm_compat_ioctl,
119#endif 118#endif
@@ -1241,7 +1240,7 @@ int i810_driver_dma_quiescent(struct drm_device *dev)
1241 return 0; 1240 return 0;
1242} 1241}
1243 1242
1244struct drm_ioctl_desc i810_ioctls[] = { 1243const struct drm_ioctl_desc i810_ioctls[] = {
1245 DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1244 DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1246 DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED), 1245 DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
1247 DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED), 1246 DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 2e91fc3580b4..d8180d22cedd 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -49,7 +49,6 @@ static const struct file_operations i810_driver_fops = {
49 .unlocked_ioctl = drm_ioctl, 49 .unlocked_ioctl = drm_ioctl,
50 .mmap = drm_mmap, 50 .mmap = drm_mmap,
51 .poll = drm_poll, 51 .poll = drm_poll,
52 .fasync = drm_fasync,
53#ifdef CONFIG_COMPAT 52#ifdef CONFIG_COMPAT
54 .compat_ioctl = drm_compat_ioctl, 53 .compat_ioctl = drm_compat_ioctl,
55#endif 54#endif
@@ -58,7 +57,7 @@ static const struct file_operations i810_driver_fops = {
58 57
59static struct drm_driver driver = { 58static struct drm_driver driver = {
60 .driver_features = 59 .driver_features =
61 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | 60 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
62 DRIVER_HAVE_DMA, 61 DRIVER_HAVE_DMA,
63 .dev_priv_size = sizeof(drm_i810_buf_priv_t), 62 .dev_priv_size = sizeof(drm_i810_buf_priv_t),
64 .load = i810_driver_load, 63 .load = i810_driver_load,
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index 6e0acad9e0f5..d4d16eddd651 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -125,7 +125,7 @@ extern void i810_driver_preclose(struct drm_device *dev,
125extern int i810_driver_device_is_agp(struct drm_device *dev); 125extern int i810_driver_device_is_agp(struct drm_device *dev);
126 126
127extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 127extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
128extern struct drm_ioctl_desc i810_ioctls[]; 128extern const struct drm_ioctl_desc i810_ioctls[];
129extern int i810_max_ioctl; 129extern int i810_max_ioctl;
130 130
131#define I810_BASE(reg) ((unsigned long) \ 131#define I810_BASE(reg) ((unsigned long) \
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 40034ecefd3b..b8449a84a0dc 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -5,6 +5,7 @@
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6i915-y := i915_drv.o i915_dma.o i915_irq.o \ 6i915-y := i915_drv.o i915_dma.o i915_irq.o \
7 i915_debugfs.o \ 7 i915_debugfs.o \
8 i915_gpu_error.o \
8 i915_suspend.o \ 9 i915_suspend.o \
9 i915_gem.o \ 10 i915_gem.o \
10 i915_gem_context.o \ 11 i915_gem_context.o \
@@ -37,6 +38,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
37 intel_sprite.o \ 38 intel_sprite.o \
38 intel_opregion.o \ 39 intel_opregion.o \
39 intel_sideband.o \ 40 intel_sideband.o \
41 intel_uncore.o \
40 dvo_ch7xxx.o \ 42 dvo_ch7xxx.o \
41 dvo_ch7017.o \ 43 dvo_ch7017.o \
42 dvo_ivch.o \ 44 dvo_ivch.o \
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 757e0fa11043..af42e94f6846 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -307,7 +307,7 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
307 idf |= CH7xxx_IDF_HSP; 307 idf |= CH7xxx_IDF_HSP;
308 308
309 if (mode->flags & DRM_MODE_FLAG_PVSYNC) 309 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
310 idf |= CH7xxx_IDF_HSP; 310 idf |= CH7xxx_IDF_VSP;
311 311
312 ch7xxx_writeb(dvo, CH7xxx_IDF, idf); 312 ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
313} 313}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 47d6c748057e..55ab9246e1b9 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,7 +30,8 @@
30#include <linux/debugfs.h> 30#include <linux/debugfs.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/export.h> 32#include <linux/export.h>
33#include <generated/utsrelease.h> 33#include <linux/list_sort.h>
34#include <asm/msr-index.h>
34#include <drm/drmP.h> 35#include <drm/drmP.h>
35#include "intel_drv.h" 36#include "intel_drv.h"
36#include "intel_ringbuffer.h" 37#include "intel_ringbuffer.h"
@@ -90,41 +91,45 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
90 } 91 }
91} 92}
92 93
93static const char *cache_level_str(int type) 94static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
94{ 95{
95 switch (type) { 96 return obj->has_global_gtt_mapping ? "g" : " ";
96 case I915_CACHE_NONE: return " uncached";
97 case I915_CACHE_LLC: return " snooped (LLC)";
98 case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
99 default: return "";
100 }
101} 97}
102 98
103static void 99static void
104describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 100describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
105{ 101{
106 seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", 102 struct i915_vma *vma;
103 seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
107 &obj->base, 104 &obj->base,
108 get_pin_flag(obj), 105 get_pin_flag(obj),
109 get_tiling_flag(obj), 106 get_tiling_flag(obj),
107 get_global_flag(obj),
110 obj->base.size / 1024, 108 obj->base.size / 1024,
111 obj->base.read_domains, 109 obj->base.read_domains,
112 obj->base.write_domain, 110 obj->base.write_domain,
113 obj->last_read_seqno, 111 obj->last_read_seqno,
114 obj->last_write_seqno, 112 obj->last_write_seqno,
115 obj->last_fenced_seqno, 113 obj->last_fenced_seqno,
116 cache_level_str(obj->cache_level), 114 i915_cache_level_str(obj->cache_level),
117 obj->dirty ? " dirty" : "", 115 obj->dirty ? " dirty" : "",
118 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 116 obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
119 if (obj->base.name) 117 if (obj->base.name)
120 seq_printf(m, " (name: %d)", obj->base.name); 118 seq_printf(m, " (name: %d)", obj->base.name);
121 if (obj->pin_count) 119 if (obj->pin_count)
122 seq_printf(m, " (pinned x %d)", obj->pin_count); 120 seq_printf(m, " (pinned x %d)", obj->pin_count);
121 if (obj->pin_display)
122 seq_printf(m, " (display)");
123 if (obj->fence_reg != I915_FENCE_REG_NONE) 123 if (obj->fence_reg != I915_FENCE_REG_NONE)
124 seq_printf(m, " (fence: %d)", obj->fence_reg); 124 seq_printf(m, " (fence: %d)", obj->fence_reg);
125 if (obj->gtt_space != NULL) 125 list_for_each_entry(vma, &obj->vma_list, vma_link) {
126 seq_printf(m, " (gtt offset: %08x, size: %08x)", 126 if (!i915_is_ggtt(vma->vm))
127 obj->gtt_offset, (unsigned int)obj->gtt_space->size); 127 seq_puts(m, " (pp");
128 else
129 seq_puts(m, " (g");
130 seq_printf(m, "gtt offset: %08lx, size: %08lx)",
131 vma->node.start, vma->node.size);
132 }
128 if (obj->stolen) 133 if (obj->stolen)
129 seq_printf(m, " (stolen: %08lx)", obj->stolen->start); 134 seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
130 if (obj->pin_mappable || obj->fault_mappable) { 135 if (obj->pin_mappable || obj->fault_mappable) {
@@ -146,8 +151,9 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
146 uintptr_t list = (uintptr_t) node->info_ent->data; 151 uintptr_t list = (uintptr_t) node->info_ent->data;
147 struct list_head *head; 152 struct list_head *head;
148 struct drm_device *dev = node->minor->dev; 153 struct drm_device *dev = node->minor->dev;
149 drm_i915_private_t *dev_priv = dev->dev_private; 154 struct drm_i915_private *dev_priv = dev->dev_private;
150 struct drm_i915_gem_object *obj; 155 struct i915_address_space *vm = &dev_priv->gtt.base;
156 struct i915_vma *vma;
151 size_t total_obj_size, total_gtt_size; 157 size_t total_obj_size, total_gtt_size;
152 int count, ret; 158 int count, ret;
153 159
@@ -155,14 +161,15 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
155 if (ret) 161 if (ret)
156 return ret; 162 return ret;
157 163
164 /* FIXME: the user of this interface might want more than just GGTT */
158 switch (list) { 165 switch (list) {
159 case ACTIVE_LIST: 166 case ACTIVE_LIST:
160 seq_printf(m, "Active:\n"); 167 seq_puts(m, "Active:\n");
161 head = &dev_priv->mm.active_list; 168 head = &vm->active_list;
162 break; 169 break;
163 case INACTIVE_LIST: 170 case INACTIVE_LIST:
164 seq_printf(m, "Inactive:\n"); 171 seq_puts(m, "Inactive:\n");
165 head = &dev_priv->mm.inactive_list; 172 head = &vm->inactive_list;
166 break; 173 break;
167 default: 174 default:
168 mutex_unlock(&dev->struct_mutex); 175 mutex_unlock(&dev->struct_mutex);
@@ -170,14 +177,75 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
170 } 177 }
171 178
172 total_obj_size = total_gtt_size = count = 0; 179 total_obj_size = total_gtt_size = count = 0;
173 list_for_each_entry(obj, head, mm_list) { 180 list_for_each_entry(vma, head, mm_list) {
174 seq_printf(m, " "); 181 seq_printf(m, " ");
175 describe_obj(m, obj); 182 describe_obj(m, vma->obj);
176 seq_printf(m, "\n"); 183 seq_printf(m, "\n");
184 total_obj_size += vma->obj->base.size;
185 total_gtt_size += vma->node.size;
186 count++;
187 }
188 mutex_unlock(&dev->struct_mutex);
189
190 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
191 count, total_obj_size, total_gtt_size);
192 return 0;
193}
194
195static int obj_rank_by_stolen(void *priv,
196 struct list_head *A, struct list_head *B)
197{
198 struct drm_i915_gem_object *a =
199 container_of(A, struct drm_i915_gem_object, obj_exec_link);
200 struct drm_i915_gem_object *b =
201 container_of(B, struct drm_i915_gem_object, obj_exec_link);
202
203 return a->stolen->start - b->stolen->start;
204}
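/*
 * Editor's note: list_sort() only needs the sign of the comparator, so
 * subtracting the drm_mm start offsets works here; the unsigned long
 * difference is truncated to int, which could in principle mis-order
 * offsets more than INT_MAX apart, but stolen memory is far too small
 * for that to happen.
 */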
205
206static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
207{
208 struct drm_info_node *node = (struct drm_info_node *) m->private;
209 struct drm_device *dev = node->minor->dev;
210 struct drm_i915_private *dev_priv = dev->dev_private;
211 struct drm_i915_gem_object *obj;
212 size_t total_obj_size, total_gtt_size;
213 LIST_HEAD(stolen);
214 int count, ret;
215
216 ret = mutex_lock_interruptible(&dev->struct_mutex);
217 if (ret)
218 return ret;
219
220 total_obj_size = total_gtt_size = count = 0;
221 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
222 if (obj->stolen == NULL)
223 continue;
224
225 list_add(&obj->obj_exec_link, &stolen);
226
227 total_obj_size += obj->base.size;
228 total_gtt_size += i915_gem_obj_ggtt_size(obj);
229 count++;
230 }
231 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
232 if (obj->stolen == NULL)
233 continue;
234
235 list_add(&obj->obj_exec_link, &stolen);
236
177 total_obj_size += obj->base.size; 237 total_obj_size += obj->base.size;
178 total_gtt_size += obj->gtt_space->size;
179 count++; 238 count++;
180 } 239 }
240 list_sort(NULL, &stolen, obj_rank_by_stolen);
241 seq_puts(m, "Stolen:\n");
242 while (!list_empty(&stolen)) {
243 obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
244 seq_puts(m, " ");
245 describe_obj(m, obj);
246 seq_putc(m, '\n');
247 list_del_init(&obj->obj_exec_link);
248 }
181 mutex_unlock(&dev->struct_mutex); 249 mutex_unlock(&dev->struct_mutex);
182 250
183 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 251 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
@@ -187,10 +255,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
187 255
188#define count_objects(list, member) do { \ 256#define count_objects(list, member) do { \
189 list_for_each_entry(obj, list, member) { \ 257 list_for_each_entry(obj, list, member) { \
190 size += obj->gtt_space->size; \ 258 size += i915_gem_obj_ggtt_size(obj); \
191 ++count; \ 259 ++count; \
192 if (obj->map_and_fenceable) { \ 260 if (obj->map_and_fenceable) { \
193 mappable_size += obj->gtt_space->size; \ 261 mappable_size += i915_gem_obj_ggtt_size(obj); \
194 ++mappable_count; \ 262 ++mappable_count; \
195 } \ 263 } \
196 } \ 264 } \
@@ -209,7 +277,7 @@ static int per_file_stats(int id, void *ptr, void *data)
209 stats->count++; 277 stats->count++;
210 stats->total += obj->base.size; 278 stats->total += obj->base.size;
211 279
212 if (obj->gtt_space) { 280 if (i915_gem_obj_ggtt_bound(obj)) {
213 if (!list_empty(&obj->ring_list)) 281 if (!list_empty(&obj->ring_list))
214 stats->active += obj->base.size; 282 stats->active += obj->base.size;
215 else 283 else
@@ -222,6 +290,17 @@ static int per_file_stats(int id, void *ptr, void *data)
222 return 0; 290 return 0;
223} 291}
224 292
293#define count_vmas(list, member) do { \
294 list_for_each_entry(vma, list, member) { \
295 size += i915_gem_obj_ggtt_size(vma->obj); \
296 ++count; \
297 if (vma->obj->map_and_fenceable) { \
298 mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
299 ++mappable_count; \
300 } \
301 } \
302} while (0)
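/*
 * Editor's note: like count_objects() above, count_vmas() is wrapped in
 * do { ... } while (0) so the multi-statement macro expands as a single
 * statement and stays safe inside un-braced if/else bodies.
 */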
303
225static int i915_gem_object_info(struct seq_file *m, void* data) 304static int i915_gem_object_info(struct seq_file *m, void* data)
226{ 305{
227 struct drm_info_node *node = (struct drm_info_node *) m->private; 306 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -230,7 +309,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
230 u32 count, mappable_count, purgeable_count; 309 u32 count, mappable_count, purgeable_count;
231 size_t size, mappable_size, purgeable_size; 310 size_t size, mappable_size, purgeable_size;
232 struct drm_i915_gem_object *obj; 311 struct drm_i915_gem_object *obj;
312 struct i915_address_space *vm = &dev_priv->gtt.base;
233 struct drm_file *file; 313 struct drm_file *file;
314 struct i915_vma *vma;
234 int ret; 315 int ret;
235 316
236 ret = mutex_lock_interruptible(&dev->struct_mutex); 317 ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -247,12 +328,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
247 count, mappable_count, size, mappable_size); 328 count, mappable_count, size, mappable_size);
248 329
249 size = count = mappable_size = mappable_count = 0; 330 size = count = mappable_size = mappable_count = 0;
250 count_objects(&dev_priv->mm.active_list, mm_list); 331 count_vmas(&vm->active_list, mm_list);
251 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 332 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
252 count, mappable_count, size, mappable_size); 333 count, mappable_count, size, mappable_size);
253 334
254 size = count = mappable_size = mappable_count = 0; 335 size = count = mappable_size = mappable_count = 0;
255 count_objects(&dev_priv->mm.inactive_list, mm_list); 336 count_vmas(&vm->inactive_list, mm_list);
256 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 337 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
257 count, mappable_count, size, mappable_size); 338 count, mappable_count, size, mappable_size);
258 339
@@ -267,11 +348,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
267 size = count = mappable_size = mappable_count = 0; 348 size = count = mappable_size = mappable_count = 0;
268 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 349 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
269 if (obj->fault_mappable) { 350 if (obj->fault_mappable) {
270 size += obj->gtt_space->size; 351 size += i915_gem_obj_ggtt_size(obj);
271 ++count; 352 ++count;
272 } 353 }
273 if (obj->pin_mappable) { 354 if (obj->pin_mappable) {
274 mappable_size += obj->gtt_space->size; 355 mappable_size += i915_gem_obj_ggtt_size(obj);
275 ++mappable_count; 356 ++mappable_count;
276 } 357 }
277 if (obj->madv == I915_MADV_DONTNEED) { 358 if (obj->madv == I915_MADV_DONTNEED) {
@@ -287,10 +368,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
287 count, size); 368 count, size);
288 369
289 seq_printf(m, "%zu [%lu] gtt total\n", 370 seq_printf(m, "%zu [%lu] gtt total\n",
290 dev_priv->gtt.total, 371 dev_priv->gtt.base.total,
291 dev_priv->gtt.mappable_end - dev_priv->gtt.start); 372 dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
292 373
293 seq_printf(m, "\n"); 374 seq_putc(m, '\n');
294 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 375 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
295 struct file_stats stats; 376 struct file_stats stats;
296 377
@@ -310,7 +391,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
310 return 0; 391 return 0;
311} 392}
312 393
313static int i915_gem_gtt_info(struct seq_file *m, void* data) 394static int i915_gem_gtt_info(struct seq_file *m, void *data)
314{ 395{
315 struct drm_info_node *node = (struct drm_info_node *) m->private; 396 struct drm_info_node *node = (struct drm_info_node *) m->private;
316 struct drm_device *dev = node->minor->dev; 397 struct drm_device *dev = node->minor->dev;
@@ -329,11 +410,11 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
329 if (list == PINNED_LIST && obj->pin_count == 0) 410 if (list == PINNED_LIST && obj->pin_count == 0)
330 continue; 411 continue;
331 412
332 seq_printf(m, " "); 413 seq_puts(m, " ");
333 describe_obj(m, obj); 414 describe_obj(m, obj);
334 seq_printf(m, "\n"); 415 seq_putc(m, '\n');
335 total_obj_size += obj->base.size; 416 total_obj_size += obj->base.size;
336 total_gtt_size += obj->gtt_space->size; 417 total_gtt_size += i915_gem_obj_ggtt_size(obj);
337 count++; 418 count++;
338 } 419 }
339 420
@@ -371,20 +452,22 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
371 pipe, plane); 452 pipe, plane);
372 } 453 }
373 if (work->enable_stall_check) 454 if (work->enable_stall_check)
374 seq_printf(m, "Stall check enabled, "); 455 seq_puts(m, "Stall check enabled, ");
375 else 456 else
376 seq_printf(m, "Stall check waiting for page flip ioctl, "); 457 seq_puts(m, "Stall check waiting for page flip ioctl, ");
377 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 458 seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
378 459
379 if (work->old_fb_obj) { 460 if (work->old_fb_obj) {
380 struct drm_i915_gem_object *obj = work->old_fb_obj; 461 struct drm_i915_gem_object *obj = work->old_fb_obj;
381 if (obj) 462 if (obj)
382 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 463 seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
464 i915_gem_obj_ggtt_offset(obj));
383 } 465 }
384 if (work->pending_flip_obj) { 466 if (work->pending_flip_obj) {
385 struct drm_i915_gem_object *obj = work->pending_flip_obj; 467 struct drm_i915_gem_object *obj = work->pending_flip_obj;
386 if (obj) 468 if (obj)
387 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 469 seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
470 i915_gem_obj_ggtt_offset(obj));
388 } 471 }
389 } 472 }
390 spin_unlock_irqrestore(&dev->event_lock, flags); 473 spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -424,7 +507,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
424 mutex_unlock(&dev->struct_mutex); 507 mutex_unlock(&dev->struct_mutex);
425 508
426 if (count == 0) 509 if (count == 0)
427 seq_printf(m, "No requests\n"); 510 seq_puts(m, "No requests\n");
428 511
429 return 0; 512 return 0;
430} 513}
@@ -574,10 +657,10 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
574 seq_printf(m, "Fence %d, pin count = %d, object = ", 657 seq_printf(m, "Fence %d, pin count = %d, object = ",
575 i, dev_priv->fence_regs[i].pin_count); 658 i, dev_priv->fence_regs[i].pin_count);
576 if (obj == NULL) 659 if (obj == NULL)
577 seq_printf(m, "unused"); 660 seq_puts(m, "unused");
578 else 661 else
579 describe_obj(m, obj); 662 describe_obj(m, obj);
580 seq_printf(m, "\n"); 663 seq_putc(m, '\n');
581 } 664 }
582 665
583 mutex_unlock(&dev->struct_mutex); 666 mutex_unlock(&dev->struct_mutex);
@@ -606,361 +689,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
606 return 0; 689 return 0;
607} 690}
608 691
609static const char *ring_str(int ring)
610{
611 switch (ring) {
612 case RCS: return "render";
613 case VCS: return "bsd";
614 case BCS: return "blt";
615 case VECS: return "vebox";
616 default: return "";
617 }
618}
619
620static const char *pin_flag(int pinned)
621{
622 if (pinned > 0)
623 return " P";
624 else if (pinned < 0)
625 return " p";
626 else
627 return "";
628}
629
630static const char *tiling_flag(int tiling)
631{
632 switch (tiling) {
633 default:
634 case I915_TILING_NONE: return "";
635 case I915_TILING_X: return " X";
636 case I915_TILING_Y: return " Y";
637 }
638}
639
640static const char *dirty_flag(int dirty)
641{
642 return dirty ? " dirty" : "";
643}
644
645static const char *purgeable_flag(int purgeable)
646{
647 return purgeable ? " purgeable" : "";
648}
649
650static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
651{
652
653 if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
654 e->err = -ENOSPC;
655 return false;
656 }
657
658 if (e->bytes == e->size - 1 || e->err)
659 return false;
660
661 return true;
662}
663
664static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
665 unsigned len)
666{
667 if (e->pos + len <= e->start) {
668 e->pos += len;
669 return false;
670 }
671
672 /* First vsnprintf needs to fit in its entirety for memmove */
673 if (len >= e->size) {
674 e->err = -EIO;
675 return false;
676 }
677
678 return true;
679}
680
681static void __i915_error_advance(struct drm_i915_error_state_buf *e,
682 unsigned len)
683{
684 /* If this is first printf in this window, adjust it so that
685 * start position matches start of the buffer
686 */
687
688 if (e->pos < e->start) {
689 const size_t off = e->start - e->pos;
690
691 /* Should not happen but be paranoid */
692 if (off > len || e->bytes) {
693 e->err = -EIO;
694 return;
695 }
696
697 memmove(e->buf, e->buf + off, len - off);
698 e->bytes = len - off;
699 e->pos = e->start;
700 return;
701 }
702
703 e->bytes += len;
704 e->pos += len;
705}
706
707static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
708 const char *f, va_list args)
709{
710 unsigned len;
711
712 if (!__i915_error_ok(e))
713 return;
714
715 /* Seek the first printf which is hits start position */
716 if (e->pos < e->start) {
717 len = vsnprintf(NULL, 0, f, args);
718 if (!__i915_error_seek(e, len))
719 return;
720 }
721
722 len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
723 if (len >= e->size - e->bytes)
724 len = e->size - e->bytes - 1;
725
726 __i915_error_advance(e, len);
727}
728
729static void i915_error_puts(struct drm_i915_error_state_buf *e,
730 const char *str)
731{
732 unsigned len;
733
734 if (!__i915_error_ok(e))
735 return;
736
737 len = strlen(str);
738
739 /* Seek the first printf which is hits start position */
740 if (e->pos < e->start) {
741 if (!__i915_error_seek(e, len))
742 return;
743 }
744
745 if (len >= e->size - e->bytes)
746 len = e->size - e->bytes - 1;
747 memcpy(e->buf + e->bytes, str, len);
748
749 __i915_error_advance(e, len);
750}
751
752void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
753{
754 va_list args;
755
756 va_start(args, f);
757 i915_error_vprintf(e, f, args);
-	va_end(args);
-}
-
-#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
-#define err_puts(e, s) i915_error_puts(e, s)
-
-static void print_error_buffers(struct drm_i915_error_state_buf *m,
-				const char *name,
-				struct drm_i915_error_buffer *err,
-				int count)
-{
-	err_printf(m, "%s [%d]:\n", name, count);
-
-	while (count--) {
-		err_printf(m, "  %08x %8u %02x %02x %x %x",
-			   err->gtt_offset,
-			   err->size,
-			   err->read_domains,
-			   err->write_domain,
-			   err->rseqno, err->wseqno);
-		err_puts(m, pin_flag(err->pinned));
-		err_puts(m, tiling_flag(err->tiling));
-		err_puts(m, dirty_flag(err->dirty));
-		err_puts(m, purgeable_flag(err->purgeable));
-		err_puts(m, err->ring != -1 ? " " : "");
-		err_puts(m, ring_str(err->ring));
-		err_puts(m, cache_level_str(err->cache_level));
-
-		if (err->name)
-			err_printf(m, " (name: %d)", err->name);
-		if (err->fence_reg != I915_FENCE_REG_NONE)
-			err_printf(m, " (fence: %d)", err->fence_reg);
-
-		err_puts(m, "\n");
-		err++;
-	}
-}
-
-static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
-				  struct drm_device *dev,
-				  struct drm_i915_error_state *error,
-				  unsigned ring)
-{
-	BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
-	err_printf(m, "%s command stream:\n", ring_str(ring));
-	err_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
-	err_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
-	err_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
-	err_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
-	err_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
-	err_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
-	err_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
-	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
-		err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
-
-	if (INTEL_INFO(dev)->gen >= 4)
-		err_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
-	err_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
-	err_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
-	if (INTEL_INFO(dev)->gen >= 6) {
-		err_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
-		err_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
-		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
-			   error->semaphore_mboxes[ring][0],
-			   error->semaphore_seqno[ring][0]);
-		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
-			   error->semaphore_mboxes[ring][1],
-			   error->semaphore_seqno[ring][1]);
-	}
-	err_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
-	err_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
-	err_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
-	err_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
-}
-
-struct i915_error_state_file_priv {
-	struct drm_device *dev;
-	struct drm_i915_error_state *error;
-};
-
-
-static int i915_error_state(struct i915_error_state_file_priv *error_priv,
-			    struct drm_i915_error_state_buf *m)
-
-{
-	struct drm_device *dev = error_priv->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_error_state *error = error_priv->error;
-	struct intel_ring_buffer *ring;
-	int i, j, page, offset, elt;
-
-	if (!error) {
-		err_printf(m, "no error state collected\n");
-		return 0;
-	}
-
-	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
-		   error->time.tv_usec);
-	err_printf(m, "Kernel: " UTS_RELEASE "\n");
-	err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
-	err_printf(m, "EIR: 0x%08x\n", error->eir);
-	err_printf(m, "IER: 0x%08x\n", error->ier);
-	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
-	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
-	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
-	err_printf(m, "CCID: 0x%08x\n", error->ccid);
-
-	for (i = 0; i < dev_priv->num_fence_regs; i++)
-		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
-
-	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
-		err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
-			   error->extra_instdone[i]);
-
-	if (INTEL_INFO(dev)->gen >= 6) {
-		err_printf(m, "ERROR: 0x%08x\n", error->error);
-		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
-	}
-
-	if (INTEL_INFO(dev)->gen == 7)
-		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
-
-	for_each_ring(ring, dev_priv, i)
-		i915_ring_error_state(m, dev, error, i);
-
-	if (error->active_bo)
-		print_error_buffers(m, "Active",
-				    error->active_bo,
-				    error->active_bo_count);
-
-	if (error->pinned_bo)
-		print_error_buffers(m, "Pinned",
-				    error->pinned_bo,
-				    error->pinned_bo_count);
-
-	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-		struct drm_i915_error_object *obj;
-
-		if ((obj = error->ring[i].batchbuffer)) {
-			err_printf(m, "%s --- gtt_offset = 0x%08x\n",
-				   dev_priv->ring[i].name,
-				   obj->gtt_offset);
-			offset = 0;
-			for (page = 0; page < obj->page_count; page++) {
-				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
-					err_printf(m, "%08x : %08x\n", offset,
-						   obj->pages[page][elt]);
-					offset += 4;
-				}
-			}
-		}
-
-		if (error->ring[i].num_requests) {
-			err_printf(m, "%s --- %d requests\n",
-				   dev_priv->ring[i].name,
-				   error->ring[i].num_requests);
-			for (j = 0; j < error->ring[i].num_requests; j++) {
-				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
-					   error->ring[i].requests[j].seqno,
-					   error->ring[i].requests[j].jiffies,
-					   error->ring[i].requests[j].tail);
-			}
-		}
-
-		if ((obj = error->ring[i].ringbuffer)) {
-			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
-				   dev_priv->ring[i].name,
-				   obj->gtt_offset);
-			offset = 0;
-			for (page = 0; page < obj->page_count; page++) {
-				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
-					err_printf(m, "%08x : %08x\n",
-						   offset,
-						   obj->pages[page][elt]);
-					offset += 4;
-				}
-			}
-		}
-
-		obj = error->ring[i].ctx;
-		if (obj) {
-			err_printf(m, "%s --- HW Context = 0x%08x\n",
-				   dev_priv->ring[i].name,
-				   obj->gtt_offset);
-			offset = 0;
-			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
-				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
-					   offset,
-					   obj->pages[0][elt],
-					   obj->pages[0][elt+1],
-					   obj->pages[0][elt+2],
-					   obj->pages[0][elt+3]);
-				offset += 16;
-			}
-		}
-	}
-
-	if (error->overlay)
-		intel_overlay_print_error_state(m, error->overlay);
-
-	if (error->display)
-		intel_display_print_error_state(m, dev, error->display);
-
-	return 0;
-}
-
 static ssize_t
 i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
@@ -986,9 +714,7 @@ i915_error_state_write(struct file *filp,
 static int i915_error_state_open(struct inode *inode, struct file *file)
 {
	struct drm_device *dev = inode->i_private;
-	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_error_state_file_priv *error_priv;
-	unsigned long flags;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
@@ -996,11 +722,7 @@ static int i915_error_state_open(struct inode *inode, struct file *file)

	error_priv->dev = dev;

-	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
-	error_priv->error = dev_priv->gpu_error.first_error;
-	if (error_priv->error)
-		kref_get(&error_priv->error->ref);
-	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

@@ -1011,8 +733,7 @@ static int i915_error_state_release(struct inode *inode, struct file *file)
 {
	struct i915_error_state_file_priv *error_priv = file->private_data;

-	if (error_priv->error)
-		kref_put(&error_priv->error->ref, i915_error_state_free);
+	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
@@ -1025,40 +746,15 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
-	int ret = 0;
-
-	memset(&error_str, 0, sizeof(error_str));
-
-	/* We need to have enough room to store any i915_error_state printf
-	 * so that we can move it to start position.
-	 */
-	error_str.size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
-	error_str.buf = kmalloc(error_str.size,
-				GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
-
-	if (error_str.buf == NULL) {
-		error_str.size = PAGE_SIZE;
-		error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
-	}
-
-	if (error_str.buf == NULL) {
-		error_str.size = 128;
-		error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
-	}
-
-	if (error_str.buf == NULL)
-		return -ENOMEM;
-
-	error_str.start = *pos;
+	int ret;

-	ret = i915_error_state(error_priv, &error_str);
+	ret = i915_error_state_buf_init(&error_str, count, *pos);
	if (ret)
-		goto out;
+		return ret;

-	if (error_str.bytes == 0 && error_str.err) {
-		ret = error_str.err;
+	ret = i915_error_state_to_str(&error_str, error_priv);
+	if (ret)
		goto out;
-	}

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
@@ -1069,7 +765,7 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
	else
		*pos = error_str.start + ret_count;
 out:
-	kfree(error_str.buf);
+	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
 }

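The read path above now leans on three helpers -- i915_error_state_buf_init(), i915_error_state_to_str() and i915_error_state_buf_release() -- in place of the open-coded kmalloc fallback chain. A minimal sketch of the bounded-append pattern such a buffer typically wraps; the names (ebuf, ebuf_init, ebuf_printf) are illustrative, not the driver's actual internals:

```c
/* Sketch of a bounded error-string buffer, assuming a layout similar to
 * struct drm_i915_error_state_buf in the diff above. */
struct ebuf {
	char *buf;
	size_t size;	/* capacity */
	size_t bytes;	/* bytes written so far */
	int err;	/* sticky error */
};

static int ebuf_init(struct ebuf *e, size_t count)
{
	memset(e, 0, sizeof(*e));
	/* Room for at least one page of formatted output. */
	e->size = max_t(size_t, count + 1, PAGE_SIZE);
	e->buf = kmalloc(e->size, GFP_KERNEL);
	return e->buf ? 0 : -ENOMEM;
}

static void ebuf_printf(struct ebuf *e, const char *fmt, ...)
{
	va_list args;
	int len;

	if (e->err)
		return;

	va_start(args, fmt);
	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
	va_end(args);

	if (len < 0 || len >= e->size - e->bytes)
		e->err = -ENOSPC;	/* output truncated */
	else
		e->bytes += len;
}

static void ebuf_release(struct ebuf *e)
{
	kfree(e->buf);
	e->buf = NULL;
}
```

The sticky err field is what lets callers chain dozens of printf calls and check for failure once at the end, which is exactly the style err_printf()/err_puts() rely on.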
@@ -1246,7 +942,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
			   (freq_sts >> 8) & 0xff));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
-		seq_printf(m, "no P-state info available\n");
+		seq_puts(m, "no P-state info available\n");
	}

	return 0;
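Most of the hunks that follow are this same mechanical conversion: when seq_printf() is handed a plain string with no format arguments, seq_puts() (or seq_putc() for a single character) is the cheaper and safer call, since it skips format parsing and cannot misinterpret a stray '%'. The rule of thumb the patch applies throughout:

```c
seq_printf(m, "freq: %d MHz\n", freq);		/* format args: keep seq_printf() */
seq_puts(m, "no P-state info available\n");	/* fixed string: seq_puts() */
seq_putc(m, '\n');				/* single character: seq_putc() */
```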
@@ -1341,28 +1037,28 @@ static int ironlake_drpc_info(struct seq_file *m)
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
-	seq_printf(m, "Current RS state: ");
+	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
-		seq_printf(m, "on\n");
+		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
-		seq_printf(m, "RC1\n");
+		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
-		seq_printf(m, "RC1E\n");
+		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
-		seq_printf(m, "RS1\n");
+		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
-		seq_printf(m, "RS2 (RC6)\n");
+		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
-		seq_printf(m, "RC3 (RC6+)\n");
+		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
-		seq_printf(m, "unknown\n");
+		seq_puts(m, "unknown\n");
		break;
	}

@@ -1377,20 +1073,19 @@ static int gen6_drpc_info(struct seq_file *m)
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
-	int count=0, ret;
-
+	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

-	spin_lock_irq(&dev_priv->gt_lock);
-	forcewake_count = dev_priv->forcewake_count;
-	spin_unlock_irq(&dev_priv->gt_lock);
+	spin_lock_irq(&dev_priv->uncore.lock);
+	forcewake_count = dev_priv->uncore.forcewake_count;
+	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
-		seq_printf(m, "RC information inaccurate because somebody "
-			   "holds a forcewake reference \n");
+		seq_puts(m, "RC information inaccurate because somebody "
+			 "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
@@ -1399,7 +1094,7 @@ static int gen6_drpc_info(struct seq_file *m)
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
-	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
+	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
@@ -1423,25 +1118,25 @@ static int gen6_drpc_info(struct seq_file *m)
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
-	seq_printf(m, "Current RC state: ");
+	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
-			seq_printf(m, "Core Power Down\n");
+			seq_puts(m, "Core Power Down\n");
		else
-			seq_printf(m, "on\n");
+			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
-		seq_printf(m, "RC3\n");
+		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
-		seq_printf(m, "RC6\n");
+		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
-		seq_printf(m, "RC7\n");
+		seq_puts(m, "RC7\n");
		break;
	default:
-		seq_printf(m, "Unknown\n");
+		seq_puts(m, "Unknown\n");
		break;
	}

@@ -1485,43 +1180,52 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
-		seq_printf(m, "FBC unsupported on this chipset\n");
+		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
-		seq_printf(m, "FBC enabled\n");
+		seq_puts(m, "FBC enabled\n");
	} else {
-		seq_printf(m, "FBC disabled: ");
-		switch (dev_priv->no_fbc_reason) {
+		seq_puts(m, "FBC disabled: ");
+		switch (dev_priv->fbc.no_fbc_reason) {
+		case FBC_OK:
+			seq_puts(m, "FBC activated, but currently disabled in hardware");
+			break;
+		case FBC_UNSUPPORTED:
+			seq_puts(m, "unsupported by this chipset");
+			break;
		case FBC_NO_OUTPUT:
-			seq_printf(m, "no outputs");
+			seq_puts(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
-			seq_printf(m, "not enough stolen memory");
+			seq_puts(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
-			seq_printf(m, "mode not supported");
+			seq_puts(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
-			seq_printf(m, "mode too large");
+			seq_puts(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
-			seq_printf(m, "FBC unsupported on plane");
+			seq_puts(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
-			seq_printf(m, "scanout buffer not tiled");
+			seq_puts(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
-			seq_printf(m, "multiple pipes are enabled");
+			seq_puts(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
-			seq_printf(m, "disabled per module param (default off)");
+			seq_puts(m, "disabled per module param (default off)");
+			break;
+		case FBC_CHIP_DEFAULT:
+			seq_puts(m, "disabled per chip default");
			break;
		default:
-			seq_printf(m, "unknown reason");
+			seq_puts(m, "unknown reason");
		}
-		seq_printf(m, "\n");
+		seq_putc(m, '\n');
	}
	return 0;
 }
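New reasons (FBC_OK, FBC_UNSUPPORTED, FBC_CHIP_DEFAULT) keep widening this switch. A designated-initializer lookup table is a common alternative shape for enum-to-string mappings; a hedged sketch of that option, not what the patch does, and assuming the enum values are small and dense:

```c
static const char * const no_fbc_reason_strs[] = {
	[FBC_OK]		 = "FBC activated, but currently disabled in hardware",
	[FBC_UNSUPPORTED]	 = "unsupported by this chipset",
	[FBC_NO_OUTPUT]		 = "no outputs",
	[FBC_STOLEN_TOO_SMALL]	 = "not enough stolen memory",
	[FBC_UNSUPPORTED_MODE]	 = "mode not supported",
	[FBC_MODE_TOO_LARGE]	 = "mode too large",
	[FBC_BAD_PLANE]		 = "FBC unsupported on plane",
	[FBC_NOT_TILED]		 = "scanout buffer not tiled",
	[FBC_MULTIPLE_PIPES]	 = "multiple pipes are enabled",
	[FBC_MODULE_PARAM]	 = "disabled per module param (default off)",
	[FBC_CHIP_DEFAULT]	 = "disabled per chip default",
};

static const char *no_fbc_reason_str(enum no_fbc_reason reason)
{
	/* Holes in the table fall back to NULL, hence the second check. */
	if (reason >= ARRAY_SIZE(no_fbc_reason_strs) ||
	    !no_fbc_reason_strs[reason])
		return "unknown reason";
	return no_fbc_reason_strs[reason];
}
```

With a table, adding a reason touches one line instead of three, at the cost of the compiler no longer warning about unhandled enum values the way a switch without default can.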
@@ -1604,7 +1308,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
-		seq_printf(m, "unsupported on this chipset\n");
+		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

@@ -1612,7 +1316,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
	if (ret)
		return ret;

-	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_delay;
	     gpu_freq <= dev_priv->rps.max_delay;
@@ -1701,7 +1405,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
	mutex_unlock(&dev->mode_config.mutex);

	mutex_lock(&dev->mode_config.fb_lock);
@@ -1716,7 +1420,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
-		seq_printf(m, "\n");
+		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

@@ -1736,22 +1440,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
		return ret;

	if (dev_priv->ips.pwrctx) {
-		seq_printf(m, "power context ");
+		seq_puts(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
-		seq_printf(m, "\n");
+		seq_putc(m, '\n');
	}

	if (dev_priv->ips.renderctx) {
-		seq_printf(m, "render context ");
+		seq_puts(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
-		seq_printf(m, "\n");
+		seq_putc(m, '\n');
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->default_context) {
			seq_printf(m, "HW default context %s ring ", ring->name);
			describe_obj(m, ring->default_context->obj);
-			seq_printf(m, "\n");
+			seq_putc(m, '\n');
		}
	}

@@ -1767,9 +1471,9 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count;

-	spin_lock_irq(&dev_priv->gt_lock);
-	forcewake_count = dev_priv->forcewake_count;
-	spin_unlock_irq(&dev_priv->gt_lock);
+	spin_lock_irq(&dev_priv->uncore.lock);
+	forcewake_count = dev_priv->uncore.forcewake_count;
+	spin_unlock_irq(&dev_priv->uncore.lock);

	seq_printf(m, "forcewake count = %u\n", forcewake_count);

@@ -1778,7 +1482,7 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)

 static const char *swizzle_string(unsigned swizzle)
 {
-	switch(swizzle) {
+	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
@@ -1868,7 +1572,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

-		seq_printf(m, "aliasing PPGTT:\n");
+		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
@@ -1886,7 +1590,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)


	if (!IS_VALLEYVIEW(dev)) {
-		seq_printf(m, "unsupported\n");
+		seq_puts(m, "unsupported\n");
		return 0;
	}

@@ -1924,6 +1628,194 @@ static int i915_dpio_info(struct seq_file *m, void *data)
	return 0;
 }

+static int i915_llc(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
+	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
+	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
+
+	return 0;
+}
+
+static int i915_edp_psr_status(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 psrstat, psrperf;
+
+	if (!IS_HASWELL(dev)) {
+		seq_puts(m, "PSR not supported on this platform\n");
+	} else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
+		seq_puts(m, "PSR enabled\n");
+	} else {
+		seq_puts(m, "PSR disabled: ");
+		switch (dev_priv->no_psr_reason) {
+		case PSR_NO_SOURCE:
+			seq_puts(m, "not supported on this platform");
+			break;
+		case PSR_NO_SINK:
+			seq_puts(m, "not supported by panel");
+			break;
+		case PSR_MODULE_PARAM:
+			seq_puts(m, "disabled by flag");
+			break;
+		case PSR_CRTC_NOT_ACTIVE:
+			seq_puts(m, "crtc not active");
+			break;
+		case PSR_PWR_WELL_ENABLED:
+			seq_puts(m, "power well enabled");
+			break;
+		case PSR_NOT_TILED:
+			seq_puts(m, "not tiled");
+			break;
+		case PSR_SPRITE_ENABLED:
+			seq_puts(m, "sprite enabled");
+			break;
+		case PSR_S3D_ENABLED:
+			seq_puts(m, "stereo 3d enabled");
+			break;
+		case PSR_INTERLACED_ENABLED:
+			seq_puts(m, "interlaced enabled");
+			break;
+		case PSR_HSW_NOT_DDIA:
+			seq_puts(m, "HSW ties PSR to DDI A (eDP)");
+			break;
+		default:
+			seq_puts(m, "unknown reason");
+		}
+		seq_puts(m, "\n");
+		return 0;
+	}
+
+	psrstat = I915_READ(EDP_PSR_STATUS_CTL);
+
+	seq_puts(m, "PSR Current State: ");
+	switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
+	case EDP_PSR_STATUS_STATE_IDLE:
+		seq_puts(m, "Reset state\n");
+		break;
+	case EDP_PSR_STATUS_STATE_SRDONACK:
+		seq_puts(m, "Wait for TG/Stream to send one frame of data after SRD conditions are met\n");
+		break;
+	case EDP_PSR_STATUS_STATE_SRDENT:
+		seq_puts(m, "SRD entry\n");
+		break;
+	case EDP_PSR_STATUS_STATE_BUFOFF:
+		seq_puts(m, "Wait for buffer turn off\n");
+		break;
+	case EDP_PSR_STATUS_STATE_BUFON:
+		seq_puts(m, "Wait for buffer turn on\n");
+		break;
+	case EDP_PSR_STATUS_STATE_AUXACK:
+		seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
+		break;
+	case EDP_PSR_STATUS_STATE_SRDOFFACK:
+		seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
+		break;
+	default:
+		seq_puts(m, "Unknown\n");
+		break;
+	}
+
+	seq_puts(m, "Link Status: ");
+	switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
+	case EDP_PSR_STATUS_LINK_FULL_OFF:
+		seq_puts(m, "Link is fully off\n");
+		break;
+	case EDP_PSR_STATUS_LINK_FULL_ON:
+		seq_puts(m, "Link is fully on\n");
+		break;
+	case EDP_PSR_STATUS_LINK_STANDBY:
+		seq_puts(m, "Link is in standby\n");
+		break;
+	default:
+		seq_puts(m, "Unknown\n");
+		break;
+	}
+
+	seq_printf(m, "PSR Entry Count: %u\n",
+		   psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
+		   EDP_PSR_STATUS_COUNT_MASK);
+
+	seq_printf(m, "Max Sleep Timer Counter: %u\n",
+		   psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
+		   EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);
+
+	seq_printf(m, "Had AUX error: %s\n",
+		   yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));
+
+	seq_printf(m, "Sending AUX: %s\n",
+		   yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));
+
+	seq_printf(m, "Sending Idle: %s\n",
+		   yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));
+
+	seq_printf(m, "Sending TP2 TP3: %s\n",
+		   yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));
+
+	seq_printf(m, "Sending TP1: %s\n",
+		   yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));
+
+	seq_printf(m, "Idle Count: %u\n",
+		   psrstat & EDP_PSR_STATUS_IDLE_MASK);
+
+	psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
+	seq_printf(m, "Performance Counter: %u\n", psrperf);
+
+	return 0;
+}
+
+static int i915_energy_uJ(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u64 power;
+	u32 units;
+
+	if (INTEL_INFO(dev)->gen < 6)
+		return -ENODEV;
+
+	rdmsrl(MSR_RAPL_POWER_UNIT, power);
+	power = (power & 0x1f00) >> 8;
+	units = 1000000 / (1 << power); /* convert to uJ */
+	power = I915_READ(MCH_SECP_NRG_STTS);
+	power *= units;
+
+	seq_printf(m, "%llu", (long long unsigned)power);
+
+	return 0;
+}
+
+static int i915_pc8_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!IS_HASWELL(dev)) {
+		seq_puts(m, "not supported\n");
+		return 0;
+	}
+
+	mutex_lock(&dev_priv->pc8.lock);
+	seq_printf(m, "Requirements met: %s\n",
+		   yesno(dev_priv->pc8.requirements_met));
+	seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
+	seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
+	seq_printf(m, "IRQs disabled: %s\n",
+		   yesno(dev_priv->pc8.irqs_disabled));
+	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled));
+	mutex_unlock(&dev_priv->pc8.lock);
+
+	return 0;
+}
+
 static int
 i915_wedged_get(void *data, u64 *val)
 {
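A worked instance of the scaling in i915_energy_uJ() above: MSR_RAPL_POWER_UNIT bits 12:8 encode the energy status unit as a power of two, and the field value 14 used below is only an illustration, not a guaranteed hardware value:

```c
/* If (msr & 0x1f00) >> 8 reads 14, one counter tick is 2^-14 J, so
 * units = 1000000 / (1 << 14) = 61 uJ per tick (integer division).
 * A counter reading of 1000 then reports 1000 * 61 = 61000 uJ. */
u32 unit_field = (msr & 0x1f00) >> 8;		/* e.g. 14 */
u32 units = 1000000 / (1 << unit_field);	/* 61 uJ per tick */
u64 energy_uJ = (u64)counter * units;
```

Note the integer division: the reported value slightly underestimates true energy (61 vs 61.035... uJ per tick in this example), which is acceptable for a debugfs counter.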
@@ -2006,6 +1898,8 @@ i915_drop_caches_set(void *data, u64 val)
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;
+	struct i915_address_space *vm;
+	struct i915_vma *vma, *x;
	int ret;

	DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
@@ -2026,12 +1920,17 @@ i915_drop_caches_set(void *data, u64 val)
	i915_gem_retire_requests(dev);

	if (val & DROP_BOUND) {
-		list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
-			if (obj->pin_count == 0) {
-				ret = i915_gem_object_unbind(obj);
+		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+			list_for_each_entry_safe(vma, x, &vm->inactive_list,
+						 mm_list) {
+				if (vma->obj->pin_count)
+					continue;
+
+				ret = i915_vma_unbind(vma);
				if (ret)
					goto unlock;
			}
+		}
	}

	if (val & DROP_UNBOUND) {
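The DROP_BOUND rewrite walks every address space on dev_priv->vm_list and unbinds VMAs instead of objects, matching the per-VM tracking introduced elsewhere in this series. The list_for_each_entry_safe() iterator is what makes it legal for i915_vma_unbind() to take the VMA off inactive_list mid-walk; the idiom in isolation, as a generic sketch:

```c
/* Generic illustration: the _safe variant caches the next node before
 * the loop body runs, so the current node may be unlinked and freed
 * without leaving the cursor pointing at dead memory. */
struct item {
	struct list_head link;
	bool unwanted;
};

static void prune(struct list_head *head)
{
	struct item *it, *next;

	list_for_each_entry_safe(it, next, head, link) {
		if (!it->unwanted)
			continue;
		list_del(&it->link);	/* safe: 'next' was already saved */
		kfree(it);
	}
}
```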
@@ -2326,6 +2225,7 @@ static struct drm_info_list i915_debugfs_list[] = {
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -2353,64 +2253,42 @@ static struct drm_info_list i915_debugfs_list[] = {
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_dpio", i915_dpio_info, 0},
+	{"i915_llc", i915_llc, 0},
+	{"i915_edp_psr_status", i915_edp_psr_status, 0},
+	{"i915_energy_uJ", i915_energy_uJ, 0},
+	{"i915_pc8_status", i915_pc8_status, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

+static struct i915_debugfs_files {
+	const char *name;
+	const struct file_operations *fops;
+} i915_debugfs_files[] = {
+	{"i915_wedged", &i915_wedged_fops},
+	{"i915_max_freq", &i915_max_freq_fops},
+	{"i915_min_freq", &i915_min_freq_fops},
+	{"i915_cache_sharing", &i915_cache_sharing_fops},
+	{"i915_ring_stop", &i915_ring_stop_fops},
+	{"i915_gem_drop_caches", &i915_drop_caches_fops},
+	{"i915_error_state", &i915_error_state_fops},
+	{"i915_next_seqno", &i915_next_seqno_fops},
+};
+
 int i915_debugfs_init(struct drm_minor *minor)
 {
-	int ret;
-
-	ret = i915_debugfs_create(minor->debugfs_root, minor,
-				  "i915_wedged",
-				  &i915_wedged_fops);
-	if (ret)
-		return ret;
+	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

-	ret = i915_debugfs_create(minor->debugfs_root, minor,
-				  "i915_max_freq",
-				  &i915_max_freq_fops);
-	if (ret)
-		return ret;
-
-	ret = i915_debugfs_create(minor->debugfs_root, minor,
-				  "i915_min_freq",
-				  &i915_min_freq_fops);
-	if (ret)
-		return ret;
-
-	ret = i915_debugfs_create(minor->debugfs_root, minor,
-				  "i915_cache_sharing",
-				  &i915_cache_sharing_fops);
-	if (ret)
-		return ret;
-
-	ret = i915_debugfs_create(minor->debugfs_root, minor,
-				  "i915_ring_stop",
-				  &i915_ring_stop_fops);
-	if (ret)
-		return ret;
-
-	ret = i915_debugfs_create(minor->debugfs_root, minor,
-				  "i915_gem_drop_caches",
-				  &i915_drop_caches_fops);
-	if (ret)
-		return ret;
-
-	ret = i915_debugfs_create(minor->debugfs_root, minor,
-				  "i915_error_state",
-				  &i915_error_state_fops);
-	if (ret)
-		return ret;
-
-	ret = i915_debugfs_create(minor->debugfs_root, minor,
-				  "i915_next_seqno",
-				  &i915_next_seqno_fops);
-	if (ret)
-		return ret;
+	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
+		ret = i915_debugfs_create(minor->debugfs_root, minor,
+					  i915_debugfs_files[i].name,
+					  i915_debugfs_files[i].fops);
+		if (ret)
+			return ret;
+	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
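With i915_debugfs_files[] in place, roughly forty copy-pasted i915_debugfs_create() calls collapse into the loop above, and the same table drives removal in i915_debugfs_cleanup() below. Exposing another debugfs file is now a one-line table entry; the i915_new_knob entry here is hypothetical, purely to show the shape:

```c
static struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	/* ... existing entries ... */
	{"i915_new_knob", &i915_new_knob_fops},	/* hypothetical addition */
};
```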
@@ -2419,26 +2297,18 @@ int i915_debugfs_init(struct drm_minor *minor)

 void i915_debugfs_cleanup(struct drm_minor *minor)
 {
+	int i;
+
	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);
-	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
-				 1, minor);
-	drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
-				 1, minor);
-	drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
-				 1, minor);
-	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
-				 1, minor);
-	drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
-				 1, minor);
-	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
-				 1, minor);
-	drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
-				 1, minor);
-	drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
-				 1, minor);
+	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
+		struct drm_info_list *info_list =
+			(struct drm_info_list *) i915_debugfs_files[i].fops;
+
+		drm_debugfs_remove_files(info_list, 1, minor);
+	}
 }

 #endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index cf188ab7051a..fdaa0915ce56 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -976,6 +976,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
+	case I915_PARAM_HAS_WT:
+		value = HAS_WT(dev);
+		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
		break;
@@ -1293,7 +1296,7 @@ static int i915_load_modeset_init(struct drm_device *dev)

	intel_register_dsm_handler();

-	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
+	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

@@ -1323,10 +1326,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;
-	if (INTEL_INFO(dev)->num_pipes == 0) {
-		dev_priv->mm.suspended = 0;
+	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;
-	}

	ret = intel_fbdev_init(dev);
	if (ret)
@@ -1352,9 +1353,6 @@ static int i915_load_modeset_init(struct drm_device *dev)

	drm_kms_helper_poll_init(dev);

-	/* We're off and running w/KMS */
-	dev_priv->mm.suspended = 0;
-
	return 0;

 cleanup_gem:
@@ -1363,7 +1361,7 @@ cleanup_gem:
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	i915_gem_cleanup_aliasing_ppgtt(dev);
-	drm_mm_takedown(&dev_priv->mm.gtt_space);
+	drm_mm_takedown(&dev_priv->gtt.base.mm);
 cleanup_irq:
	drm_irq_uninstall(dev);
 cleanup_gem_stolen:
@@ -1441,22 +1439,6 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 }

 /**
- * intel_early_sanitize_regs - clean up BIOS state
- * @dev: DRM device
- *
- * This function must be called before we do any I915_READ or I915_WRITE. Its
- * purpose is to clean up any state left by the BIOS that may affect us when
- * reading and/or writing registers.
- */
-static void intel_early_sanitize_regs(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (HAS_FPGA_DBG_UNCLAIMED(dev))
-		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-}
-
-/**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
  * @flags: startup flags
@@ -1495,8 +1477,33 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	dev_priv->dev = dev;
	dev_priv->info = info;

+	spin_lock_init(&dev_priv->irq_lock);
+	spin_lock_init(&dev_priv->gpu_error.lock);
+	spin_lock_init(&dev_priv->backlight.lock);
+	spin_lock_init(&dev_priv->uncore.lock);
+	spin_lock_init(&dev_priv->mm.object_stat_lock);
+	mutex_init(&dev_priv->dpio_lock);
+	mutex_init(&dev_priv->rps.hw_lock);
+	mutex_init(&dev_priv->modeset_restore_lock);
+
+	mutex_init(&dev_priv->pc8.lock);
+	dev_priv->pc8.requirements_met = false;
+	dev_priv->pc8.gpu_idle = false;
+	dev_priv->pc8.irqs_disabled = false;
+	dev_priv->pc8.enabled = false;
+	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
+	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+
	i915_dump_device_info(dev_priv);

+	/* Not all pre-production machines fall into this category, only the
+	 * very first ones. Almost everything should work, except for maybe
+	 * suspend/resume. And we don't implement workarounds that affect only
+	 * pre-production machines. */
+	if (IS_HSW_EARLY_SDV(dev))
+		DRM_INFO("This is an early pre-production Haswell machine. "
+			 "It may not be fully functional.\n");
+
	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
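The pc8.disable_count initialisation to 2 is a reference count: requirements_met and gpu_idle each hold one "blocker" reference until satisfied, and PC8 entry is only allowed once the count reaches zero. A sketch of that counting idiom; the helper and struct names here are illustrative (the real driver uses hsw_enable_package_c8()/hsw_disable_package_c8() on drm_i915_private):

```c
struct pc8_state {
	struct mutex lock;
	int disable_count;		/* 0 == PC8 entry allowed */
	struct delayed_work enable_work;
};

static void pc8_block(struct pc8_state *pc8)	/* one more blocker */
{
	mutex_lock(&pc8->lock);
	pc8->disable_count++;
	mutex_unlock(&pc8->lock);
}

static void pc8_unblock(struct pc8_state *pc8)	/* a blocker cleared */
{
	mutex_lock(&pc8->lock);
	WARN_ON(pc8->disable_count == 0);
	if (--pc8->disable_count == 0)
		/* last blocker gone: arm delayed PC8 entry */
		schedule_delayed_work(&pc8->enable_work,
				      msecs_to_jiffies(i915_pc8_timeout));
	mutex_unlock(&pc8->lock);
}
```

Starting the count at 2 rather than 0 means the driver boots in the "blocked" state and must explicitly prove both conditions before the hardware is ever allowed into PC8.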
@@ -1522,7 +1529,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
		goto put_bridge;
	}

-	intel_early_sanitize_regs(dev);
+	intel_uncore_early_sanitize(dev);
+
+	if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
+		/* The docs do not explain exactly how the calculation can be
+		 * made. It is somewhat guessable, but for now, it's always
+		 * 128MB.
+		 * NB: We can't write IDICR yet because we do not have gt funcs
+		 * set up */
+		dev_priv->ellc_size = 128;
+		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
+	}

	ret = i915_gem_gtt_init(dev);
	if (ret)
@@ -1558,8 +1575,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
		goto out_rmmap;
	}

-	dev_priv->mm.gtt_mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
-						 aperture_size);
+	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
+					      aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
@@ -1585,7 +1602,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	intel_detect_pch(dev);

	intel_irq_init(dev);
-	intel_gt_init(dev);
+	intel_pm_init(dev);
+	intel_uncore_sanitize(dev);
+	intel_uncore_init(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
@@ -1610,15 +1629,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

-	spin_lock_init(&dev_priv->irq_lock);
-	spin_lock_init(&dev_priv->gpu_error.lock);
-	spin_lock_init(&dev_priv->rps.lock);
-	spin_lock_init(&dev_priv->backlight.lock);
-	mutex_init(&dev_priv->dpio_lock);
-
-	mutex_init(&dev_priv->rps.hw_lock);
-	mutex_init(&dev_priv->modeset_restore_lock);
-
	dev_priv->num_plane = 1;
	if (IS_VALLEYVIEW(dev))
		dev_priv->num_plane = 2;
@@ -1629,9 +1639,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
		goto out_gem_unload;
	}

-	/* Start out suspended */
-	dev_priv->mm.suspended = 1;
-
	if (HAS_POWER_WELL(dev))
		i915_init_power_well(dev);

@@ -1641,6 +1648,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
			DRM_ERROR("failed to init modeset\n");
			goto out_gem_unload;
		}
+	} else {
+		/* Start out suspended in ums mode. */
+		dev_priv->ums.mm_suspended = 1;
	}

	i915_setup_sysfs(dev);
@@ -1648,7 +1658,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
-		acpi_video_register_with_quirks();
+		acpi_video_register();
	}

	if (IS_GEN5(dev))
@@ -1667,9 +1677,9 @@ out_gem_unload:
	intel_teardown_mchbar(dev);
	destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
-	arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
-	dev_priv->gtt.gtt_remove(dev);
+	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
 out_rmmap:
	pci_iounmap(dev->pdev, dev_priv->regs);
 put_bridge:
@@ -1686,8 +1696,13 @@ int i915_driver_unload(struct drm_device *dev)

	intel_gpu_ips_teardown();

-	if (HAS_POWER_WELL(dev))
+	if (HAS_POWER_WELL(dev)) {
+		/* The i915.ko module is still not prepared to be loaded when
+		 * the power well is not enabled, so just enable it in case
+		 * we're going to unload/reload. */
+		intel_set_power_well(dev, true);
		i915_remove_power_well(dev);
+	}

	i915_teardown_sysfs(dev);

@@ -1705,7 +1720,7 @@ int i915_driver_unload(struct drm_device *dev)
		cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	io_mapping_free(dev_priv->gtt.mappable);
-	arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

@@ -1733,6 +1748,8 @@ int i915_driver_unload(struct drm_device *dev)
		cancel_work_sync(&dev_priv->gpu_error.work);
		i915_destroy_error_state(dev);

+	cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
+
	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

@@ -1754,7 +1771,9 @@ int i915_driver_unload(struct drm_device *dev)
		i915_free_hws(dev);
	}

-	drm_mm_takedown(&dev_priv->mm.gtt_space);
+	list_del(&dev_priv->gtt.base.global_link);
+	WARN_ON(!list_empty(&dev_priv->vm_list));
+	drm_mm_takedown(&dev_priv->gtt.base.mm);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

@@ -1764,7 +1783,7 @@ int i915_driver_unload(struct drm_device *dev)
	destroy_workqueue(dev_priv->wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

-	dev_priv->gtt.gtt_remove(dev);
+	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);

	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);
@@ -1840,14 +1859,14 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
	kfree(file_priv);
 }

-struct drm_ioctl_desc i915_ioctls[] = {
+const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
@@ -1860,35 +1879,35 @@ struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 };

 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f4af1ca0fb62..ccb28ead3501 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -118,10 +118,14 @@ module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
 MODULE_PARM_DESC(i915_enable_ppgtt,
		"Enable PPGTT (default: true)");

-unsigned int i915_preliminary_hw_support __read_mostly = 0;
+int i915_enable_psr __read_mostly = 0;
+module_param_named(enable_psr, i915_enable_psr, int, 0600);
+MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+
+unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
 module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
 MODULE_PARM_DESC(preliminary_hw_support,
-		"Enable preliminary hardware support. (default: false)");
+		"Enable preliminary hardware support.");

 int i915_disable_power_well __read_mostly = 1;
 module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
@@ -132,6 +136,24 @@ int i915_enable_ips __read_mostly = 1;
 module_param_named(enable_ips, i915_enable_ips, int, 0600);
 MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");

+bool i915_fastboot __read_mostly = 0;
+module_param_named(fastboot, i915_fastboot, bool, 0600);
+MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
+		 "(default: false)");
+
+int i915_enable_pc8 __read_mostly = 1;
+module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
+MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");
+
+int i915_pc8_timeout __read_mostly = 5000;
+module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
+MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");
+
+bool i915_prefault_disable __read_mostly;
+module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
+MODULE_PARM_DESC(prefault_disable,
+		"Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
+
 static struct drm_driver driver;
 extern int intel_agp_enabled;

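Each of the new knobs follows the standard module_param_named() triple, and mode 0600 makes the value root-writable at runtime via /sys/module/i915/parameters/<name>, so code reading these variables must tolerate them changing after load. The anatomy, using enable_pc8 from the hunk above:

```c
int i915_enable_pc8 __read_mostly = 1;	/* backing variable, default on */
module_param_named(enable_pc8,		/* exposed name: i915.enable_pc8 */
		   i915_enable_pc8,	/* backing variable */
		   int,			/* parameter type */
		   0600);		/* sysfs mode: root read/write */
MODULE_PARM_DESC(enable_pc8,
		 "Enable support for low power package C states (PC8+) (default: true)");
```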
@@ -543,6 +565,9 @@ static int i915_drm_freeze(struct drm_device *dev)
543 dev_priv->modeset_restore = MODESET_SUSPENDED; 565 dev_priv->modeset_restore = MODESET_SUSPENDED;
544 mutex_unlock(&dev_priv->modeset_restore_lock); 566 mutex_unlock(&dev_priv->modeset_restore_lock);
545 567
568 /* We do a lot of poking in a lot of registers, make sure they work
569 * properly. */
570 hsw_disable_package_c8(dev_priv);
546 intel_set_power_well(dev, true); 571 intel_set_power_well(dev, true);
547 572
548 drm_kms_helper_poll_disable(dev); 573 drm_kms_helper_poll_disable(dev);
@@ -551,7 +576,11 @@ static int i915_drm_freeze(struct drm_device *dev)
 
 	/* If KMS is active, we do the leavevt stuff here */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		int error = i915_gem_idle(dev);
+		int error;
+
+		mutex_lock(&dev->struct_mutex);
+		error = i915_gem_idle(dev);
+		mutex_unlock(&dev->struct_mutex);
 		if (error) {
 			dev_err(&dev->pdev->dev,
 				"GEM idle failed, resume might fail\n");
@@ -656,7 +685,6 @@ static int __i915_drm_thaw(struct drm_device *dev)
 	intel_init_pch_refclk(dev);
 
 	mutex_lock(&dev->struct_mutex);
-	dev_priv->mm.suspended = 0;
 
 	error = i915_gem_init_hw(dev);
 	mutex_unlock(&dev->struct_mutex);
@@ -696,6 +724,10 @@ static int __i915_drm_thaw(struct drm_device *dev)
 		schedule_work(&dev_priv->console_resume_work);
 	}
 
+	/* Undo what we did at i915_drm_freeze so the refcount goes back to the
+	 * expected level. */
+	hsw_enable_package_c8(dev_priv);
+
 	mutex_lock(&dev_priv->modeset_restore_lock);
 	dev_priv->modeset_restore = MODESET_DONE;
 	mutex_unlock(&dev_priv->modeset_restore_lock);
@@ -706,7 +738,7 @@ static int i915_drm_thaw(struct drm_device *dev)
 {
 	int error = 0;
 
-	intel_gt_reset(dev);
+	intel_uncore_sanitize(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		mutex_lock(&dev->struct_mutex);
@@ -732,7 +764,7 @@ int i915_resume(struct drm_device *dev)
 
 	pci_set_master(dev->pdev);
 
-	intel_gt_reset(dev);
+	intel_uncore_sanitize(dev);
 
 	/*
 	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
@@ -753,139 +785,6 @@ int i915_resume(struct drm_device *dev)
 	return 0;
 }
 
-static int i8xx_do_reset(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (IS_I85X(dev))
-		return -ENODEV;
-
-	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
-	POSTING_READ(D_STATE);
-
-	if (IS_I830(dev) || IS_845G(dev)) {
-		I915_WRITE(DEBUG_RESET_I830,
-			   DEBUG_RESET_DISPLAY |
-			   DEBUG_RESET_RENDER |
-			   DEBUG_RESET_FULL);
-		POSTING_READ(DEBUG_RESET_I830);
-		msleep(1);
-
-		I915_WRITE(DEBUG_RESET_I830, 0);
-		POSTING_READ(DEBUG_RESET_I830);
-	}
-
-	msleep(1);
-
-	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
-	POSTING_READ(D_STATE);
-
-	return 0;
-}
-
-static int i965_reset_complete(struct drm_device *dev)
-{
-	u8 gdrst;
-	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
-	return (gdrst & GRDOM_RESET_ENABLE) == 0;
-}
-
-static int i965_do_reset(struct drm_device *dev)
-{
-	int ret;
-	u8 gdrst;
-
-	/*
-	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
-	 * well as the reset bit (GR/bit 0).  Setting the GR bit
-	 * triggers the reset; when done, the hardware will clear it.
-	 */
-	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
-	pci_write_config_byte(dev->pdev, I965_GDRST,
-			      gdrst | GRDOM_RENDER |
-			      GRDOM_RESET_ENABLE);
-	ret = wait_for(i965_reset_complete(dev), 500);
-	if (ret)
-		return ret;
-
-	/* We can't reset render&media without also resetting display ... */
-	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
-	pci_write_config_byte(dev->pdev, I965_GDRST,
-			      gdrst | GRDOM_MEDIA |
-			      GRDOM_RESET_ENABLE);
-
-	return wait_for(i965_reset_complete(dev), 500);
-}
-
-static int ironlake_do_reset(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 gdrst;
-	int ret;
-
-	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
-	gdrst &= ~GRDOM_MASK;
-	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
-		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
-	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
-	if (ret)
-		return ret;
-
-	/* We can't reset render&media without also resetting display ... */
-	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
-	gdrst &= ~GRDOM_MASK;
-	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
-		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
-	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
-}
-
-static int gen6_do_reset(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-	unsigned long irqflags;
-
-	/* Hold gt_lock across reset to prevent any register access
-	 * with forcewake not set correctly
-	 */
-	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
-
-	/* Reset the chip */
-
-	/* GEN6_GDRST is not in the gt power well, no need to check
-	 * for fifo space for the write or forcewake the chip for
-	 * the read
-	 */
-	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
-
-	/* Spin waiting for the device to ack the reset request */
-	ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
-
-	/* If reset with a user forcewake, try to restore, otherwise turn it off */
-	if (dev_priv->forcewake_count)
-		dev_priv->gt.force_wake_get(dev_priv);
-	else
-		dev_priv->gt.force_wake_put(dev_priv);
-
-	/* Restore fifo count */
-	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
-
-	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
-	return ret;
-}
-
-int intel_gpu_reset(struct drm_device *dev)
-{
-	switch (INTEL_INFO(dev)->gen) {
-	case 7:
-	case 6: return gen6_do_reset(dev);
-	case 5: return ironlake_do_reset(dev);
-	case 4: return i965_do_reset(dev);
-	case 2: return i8xx_do_reset(dev);
-	default: return -ENODEV;
-	}
-}
-
 /**
  * i915_reset - reset chip after a hang
  * @dev: drm device to reset
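The reset helpers deleted above (they move into the new intel_uncore.c as part of this series) all poll a reset register with wait_for(COND, ms). For reference, a poll-with-timeout helper of that shape looks roughly like this; a sketch of the general pattern, not the driver's exact wait_for definition from intel_drv.h:

/* Approximation of a wait_for(COND, MS)-style macro: poll COND until it
 * becomes true or MS milliseconds elapse. Returns 0 on success and
 * -ETIMEDOUT otherwise. Illustrative only. */
#define example_wait_for(COND, MS) ({					\
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		cpu_relax();						\
	}								\
	ret__;								\
})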
@@ -955,11 +854,11 @@ int i915_reset(struct drm_device *dev)
 	 * switched away).
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
-			!dev_priv->mm.suspended) {
+			!dev_priv->ums.mm_suspended) {
 		struct intel_ring_buffer *ring;
 		int i;
 
-		dev_priv->mm.suspended = 0;
+		dev_priv->ums.mm_suspended = 0;
 
 		i915_gem_init_swizzling(dev);
@@ -1110,7 +1009,6 @@ static const struct file_operations i915_driver_fops = {
 	.unlocked_ioctl = drm_ioctl,
 	.mmap = drm_gem_mmap,
 	.poll = drm_poll,
-	.fasync = drm_fasync,
 	.read = drm_read,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = i915_compat_ioctl,
@@ -1123,8 +1021,9 @@ static struct drm_driver driver = {
 	 * deal with them for Intel hardware.
 	 */
 	.driver_features =
-	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
-	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
+	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
+	    DRIVER_RENDER,
 	.load = i915_driver_load,
 	.unload = i915_driver_unload,
 	.open = i915_driver_open,
@@ -1154,7 +1053,7 @@ static struct drm_driver driver = {
 
 	.dumb_create = i915_gem_dumb_create,
 	.dumb_map_offset = i915_gem_mmap_gtt,
-	.dumb_destroy = i915_gem_dumb_destroy,
+	.dumb_destroy = drm_gem_dumb_destroy,
 	.ioctls = i915_ioctls,
 	.fops = &i915_driver_fops,
 	.name = DRIVER_NAME,
@@ -1215,133 +1114,3 @@ module_exit(i915_exit);
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL and additional rights");
-
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
-	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
-	 ((reg) < 0x40000) && \
-	 ((reg) != FORCEWAKE))
-static void
-ilk_dummy_write(struct drm_i915_private *dev_priv)
-{
-	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
-	 * the chip from rc6 before touching it for real. MI_MODE is masked,
-	 * hence harmless to write 0 into. */
-	I915_WRITE_NOTRACE(MI_MODE, 0);
-}
-
-static void
-hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
-{
-	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
-	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
-		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
-			  reg);
-		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-	}
-}
-
-static void
-hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
-{
-	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
-	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
-		DRM_ERROR("Unclaimed write to %x\n", reg);
-		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-	}
-}
-
-#define __i915_read(x, y) \
-u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
-	u##x val = 0; \
-	if (IS_GEN5(dev_priv->dev)) \
-		ilk_dummy_write(dev_priv); \
-	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-		unsigned long irqflags; \
-		spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
-		if (dev_priv->forcewake_count == 0) \
-			dev_priv->gt.force_wake_get(dev_priv); \
-		val = read##y(dev_priv->regs + reg); \
-		if (dev_priv->forcewake_count == 0) \
-			dev_priv->gt.force_wake_put(dev_priv); \
-		spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
-	} else { \
-		val = read##y(dev_priv->regs + reg); \
-	} \
-	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
-	return val; \
-}
-
-__i915_read(8, b)
-__i915_read(16, w)
-__i915_read(32, l)
-__i915_read(64, q)
-#undef __i915_read
-
-#define __i915_write(x, y) \
-void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
-	u32 __fifo_ret = 0; \
-	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
-	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
-	} \
-	if (IS_GEN5(dev_priv->dev)) \
-		ilk_dummy_write(dev_priv); \
-	hsw_unclaimed_reg_clear(dev_priv, reg); \
-	write##y(val, dev_priv->regs + reg); \
-	if (unlikely(__fifo_ret)) { \
-		gen6_gt_check_fifodbg(dev_priv); \
-	} \
-	hsw_unclaimed_reg_check(dev_priv, reg); \
-}
-__i915_write(8, b)
-__i915_write(16, w)
-__i915_write(32, l)
-__i915_write(64, q)
-#undef __i915_write
-
-static const struct register_whitelist {
-	uint64_t offset;
-	uint32_t size;
-	uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
-} whitelist[] = {
-	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
-};
-
-int i915_reg_read_ioctl(struct drm_device *dev,
-			void *data, struct drm_file *file)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_reg_read *reg = data;
-	struct register_whitelist const *entry = whitelist;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
-		if (entry->offset == reg->offset &&
-		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
-			break;
-	}
-
-	if (i == ARRAY_SIZE(whitelist))
-		return -EINVAL;
-
-	switch (entry->size) {
-	case 8:
-		reg->val = I915_READ64(reg->offset);
-		break;
-	case 4:
-		reg->val = I915_READ(reg->offset);
-		break;
-	case 2:
-		reg->val = I915_READ16(reg->offset);
-		break;
-	case 1:
-		reg->val = I915_READ8(reg->offset);
-		break;
-	default:
-		WARN_ON(1);
-		return -EINVAL;
-	}
-
-	return 0;
-}
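The __i915_read()/__i915_write() accessors removed above implement forcewake bracketing: take gt_lock, grab a temporary forcewake reference if nobody holds one, do the MMIO access, then drop it. Expanded for the 32-bit read case and stripped of tracing and gen-specific quirks, the removed macro is roughly equivalent to this sketch (written with the pre-patch field names that this same series moves into struct intel_uncore):

/* Sketch of the forcewake-bracketed MMIO read implemented by the removed
 * __i915_read() macro; simplified, tracing and gen checks omitted. */
static u32 example_uncore_read32(struct drm_i915_private *dev_priv, u32 reg)
{
	unsigned long irqflags;
	u32 val;

	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
	if (dev_priv->forcewake_count == 0)
		dev_priv->gt.force_wake_get(dev_priv);	/* wake the GT */
	val = readl(dev_priv->regs + reg);
	if (dev_priv->forcewake_count == 0)
		dev_priv->gt.force_wake_put(dev_priv);	/* let it sleep again */
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);

	return val;
}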
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a416645bcd23..52a3785a3fdf 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -144,6 +144,7 @@ enum intel_dpll_id {
 
 struct intel_dpll_hw_state {
 	uint32_t dpll;
+	uint32_t dpll_md;
 	uint32_t fp0;
 	uint32_t fp1;
 };
@@ -156,6 +157,8 @@ struct intel_shared_dpll {
 	/* should match the index in the dev_priv->shared_dplls array */
 	enum intel_dpll_id id;
 	struct intel_dpll_hw_state hw_state;
+	void (*mode_set)(struct drm_i915_private *dev_priv,
+			 struct intel_shared_dpll *pll);
 	void (*enable)(struct drm_i915_private *dev_priv,
 		       struct intel_shared_dpll *pll);
 	void (*disable)(struct drm_i915_private *dev_priv,
@@ -198,7 +201,6 @@ struct intel_ddi_plls {
 #define DRIVER_MINOR		6
 #define DRIVER_PATCHLEVEL	0
 
-#define WATCH_COHERENCY	0
 #define WATCH_LISTS	0
 #define WATCH_GTT	0
 
@@ -320,8 +322,8 @@ struct drm_i915_error_state {
 		u32 purgeable:1;
 		s32 ring:4;
 		u32 cache_level:2;
-	} *active_bo, *pinned_bo;
-	u32 active_bo_count, pinned_bo_count;
+	} **active_bo, **pinned_bo;
+	u32 *active_bo_count, *pinned_bo_count;
 	struct intel_overlay_error_state *overlay;
 	struct intel_display_error_state *display;
 };
@@ -356,14 +358,16 @@ struct drm_i915_display_funcs {
 			  struct dpll *match_clock,
 			  struct dpll *best_clock);
 	void (*update_wm)(struct drm_device *dev);
-	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
+	void (*update_sprite_wm)(struct drm_plane *plane,
+				 struct drm_crtc *crtc,
 				 uint32_t sprite_width, int pixel_size,
-				 bool enable);
+				 bool enable, bool scaled);
 	void (*modeset_global_resources)(struct drm_device *dev);
 	/* Returns the active state of the crtc, and if the crtc is active,
 	 * fills out the pipe-config with the hw state. */
 	bool (*get_pipe_config)(struct intel_crtc *,
 				struct intel_crtc_config *);
+	void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
 	int (*crtc_mode_set)(struct drm_crtc *crtc,
 			     int x, int y,
 			     struct drm_framebuffer *old_fb);
@@ -376,7 +380,8 @@ struct drm_i915_display_funcs {
 	void (*init_clock_gating)(struct drm_device *dev);
 	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
 			  struct drm_framebuffer *fb,
-			  struct drm_i915_gem_object *obj);
+			  struct drm_i915_gem_object *obj,
+			  uint32_t flags);
 	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			    int x, int y);
 	void (*hpd_irq_setup)(struct drm_device *dev);
@@ -387,11 +392,20 @@ struct drm_i915_display_funcs {
 	/* pll clock increase/decrease */
 };
 
-struct drm_i915_gt_funcs {
+struct intel_uncore_funcs {
 	void (*force_wake_get)(struct drm_i915_private *dev_priv);
 	void (*force_wake_put)(struct drm_i915_private *dev_priv);
 };
 
+struct intel_uncore {
+	spinlock_t lock; /** lock is also taken in irq contexts. */
+
+	struct intel_uncore_funcs funcs;
+
+	unsigned fifo_count;
+	unsigned forcewake_count;
+};
+
 #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
 	func(is_mobile) sep \
 	func(is_i85x) sep \
@@ -436,12 +450,64 @@ struct intel_device_info {
 
 enum i915_cache_level {
 	I915_CACHE_NONE = 0,
-	I915_CACHE_LLC,
-	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
+	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specifc
+			      caches, eg sampler/render caches, and the
+			      large Last-Level-Cache. LLC is coherent with
+			      the CPU, but L3 is only visible to the GPU. */
+	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
 };
 
 typedef uint32_t gen6_gtt_pte_t;
 
+struct i915_address_space {
+	struct drm_mm mm;
+	struct drm_device *dev;
+	struct list_head global_link;
+	unsigned long start;		/* Start offset always 0 for dri2 */
+	size_t total;		/* size addr space maps (ex. 2GB for ggtt) */
+
+	struct {
+		dma_addr_t addr;
+		struct page *page;
+	} scratch;
+
+	/**
+	 * List of objects currently involved in rendering.
+	 *
+	 * Includes buffers having the contents of their GPU caches
+	 * flushed, not necessarily primitives. last_rendering_seqno
+	 * represents when the rendering involved will be completed.
+	 *
+	 * A reference is held on the buffer while on this list.
+	 */
+	struct list_head active_list;
+
+	/**
+	 * LRU list of objects which are not in the ringbuffer and
+	 * are ready to unbind, but are still in the GTT.
+	 *
+	 * last_rendering_seqno is 0 while an object is in this list.
+	 *
+	 * A reference is not held on the buffer while on this list,
+	 * as merely being GTT-bound shouldn't prevent its being
+	 * freed, and we'll pull it off the list in the free path.
+	 */
+	struct list_head inactive_list;
+
+	/* FIXME: Need a more generic return type */
+	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
+				     enum i915_cache_level level);
+	void (*clear_range)(struct i915_address_space *vm,
+			    unsigned int first_entry,
+			    unsigned int num_entries);
+	void (*insert_entries)(struct i915_address_space *vm,
+			       struct sg_table *st,
+			       unsigned int first_entry,
+			       enum i915_cache_level cache_level);
+	void (*cleanup)(struct i915_address_space *vm);
+};
+
 /* The Graphics Translation Table is the way in which GEN hardware translates a
  * Graphics Virtual Address into a Physical Address. In addition to the normal
  * collateral associated with any va->pa translations GEN hardware also has a
@@ -450,8 +516,7 @@ typedef uint32_t gen6_gtt_pte_t;
  * the spec.
  */
 struct i915_gtt {
-	unsigned long start;		/* Start offset of used GTT */
-	size_t total;			/* Total size GTT can map */
+	struct i915_address_space base;
 	size_t stolen_size;		/* Total size of stolen memory */
 
 	unsigned long mappable_end;	/* End offset that we can CPU map */
@@ -462,50 +527,47 @@ struct i915_gtt {
 	void __iomem *gsm;
 
 	bool do_idle_maps;
-	dma_addr_t scratch_page_dma;
-	struct page *scratch_page;
+
+	int mtrr;
 
 	/* global gtt ops */
 	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
 			 size_t *stolen, phys_addr_t *mappable_base,
 			 unsigned long *mappable_end);
-	void (*gtt_remove)(struct drm_device *dev);
-	void (*gtt_clear_range)(struct drm_device *dev,
-				unsigned int first_entry,
-				unsigned int num_entries);
-	void (*gtt_insert_entries)(struct drm_device *dev,
-				   struct sg_table *st,
-				   unsigned int pg_start,
-				   enum i915_cache_level cache_level);
-	gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
-				     dma_addr_t addr,
-				     enum i915_cache_level level);
 };
-#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
+#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
 
-#define I915_PPGTT_PD_ENTRIES 512
-#define I915_PPGTT_PT_ENTRIES 1024
 struct i915_hw_ppgtt {
-	struct drm_device *dev;
+	struct i915_address_space base;
 	unsigned num_pd_entries;
 	struct page **pt_pages;
 	uint32_t pd_offset;
 	dma_addr_t *pt_dma_addr;
-	dma_addr_t scratch_page_dma_addr;
 
-	/* pte functions, mirroring the interface of the global gtt. */
-	void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
-			    unsigned int first_entry,
-			    unsigned int num_entries);
-	void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
-			       struct sg_table *st,
-			       unsigned int pg_start,
-			       enum i915_cache_level cache_level);
-	gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
-				     dma_addr_t addr,
-				     enum i915_cache_level level);
 	int (*enable)(struct drm_device *dev);
-	void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
+};
+
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
+ * will always be <= an objects lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+	struct drm_mm_node node;
+	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm;
+
+	/** This object's place on the active/inactive lists */
+	struct list_head mm_list;
+
+	struct list_head vma_link; /* Link in the object's VMA list */
+
+	/** This vma's place in the batchbuffer or on the eviction list */
+	struct list_head exec_list;
+
 };
 
 struct i915_ctx_hang_stats {
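With this change a GEM object can be bound into several address spaces, one i915_vma per (object, vm) pair, linked through the object's vma_list. The lookup helper this series declares (i915_gem_obj_to_vma, further down in this header) can plausibly be implemented as a simple list walk; a sketch, not necessarily the verbatim body from i915_gem.c:

/* Plausible obj -> vma lookup: linear walk of the object's VMA list. */
struct i915_vma *example_obj_to_vma(struct drm_i915_gem_object *obj,
				    struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->vm == vm)
			return vma;

	return NULL;
}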
@@ -528,15 +590,48 @@ struct i915_hw_context {
 	struct i915_ctx_hang_stats hang_stats;
 };
 
-enum no_fbc_reason {
-	FBC_NO_OUTPUT, /* no outputs enabled to compress */
-	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
-	FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
-	FBC_MODE_TOO_LARGE, /* mode too large for compression */
-	FBC_BAD_PLANE, /* fbc not supported on plane */
-	FBC_NOT_TILED, /* buffer not tiled */
-	FBC_MULTIPLE_PIPES, /* more than one pipe active */
-	FBC_MODULE_PARAM,
+struct i915_fbc {
+	unsigned long size;
+	unsigned int fb_id;
+	enum plane plane;
+	int y;
+
+	struct drm_mm_node *compressed_fb;
+	struct drm_mm_node *compressed_llb;
+
+	struct intel_fbc_work {
+		struct delayed_work work;
+		struct drm_crtc *crtc;
+		struct drm_framebuffer *fb;
+		int interval;
+	} *fbc_work;
+
+	enum no_fbc_reason {
+		FBC_OK, /* FBC is enabled */
+		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
+		FBC_NO_OUTPUT, /* no outputs enabled to compress */
+		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
+		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+		FBC_MODE_TOO_LARGE, /* mode too large for compression */
+		FBC_BAD_PLANE, /* fbc not supported on plane */
+		FBC_NOT_TILED, /* buffer not tiled */
+		FBC_MULTIPLE_PIPES, /* more than one pipe active */
+		FBC_MODULE_PARAM,
+		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
+	} no_fbc_reason;
+};
+
+enum no_psr_reason {
+	PSR_NO_SOURCE, /* Not supported on platform */
+	PSR_NO_SINK, /* Not supported by panel */
+	PSR_MODULE_PARAM,
+	PSR_CRTC_NOT_ACTIVE,
+	PSR_PWR_WELL_ENABLED,
+	PSR_NOT_TILED,
+	PSR_SPRITE_ENABLED,
+	PSR_S3D_ENABLED,
+	PSR_INTERLACED_ENABLED,
+	PSR_HSW_NOT_DDIA,
 };
 
 enum intel_pch {
@@ -555,6 +650,7 @@ enum intel_sbi_destination {
 #define QUIRK_PIPEA_FORCE (1<<0)
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
 
 struct intel_fbdev;
 struct intel_fbc_work;
@@ -721,12 +817,12 @@ struct i915_suspend_saved_registers {
 };
 
 struct intel_gen6_power_mgmt {
+	/* work and pm_iir are protected by dev_priv->irq_lock */
 	struct work_struct work;
-	struct delayed_work vlv_work;
 	u32 pm_iir;
-	/* lock - irqsave spinlock that protectects the work_struct and
-	 * pm_iir. */
-	spinlock_t lock;
+
+	/* On vlv we need to manually drop to Vmin with a delayed work. */
+	struct delayed_work vlv_work;
 
 	/* The below variables an all the rps hw state are protected by
 	 * dev->struct mutext. */
@@ -792,6 +888,18 @@ struct i915_dri1_state {
 	uint32_t counter;
 };
 
+struct i915_ums_state {
+	/**
+	 * Flag if the X Server, and thus DRM, is not currently in
+	 * control of the device.
+	 *
+	 * This is set between LeaveVT and EnterVT. It needs to be
+	 * replaced with a semaphore. It also needs to be
+	 * transitioned away from for kernel modesetting.
+	 */
+	int mm_suspended;
+};
+
 struct intel_l3_parity {
 	u32 *remap_info;
 	struct work_struct error_work;
@@ -800,8 +908,6 @@ struct intel_l3_parity {
 struct i915_gem_mm {
 	/** Memory allocator for GTT stolen memory */
 	struct drm_mm stolen;
-	/** Memory allocator for GTT */
-	struct drm_mm gtt_space;
 	/** List of all objects in gtt_space. Used to restore gtt
 	 * mappings on resume */
 	struct list_head bound_list;
@@ -815,37 +921,12 @@ struct i915_gem_mm {
 	/** Usable portion of the GTT for GEM */
 	unsigned long stolen_base; /* limited to low memory (32-bit) */
 
-	int gtt_mtrr;
-
 	/** PPGTT used for aliasing the PPGTT with the GTT */
 	struct i915_hw_ppgtt *aliasing_ppgtt;
 
 	struct shrinker inactive_shrinker;
 	bool shrinker_no_lock_stealing;
 
-	/**
-	 * List of objects currently involved in rendering.
-	 *
-	 * Includes buffers having the contents of their GPU caches
-	 * flushed, not necessarily primitives. last_rendering_seqno
-	 * represents when the rendering involved will be completed.
-	 *
-	 * A reference is held on the buffer while on this list.
-	 */
-	struct list_head active_list;
-
-	/**
-	 * LRU list of objects which are not in the ringbuffer and
-	 * are ready to unbind, but are still in the GTT.
-	 *
-	 * last_rendering_seqno is 0 while an object is in this list.
-	 *
-	 * A reference is not held on the buffer while on this list,
-	 * as merely being GTT-bound shouldn't prevent its being
-	 * freed, and we'll pull it off the list in the free path.
-	 */
-	struct list_head inactive_list;
-
 	/** LRU list of objects with fence regs on them. */
 	struct list_head fence_list;
 
@@ -864,16 +945,6 @@ struct i915_gem_mm {
 	 */
 	bool interruptible;
 
-	/**
-	 * Flag if the X Server, and thus DRM, is not currently in
-	 * control of the device.
-	 *
-	 * This is set between LeaveVT and EnterVT. It needs to be
-	 * replaced with a semaphore. It also needs to be
-	 * transitioned away from for kernel modesetting.
-	 */
-	int suspended;
-
 	/** Bit 6 swizzling required for X tiling */
 	uint32_t bit_6_swizzle_x;
 	/** Bit 6 swizzling required for Y tiling */
@@ -883,6 +954,7 @@ struct i915_gem_mm {
 	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
 
 	/* accounting, useful for userland debugging */
+	spinlock_t object_stat_lock;
 	size_t object_memory;
 	u32 object_count;
 };
@@ -896,6 +968,11 @@ struct drm_i915_error_state_buf {
 	loff_t pos;
 };
 
+struct i915_error_state_file_priv {
+	struct drm_device *dev;
+	struct drm_i915_error_state *error;
+};
+
 struct i915_gpu_error {
 	/* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -987,6 +1064,88 @@ struct intel_vbt_data {
 	struct child_device_config *child_dev;
 };
 
+enum intel_ddb_partitioning {
+	INTEL_DDB_PART_1_2,
+	INTEL_DDB_PART_5_6, /* IVB+ */
+};
+
+struct intel_wm_level {
+	bool enable;
+	uint32_t pri_val;
+	uint32_t spr_val;
+	uint32_t cur_val;
+	uint32_t fbc_val;
+};
+
+/*
+ * This struct tracks the state needed for the Package C8+ feature.
+ *
+ * Package states C8 and deeper are really deep PC states that can only be
+ * reached when all the devices on the system allow it, so even if the graphics
+ * device allows PC8+, it doesn't mean the system will actually get to these
+ * states.
+ *
+ * Our driver only allows PC8+ when all the outputs are disabled, the power well
+ * is disabled and the GPU is idle. When these conditions are met, we manually
+ * do the other conditions: disable the interrupts, clocks and switch LCPLL
+ * refclk to Fclk.
+ *
+ * When we really reach PC8 or deeper states (not just when we allow it) we lose
+ * the state of some registers, so when we come back from PC8+ we need to
+ * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
+ * need to take care of the registers kept by RC6.
+ *
+ * The interrupt disabling is part of the requirements. We can only leave the
+ * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
+ * can lock the machine.
+ *
+ * Ideally every piece of our code that needs PC8+ disabled would call
+ * hsw_disable_package_c8, which would increment disable_count and prevent the
+ * system from reaching PC8+. But we don't have a symmetric way to do this for
+ * everything, so we have the requirements_met and gpu_idle variables. When we
+ * switch requirements_met or gpu_idle to true we decrease disable_count, and
+ * increase it in the opposite case. The requirements_met variable is true when
+ * all the CRTCs, encoders and the power well are disabled. The gpu_idle
+ * variable is true when the GPU is idle.
+ *
+ * In addition to everything, we only actually enable PC8+ if disable_count
+ * stays at zero for at least some seconds. This is implemented with the
+ * enable_work variable. We do this so we don't enable/disable PC8 dozens of
+ * consecutive times when all screens are disabled and some background app
+ * queries the state of our connectors, or we have some application constantly
+ * waking up to use the GPU. Only after the enable_work function actually
+ * enables PC8+ the "enable" variable will become true, which means that it can
+ * be false even if disable_count is 0.
+ *
+ * The irqs_disabled variable becomes true exactly after we disable the IRQs and
+ * goes back to false exactly before we reenable the IRQs. We use this variable
+ * to check if someone is trying to enable/disable IRQs while they're supposed
+ * to be disabled. This shouldn't happen and we'll print some error messages in
+ * case it happens, but if it actually happens we'll also update the variables
+ * inside struct regsave so when we restore the IRQs they will contain the
+ * latest expected values.
+ *
+ * For more, read "Display Sequences for Package C8" on our documentation.
+ */
+struct i915_package_c8 {
+	bool requirements_met;
+	bool gpu_idle;
+	bool irqs_disabled;
+	/* Only true after the delayed work task actually enables it. */
+	bool enabled;
+	int disable_count;
+	struct mutex lock;
+	struct delayed_work enable_work;
+
+	struct {
+		uint32_t deimr;
+		uint32_t sdeimr;
+		uint32_t gtimr;
+		uint32_t gtier;
+		uint32_t gen6_pmimr;
+	} regsave;
+};
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
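The comment above describes disable_count as a plain refcount with a debounced re-enable. Reduced to its refcounting shape (the real hsw_{enable,disable}_package_c8() also save/restore registers and gate interrupts; this sketch only models the protocol), the idea is roughly:

/* Stripped-down model of the PC8+ disable_count protocol described above. */
static void example_disable_package_c8(struct i915_package_c8 *pc8)
{
	mutex_lock(&pc8->lock);
	if (pc8->disable_count++ == 0 && pc8->enabled) {
		/* actually leave PC8+: restore regs, re-enable interrupts */
		pc8->enabled = false;
	}
	mutex_unlock(&pc8->lock);
}

static void example_enable_package_c8(struct i915_package_c8 *pc8)
{
	mutex_lock(&pc8->lock);
	WARN_ON(pc8->disable_count == 0);
	if (--pc8->disable_count == 0) {
		/* don't enter PC8+ immediately; debounce via delayed work
		 * using the i915_pc8_timeout parameter added above */
		schedule_delayed_work(&pc8->enable_work,
				      msecs_to_jiffies(i915_pc8_timeout));
	}
	mutex_unlock(&pc8->lock);
}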
@@ -997,14 +1156,7 @@ typedef struct drm_i915_private {
 
 	void __iomem *regs;
 
-	struct drm_i915_gt_funcs gt;
-	/** gt_fifo_count and the subsequent register write are synchronized
-	 * with dev->struct_mutex. */
-	unsigned gt_fifo_count;
-	/** forcewake_count is protected by gt_lock */
-	unsigned forcewake_count;
-	/** gt_lock is also taken in irq contexts. */
-	spinlock_t gt_lock;
+	struct intel_uncore uncore;
 
 	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
 
@@ -1041,6 +1193,7 @@ typedef struct drm_i915_private {
 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 irq_mask;
 	u32 gt_irq_mask;
+	u32 pm_irq_mask;
 
 	struct work_struct hotplug_work;
 	bool enable_hotplug_processing;
@@ -1058,12 +1211,7 @@ typedef struct drm_i915_private {
 
 	int num_plane;
 
-	unsigned long cfb_size;
-	unsigned int cfb_fb;
-	enum plane cfb_plane;
-	int cfb_y;
-	struct intel_fbc_work *fbc_work;
-
+	struct i915_fbc fbc;
 	struct intel_opregion opregion;
 	struct intel_vbt_data vbt;
 
@@ -1080,8 +1228,6 @@ typedef struct drm_i915_private {
 	} backlight;
 
 	/* LVDS info */
-	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
-	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
 	bool no_aux_handshake;
 
 	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
@@ -1104,7 +1250,8 @@ typedef struct drm_i915_private {
 	enum modeset_restore modeset_restore;
 	struct mutex modeset_restore_lock;
 
-	struct i915_gtt gtt;
+	struct list_head vm_list; /* Global list of all address spaces */
+	struct i915_gtt gtt; /* VMA representing the global address space */
 
 	struct i915_gem_mm mm;
 
@@ -1131,6 +1278,9 @@ typedef struct drm_i915_private {
 
 	struct intel_l3_parity l3_parity;
 
+	/* Cannot be determined by PCIID. You must always read a register. */
+	size_t ellc_size;
+
 	/* gen6+ rps state */
 	struct intel_gen6_power_mgmt rps;
 
@@ -1141,10 +1291,7 @@ typedef struct drm_i915_private {
 	/* Haswell power well */
 	struct i915_power_well power_well;
 
-	enum no_fbc_reason no_fbc_reason;
-
-	struct drm_mm_node *compressed_fb;
-	struct drm_mm_node *compressed_llb;
+	enum no_psr_reason no_psr_reason;
 
 	struct i915_gpu_error gpu_error;
 
@@ -1169,11 +1316,34 @@ typedef struct drm_i915_private {
 
 	struct i915_suspend_saved_registers regfile;
 
+	struct {
+		/*
+		 * Raw watermark latency values:
+		 * in 0.1us units for WM0,
+		 * in 0.5us units for WM1+.
+		 */
+		/* primary */
+		uint16_t pri_latency[5];
+		/* sprite */
+		uint16_t spr_latency[5];
+		/* cursor */
+		uint16_t cur_latency[5];
+	} wm;
+
+	struct i915_package_c8 pc8;
+
 	/* Old dri1 support infrastructure, beware the dragons ya fools entering
 	 * here! */
 	struct i915_dri1_state dri1;
+	/* Old ums support infrastructure, same warning applies. */
+	struct i915_ums_state ums;
 } drm_i915_private_t;
 
+static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
+{
+	return dev->dev_private;
+}
+
 /* Iterate over initialised rings */
 #define for_each_ring(ring__, dev_priv__, i__) \
 	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
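Note the mixed units in the new wm block: WM0 latencies are stored in 0.1 us steps, WM1+ in 0.5 us steps. A small decoder (hypothetical helper, not part of the patch) that normalizes any level to tenths of a microsecond:

/* Hypothetical helper: return a watermark latency in tenths of a
 * microsecond. WM0 is already in 0.1us units; WM1+ is stored in 0.5us
 * units, i.e. 5x coarser. */
static unsigned int wm_latency_in_0_1us(const uint16_t latency[5], int level)
{
	return level == 0 ? latency[0] : latency[level] * 5;
}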
@@ -1186,7 +1356,7 @@ enum hdmi_force_audio {
 	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
 };
 
-#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
+#define I915_GTT_OFFSET_NONE ((u32)-1)
 
 struct drm_i915_gem_object_ops {
 	/* Interface between the GEM object and its backing storage.
@@ -1211,15 +1381,16 @@ struct drm_i915_gem_object {
 
 	const struct drm_i915_gem_object_ops *ops;
 
-	/** Current space allocated to this object in the GTT, if any. */
-	struct drm_mm_node *gtt_space;
+	/** List of VMAs backed by this object */
+	struct list_head vma_list;
+
 	/** Stolen memory for this object, instead of being backed by shmem. */
 	struct drm_mm_node *stolen;
 	struct list_head global_list;
 
-	/** This object's place on the active/inactive lists */
 	struct list_head ring_list;
-	struct list_head mm_list;
+	/** Used in execbuf to temporarily hold a ref */
+	struct list_head obj_exec_link;
 	/** This object's place in the batchbuffer or on the eviction list */
 	struct list_head exec_list;
 
@@ -1286,6 +1457,7 @@ struct drm_i915_gem_object {
 	 */
 	unsigned int fault_mappable:1;
 	unsigned int pin_mappable:1;
+	unsigned int pin_display:1;
 
 	/*
 	 * Is the GPU currently using a fence to access this buffer,
@@ -1293,7 +1465,7 @@ struct drm_i915_gem_object {
 	unsigned int pending_fenced_gpu_access:1;
 	unsigned int fenced_gpu_access:1;
 
-	unsigned int cache_level:2;
+	unsigned int cache_level:3;
 
 	unsigned int has_aliasing_ppgtt_mapping:1;
 	unsigned int has_global_gtt_mapping:1;
@@ -1313,13 +1485,6 @@ struct drm_i915_gem_object {
 	unsigned long exec_handle;
 	struct drm_i915_gem_exec_object2 *exec_entry;
 
-	/**
-	 * Current offset of the object in GTT space.
-	 *
-	 * This is the same as gtt_space->start
-	 */
-	uint32_t gtt_offset;
-
 	struct intel_ring_buffer *ring;
 
 	/** Breadcrumb of last rendering to the buffer. */
@@ -1395,7 +1560,7 @@ struct drm_i915_file_private {
 	struct i915_ctx_hang_stats hang_stats;
 };
 
-#define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)
+#define INTEL_INFO(dev)	(to_i915(dev)->info)
 
 #define IS_I830(dev)		((dev)->pci_device == 0x3577)
 #define IS_845G(dev)		((dev)->pci_device == 0x2562)
@@ -1413,7 +1578,6 @@ struct drm_i915_file_private {
 #define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
 #define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
 #define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
 #define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
 #define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
 #define IS_IVB_GT1(dev)		((dev)->pci_device == 0x0156 || \
@@ -1425,6 +1589,8 @@ struct drm_i915_file_private {
 #define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
 #define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
+#define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
+				 ((dev)->pci_device & 0xFF00) == 0x0C00)
 #define IS_ULT(dev)		(IS_HASWELL(dev) && \
 				 ((dev)->pci_device & 0xFF00) == 0x0A00)
 
@@ -1445,6 +1611,7 @@ struct drm_i915_file_private {
 #define HAS_BLT(dev)		(INTEL_INFO(dev)->has_blt_ring)
 #define HAS_VEBOX(dev)		(INTEL_INFO(dev)->has_vebox_ring)
 #define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
+#define HAS_WT(dev)		(IS_HASWELL(dev) && to_i915(dev)->ellc_size)
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
 #define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
@@ -1467,8 +1634,6 @@ struct drm_i915_file_private {
 #define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
 #define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev)		(INTEL_INFO(dev)->has_hotplug)
-/* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
 
 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
@@ -1476,8 +1641,6 @@ struct drm_i915_file_private {
 
 #define HAS_IPS(dev)		(IS_ULT(dev))
 
-#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
-
 #define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
 #define HAS_POWER_WELL(dev)	(IS_HASWELL(dev))
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
@@ -1489,7 +1652,7 @@ struct drm_i915_file_private {
 #define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE	0x9c00
 
-#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
@@ -1525,7 +1688,7 @@ struct drm_i915_file_private {
 #define INTEL_RC6p_ENABLE			(1<<1)
 #define INTEL_RC6pp_ENABLE			(1<<2)
 
-extern struct drm_ioctl_desc i915_ioctls[];
+extern const struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc __always_unused;
 extern int i915_panel_ignore_lid __read_mostly;
@@ -1539,9 +1702,14 @@ extern int i915_enable_rc6 __read_mostly;
 extern int i915_enable_fbc __read_mostly;
 extern bool i915_enable_hangcheck __read_mostly;
 extern int i915_enable_ppgtt __read_mostly;
+extern int i915_enable_psr __read_mostly;
 extern unsigned int i915_preliminary_hw_support __read_mostly;
 extern int i915_disable_power_well __read_mostly;
 extern int i915_enable_ips __read_mostly;
+extern bool i915_fastboot __read_mostly;
+extern int i915_enable_pc8 __read_mostly;
+extern int i915_pc8_timeout __read_mostly;
+extern bool i915_prefault_disable __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
@@ -1577,15 +1745,19 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 extern void intel_console_resume(struct work_struct *work);
 
 /* i915_irq.c */
-void i915_hangcheck_elapsed(unsigned long data);
+void i915_queue_hangcheck(struct drm_device *dev);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
+extern void intel_pm_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_gt_init(struct drm_device *dev);
-extern void intel_gt_reset(struct drm_device *dev);
+extern void intel_pm_init(struct drm_device *dev);
 
-void i915_error_state_free(struct kref *error_ref);
+extern void intel_uncore_sanitize(struct drm_device *dev);
+extern void intel_uncore_early_sanitize(struct drm_device *dev);
+extern void intel_uncore_init(struct drm_device *dev);
+extern void intel_uncore_clear_errors(struct drm_device *dev);
+extern void intel_uncore_check_errors(struct drm_device *dev);
 
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1593,13 +1765,6 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
-#ifdef CONFIG_DEBUG_FS
-extern void i915_destroy_error_state(struct drm_device *dev);
-#else
-#define i915_destroy_error_state(x)
-#endif
-
-
 /* i915_gem.c */
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
@@ -1656,13 +1821,18 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+				     struct i915_address_space *vm);
+void i915_gem_vma_destroy(struct i915_vma *vma);
 
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+				     struct i915_address_space *vm,
 				     uint32_t alignment,
 				     bool map_and_fenceable,
 				     bool nonblocking);
 void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int __must_check i915_vma_unbind(struct i915_vma *vma);
+int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
@@ -1699,8 +1869,6 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_mode_create_dumb *args);
 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
 		      uint32_t handle, uint64_t *offset);
-int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
-			  uint32_t handle);
 /**
  * Returns true if seq1 is later than seq2.
  */
@@ -1752,10 +1920,7 @@ static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
 }
 
 void i915_gem_reset(struct drm_device *dev);
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
-					    uint32_t read_domains,
-					    uint32_t write_domain);
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
@@ -1782,6 +1947,7 @@ int __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     struct intel_ring_buffer *pipelined);
+void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
 int i915_gem_attach_phys_object(struct drm_device *dev,
 				struct drm_i915_gem_object *obj,
 				int id,
@@ -1808,6 +1974,56 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
1808 1974
1809void i915_gem_restore_fences(struct drm_device *dev); 1975void i915_gem_restore_fences(struct drm_device *dev);
1810 1976
1977unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
1978 struct i915_address_space *vm);
1979bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
1980bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
1981 struct i915_address_space *vm);
1982unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
1983 struct i915_address_space *vm);
1984struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
1985 struct i915_address_space *vm);
1986struct i915_vma *
1987i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
1988 struct i915_address_space *vm);
1989/* Some GGTT VM helpers */
1990#define obj_to_ggtt(obj) \
1991 (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
1992static inline bool i915_is_ggtt(struct i915_address_space *vm)
1993{
1994 struct i915_address_space *ggtt =
1995 &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
1996 return vm == ggtt;
1997}
1998
1999static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
2000{
2001 return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
2002}
2003
2004static inline unsigned long
2005i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
2006{
2007 return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
2008}
2009
2010static inline unsigned long
2011i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
2012{
2013 return i915_gem_obj_size(obj, obj_to_ggtt(obj));
2014}
2015
2016static inline int __must_check
2017i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
2018 uint32_t alignment,
2019 bool map_and_fenceable,
2020 bool nonblocking)
2021{
2022 return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
2023 map_and_fenceable, nonblocking);
2024}
2025#undef obj_to_ggtt
2026
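
The GGTT helper block added above layers the old single-address-space API on top of the new per-VM entry points: each i915_gem_obj_ggtt_*() call resolves the object's device-global GTT via the temporary obj_to_ggtt() macro and forwards to the VM-aware function. A minimal standalone sketch of that wrapper pattern, with hypothetical stand-in types rather than the driver's real structures:

#include <stdio.h>

/* Hypothetical stand-ins for the driver's address-space and object types. */
struct address_space { unsigned long base; };
struct gem_object {
	struct address_space *dev_ggtt;	/* device-global GTT */
	unsigned long offset;		/* where this object is bound */
	unsigned long size;
};

/* VM-aware primitive: takes an explicit address space. */
static unsigned long obj_offset(struct gem_object *o, struct address_space *vm)
{
	(void)vm;	/* a real lookup would walk the per-VM binding list */
	return o->offset;
}

/* GGTT convenience wrapper: resolve the global GTT, then forward. */
static inline unsigned long obj_ggtt_offset(struct gem_object *o)
{
	return obj_offset(o, o->dev_ggtt);
}

int main(void)
{
	struct address_space ggtt = { .base = 0 };
	struct gem_object obj = { .dev_ggtt = &ggtt, .offset = 0x10000,
				  .size = 4096 };

	printf("ggtt offset: 0x%lx\n", obj_ggtt_offset(&obj));
	return 0;
}

Keeping the legacy names as thin inlines lets call sites migrate to explicit VM arguments piecemeal instead of in one flag-day change.
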
1811/* i915_gem_context.c */ 2027/* i915_gem_context.c */
1812void i915_gem_context_init(struct drm_device *dev); 2028void i915_gem_context_init(struct drm_device *dev);
1813void i915_gem_context_fini(struct drm_device *dev); 2029void i915_gem_context_fini(struct drm_device *dev);
@@ -1826,7 +2042,7 @@ static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
1826} 2042}
1827 2043
1828struct i915_ctx_hang_stats * __must_check 2044struct i915_ctx_hang_stats * __must_check
1829i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring, 2045i915_gem_context_get_hang_stats(struct drm_device *dev,
1830 struct drm_file *file, 2046 struct drm_file *file,
1831 u32 id); 2047 u32 id);
1832int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 2048int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -1860,7 +2076,9 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
1860 2076
1861 2077
1862/* i915_gem_evict.c */ 2078/* i915_gem_evict.c */
1863int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, 2079int __must_check i915_gem_evict_something(struct drm_device *dev,
2080 struct i915_address_space *vm,
2081 int min_size,
1864 unsigned alignment, 2082 unsigned alignment,
1865 unsigned cache_level, 2083 unsigned cache_level,
1866 bool mappable, 2084 bool mappable,
@@ -1882,7 +2100,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
1882void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj); 2100void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
1883 2101
1884/* i915_gem_tiling.c */ 2102/* i915_gem_tiling.c */
1885inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 2103static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
1886{ 2104{
1887 drm_i915_private_t *dev_priv = obj->base.dev->dev_private; 2105 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
1888 2106
@@ -1895,23 +2113,36 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
1895void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); 2113void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
1896 2114
1897/* i915_gem_debug.c */ 2115/* i915_gem_debug.c */
1898void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
1899 const char *where, uint32_t mark);
1900#if WATCH_LISTS 2116#if WATCH_LISTS
1901int i915_verify_lists(struct drm_device *dev); 2117int i915_verify_lists(struct drm_device *dev);
1902#else 2118#else
1903#define i915_verify_lists(dev) 0 2119#define i915_verify_lists(dev) 0
1904#endif 2120#endif
1905void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
1906 int handle);
1907void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
1908 const char *where, uint32_t mark);
1909 2121
1910/* i915_debugfs.c */ 2122/* i915_debugfs.c */
1911int i915_debugfs_init(struct drm_minor *minor); 2123int i915_debugfs_init(struct drm_minor *minor);
1912void i915_debugfs_cleanup(struct drm_minor *minor); 2124void i915_debugfs_cleanup(struct drm_minor *minor);
2125
2126/* i915_gpu_error.c */
1913__printf(2, 3) 2127__printf(2, 3)
1914void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); 2128void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
2129int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
2130 const struct i915_error_state_file_priv *error);
2131int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
2132 size_t count, loff_t pos);
2133static inline void i915_error_state_buf_release(
2134 struct drm_i915_error_state_buf *eb)
2135{
2136 kfree(eb->buf);
2137}
2138void i915_capture_error_state(struct drm_device *dev);
2139void i915_error_state_get(struct drm_device *dev,
2140 struct i915_error_state_file_priv *error_priv);
2141void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
2142void i915_destroy_error_state(struct drm_device *dev);
2143
2144void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
2145const char *i915_cache_level_str(int type);
1915 2146
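
The i915_gpu_error.c declarations split out above follow a plain init/use/release lifecycle for a fixed-size formatted buffer; i915_error_state_buf_release() can be inline because it only frees the backing store. A rough userspace model of that lifecycle, with illustrative names rather than the driver's API:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative model of an error-state string buffer. */
struct err_buf {
	char *buf;
	size_t size;
	size_t pos;
};

static int err_buf_init(struct err_buf *eb, size_t count)
{
	eb->buf = malloc(count);
	if (!eb->buf)
		return -1;
	eb->size = count;
	eb->pos = 0;
	return 0;
}

/* Appends formatted text, mirroring the role of i915_error_printf(). */
static void err_printf(struct err_buf *eb, const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = vsnprintf(eb->buf + eb->pos, eb->size - eb->pos, fmt, ap);
	va_end(ap);
	if (n > 0 && (size_t)n < eb->size - eb->pos)
		eb->pos += n;
}

static inline void err_buf_release(struct err_buf *eb)
{
	free(eb->buf);	/* the kernel version is just kfree(eb->buf) */
}

int main(void)
{
	struct err_buf eb;

	if (err_buf_init(&eb, 256))
		return 1;
	err_printf(&eb, "ring %s hung at 0x%08x\n", "render", 0x1234u);
	fputs(eb.buf, stdout);
	err_buf_release(&eb);
	return 0;
}
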
1916/* i915_suspend.c */ 2147/* i915_suspend.c */
1917extern int i915_save_state(struct drm_device *dev); 2148extern int i915_save_state(struct drm_device *dev);
@@ -1991,7 +2222,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
1991 struct drm_file *file); 2222 struct drm_file *file);
1992 2223
1993/* overlay */ 2224/* overlay */
1994#ifdef CONFIG_DEBUG_FS
1995extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 2225extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
1996extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, 2226extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
1997 struct intel_overlay_error_state *error); 2227 struct intel_overlay_error_state *error);
@@ -2000,7 +2230,6 @@ extern struct intel_display_error_state *intel_display_capture_error_state(struc
2000extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, 2230extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
2001 struct drm_device *dev, 2231 struct drm_device *dev,
2002 struct intel_display_error_state *error); 2232 struct intel_display_error_state *error);
2003#endif
2004 2233
2005/* On SNB platform, before reading ring registers forcewake bit 2234/* On SNB platform, before reading ring registers forcewake bit
2006 * must be set to prevent GT core from power down and stale values being 2235 * must be set to prevent GT core from power down and stale values being
@@ -2008,7 +2237,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
2008 */ 2237 */
2009void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); 2238void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
2010void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); 2239void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
2011int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
2012 2240
2013int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); 2241int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
2014int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); 2242int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
@@ -2027,39 +2255,37 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
2027int vlv_gpu_freq(int ddr_freq, int val); 2255int vlv_gpu_freq(int ddr_freq, int val);
2028int vlv_freq_opcode(int ddr_freq, int val); 2256int vlv_freq_opcode(int ddr_freq, int val);
2029 2257
2030#define __i915_read(x, y) \ 2258#define __i915_read(x) \
2031 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); 2259 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
2032 2260__i915_read(8)
2033__i915_read(8, b) 2261__i915_read(16)
2034__i915_read(16, w) 2262__i915_read(32)
2035__i915_read(32, l) 2263__i915_read(64)
2036__i915_read(64, q)
2037#undef __i915_read 2264#undef __i915_read
2038 2265
2039#define __i915_write(x, y) \ 2266#define __i915_write(x) \
2040 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val); 2267 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
2041 2268__i915_write(8)
2042__i915_write(8, b) 2269__i915_write(16)
2043__i915_write(16, w) 2270__i915_write(32)
2044__i915_write(32, l) 2271__i915_write(64)
2045__i915_write(64, q)
2046#undef __i915_write 2272#undef __i915_write
2047 2273
2048#define I915_READ8(reg) i915_read8(dev_priv, (reg)) 2274#define I915_READ8(reg) i915_read8(dev_priv, (reg), true)
2049#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val)) 2275#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val), true)
2050 2276
2051#define I915_READ16(reg) i915_read16(dev_priv, (reg)) 2277#define I915_READ16(reg) i915_read16(dev_priv, (reg), true)
2052#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val)) 2278#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true)
2053#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg)) 2279#define I915_READ16_NOTRACE(reg) i915_read16(dev_priv, (reg), false)
2054#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg)) 2280#define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false)
2055 2281
2056#define I915_READ(reg) i915_read32(dev_priv, (reg)) 2282#define I915_READ(reg) i915_read32(dev_priv, (reg), true)
2057#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val)) 2283#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val), true)
2058#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg)) 2284#define I915_READ_NOTRACE(reg) i915_read32(dev_priv, (reg), false)
2059#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg)) 2285#define I915_WRITE_NOTRACE(reg, val) i915_write32(dev_priv, (reg), (val), false)
2060 2286
2061#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val)) 2287#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true)
2062#define I915_READ64(reg) i915_read64(dev_priv, (reg)) 2288#define I915_READ64(reg) i915_read64(dev_priv, (reg), true)
2063 2289
2064#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) 2290#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
2065#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 2291#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
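
The register-accessor rework above drops the read{b,w,l,q}/write{b,w,l,q} suffix argument from the generator macros and threads a trace flag through every accessor, so the NOTRACE variants become ordinary calls with trace=false instead of raw readl()/writel(). A compilable sketch of the same generation trick, using a stubbed register file and hypothetical names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t regs[4096];	/* stub register file standing in for MMIO */

/* One macro emits a whole family of sized accessors; the bool selects
 * whether the access is reported (the driver would emit a tracepoint). */
#define DEFINE_READ(x)							\
static uint##x##_t mmio_read##x(uint32_t reg, bool trace)		\
{									\
	uint##x##_t val = *(uint##x##_t *)(regs + reg);			\
	if (trace)							\
		printf("read%-2d reg=0x%04x val=0x%llx\n",		\
		       x, reg, (unsigned long long)val);		\
	return val;							\
}

DEFINE_READ(8)
DEFINE_READ(16)
DEFINE_READ(32)
DEFINE_READ(64)
#undef DEFINE_READ

#define MMIO_READ(reg)		mmio_read32((reg), true)
#define MMIO_READ_NOTRACE(reg)	mmio_read32((reg), false)

int main(void)
{
	regs[0x100] = 0xab;
	(void)MMIO_READ(0x100);		/* traced */
	(void)MMIO_READ_NOTRACE(0x100);	/* silent, e.g. for POSTING_READ */
	return 0;
}

Routing the NOTRACE paths through the same function also means any forcewake or sanity checks inside i915_read32() now cover them, which the old bare readl() bypassed.
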
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 97afd2639fb6..2d1cb10d846f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -26,6 +26,7 @@
26 */ 26 */
27 27
28#include <drm/drmP.h> 28#include <drm/drmP.h>
29#include <drm/drm_vma_manager.h>
29#include <drm/i915_drm.h> 30#include <drm/i915_drm.h>
30#include "i915_drv.h" 31#include "i915_drv.h"
31#include "i915_trace.h" 32#include "i915_trace.h"
@@ -37,11 +38,14 @@
37#include <linux/dma-buf.h> 38#include <linux/dma-buf.h>
38 39
39static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); 40static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
40static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); 41static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
41static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, 42 bool force);
42 unsigned alignment, 43static __must_check int
43 bool map_and_fenceable, 44i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
44 bool nonblocking); 45 struct i915_address_space *vm,
46 unsigned alignment,
47 bool map_and_fenceable,
48 bool nonblocking);
45static int i915_gem_phys_pwrite(struct drm_device *dev, 49static int i915_gem_phys_pwrite(struct drm_device *dev,
46 struct drm_i915_gem_object *obj, 50 struct drm_i915_gem_object *obj,
47 struct drm_i915_gem_pwrite *args, 51 struct drm_i915_gem_pwrite *args,
@@ -59,6 +63,20 @@ static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
59static void i915_gem_shrink_all(struct drm_i915_private *dev_priv); 63static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
60static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); 64static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
61 65
66static bool cpu_cache_is_coherent(struct drm_device *dev,
67 enum i915_cache_level level)
68{
69 return HAS_LLC(dev) || level != I915_CACHE_NONE;
70}
71
72static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
73{
74 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
75 return true;
76
77 return obj->pin_display;
78}
79
62static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) 80static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
63{ 81{
64 if (obj->tiling_mode) 82 if (obj->tiling_mode)
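
cpu_cache_is_coherent() and cpu_write_needs_clflush() above centralize a decision that was previously open-coded as cache_level == I915_CACHE_NONE checks: a CPU write needs a clflush when the cache level is not coherent with the GPU, or when the buffer is pinned for the display engine, which always scans out uncached. A toy model of that predicate pair, with made-up fields:

#include <stdbool.h>
#include <stdio.h>

enum cache_level { CACHE_NONE, CACHE_LLC, CACHE_WT };

struct device { bool has_llc; };
struct object { enum cache_level level; bool pin_display; };

/* Coherent if the platform snoops (LLC) or the object is cached. */
static bool cpu_cache_is_coherent(const struct device *dev,
				  enum cache_level level)
{
	return dev->has_llc || level != CACHE_NONE;
}

/* Display scanout bypasses the cache, so it forces a flush even on
 * otherwise-coherent objects. */
static bool cpu_write_needs_clflush(const struct device *dev,
				    const struct object *obj)
{
	if (!cpu_cache_is_coherent(dev, obj->level))
		return true;
	return obj->pin_display;
}

int main(void)
{
	struct device snb = { .has_llc = true };
	struct object scanout = { .level = CACHE_LLC, .pin_display = true };

	printf("needs clflush: %d\n", cpu_write_needs_clflush(&snb, &scanout));
	return 0;
}
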
@@ -75,15 +93,19 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
75static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, 93static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
76 size_t size) 94 size_t size)
77{ 95{
96 spin_lock(&dev_priv->mm.object_stat_lock);
78 dev_priv->mm.object_count++; 97 dev_priv->mm.object_count++;
79 dev_priv->mm.object_memory += size; 98 dev_priv->mm.object_memory += size;
99 spin_unlock(&dev_priv->mm.object_stat_lock);
80} 100}
81 101
82static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, 102static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
83 size_t size) 103 size_t size)
84{ 104{
105 spin_lock(&dev_priv->mm.object_stat_lock);
85 dev_priv->mm.object_count--; 106 dev_priv->mm.object_count--;
86 dev_priv->mm.object_memory -= size; 107 dev_priv->mm.object_memory -= size;
108 spin_unlock(&dev_priv->mm.object_stat_lock);
87} 109}
88 110
89static int 111static int
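
The hunk above puts the object count and memory counters under a dedicated object_stat_lock, since they were previously incremented and decremented without serialization against concurrent object creation and destruction. A pthread-based equivalent of the guarded update, purely illustrative:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Counters plus the lock that now serializes updates to them. */
static pthread_mutex_t stat_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long object_count;
static unsigned long object_memory;

static void info_add_obj(size_t size)
{
	pthread_mutex_lock(&stat_lock);
	object_count++;
	object_memory += size;
	pthread_mutex_unlock(&stat_lock);
}

static void info_remove_obj(size_t size)
{
	pthread_mutex_lock(&stat_lock);
	object_count--;
	object_memory -= size;
	pthread_mutex_unlock(&stat_lock);
}

int main(void)
{
	info_add_obj(4096);
	info_remove_obj(4096);
	printf("count=%lu mem=%lu\n", object_count, object_memory);
	return 0;
}
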
@@ -135,7 +157,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
135static inline bool 157static inline bool
136i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) 158i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
137{ 159{
138 return obj->gtt_space && !obj->active; 160 return i915_gem_obj_bound_any(obj) && !obj->active;
139} 161}
140 162
141int 163int
@@ -178,10 +200,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
178 mutex_lock(&dev->struct_mutex); 200 mutex_lock(&dev->struct_mutex);
179 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) 201 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
180 if (obj->pin_count) 202 if (obj->pin_count)
181 pinned += obj->gtt_space->size; 203 pinned += i915_gem_obj_ggtt_size(obj);
182 mutex_unlock(&dev->struct_mutex); 204 mutex_unlock(&dev->struct_mutex);
183 205
184 args->aper_size = dev_priv->gtt.total; 206 args->aper_size = dev_priv->gtt.base.total;
185 args->aper_available_size = args->aper_size - pinned; 207 args->aper_available_size = args->aper_size - pinned;
186 208
187 return 0; 209 return 0;
@@ -219,16 +241,10 @@ i915_gem_create(struct drm_file *file,
219 return -ENOMEM; 241 return -ENOMEM;
220 242
221 ret = drm_gem_handle_create(file, &obj->base, &handle); 243 ret = drm_gem_handle_create(file, &obj->base, &handle);
222 if (ret) {
223 drm_gem_object_release(&obj->base);
224 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
225 i915_gem_object_free(obj);
226 return ret;
227 }
228
229 /* drop reference from allocate - handle holds it now */ 244 /* drop reference from allocate - handle holds it now */
230 drm_gem_object_unreference(&obj->base); 245 drm_gem_object_unreference_unlocked(&obj->base);
231 trace_i915_gem_object_create(obj); 246 if (ret)
247 return ret;
232 248
233 *handle_p = handle; 249 *handle_p = handle;
234 return 0; 250 return 0;
@@ -246,13 +262,6 @@ i915_gem_dumb_create(struct drm_file *file,
246 args->size, &args->handle); 262 args->size, &args->handle);
247} 263}
248 264
249int i915_gem_dumb_destroy(struct drm_file *file,
250 struct drm_device *dev,
251 uint32_t handle)
252{
253 return drm_gem_handle_delete(file, handle);
254}
255
256/** 265/**
257 * Creates a new mm object and returns a handle to it. 266 * Creates a new mm object and returns a handle to it.
258 */ 267 */
@@ -420,9 +429,8 @@ i915_gem_shmem_pread(struct drm_device *dev,
420 * read domain and manually flush cachelines (if required). This 429 * read domain and manually flush cachelines (if required). This
421 * optimizes for the case when the gpu will dirty the data 430 * optimizes for the case when the gpu will dirty the data
422 * anyway again before the next pread happens. */ 431 * anyway again before the next pread happens. */
423 if (obj->cache_level == I915_CACHE_NONE) 432 needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
424 needs_clflush = 1; 433 if (i915_gem_obj_bound_any(obj)) {
425 if (obj->gtt_space) {
426 ret = i915_gem_object_set_to_gtt_domain(obj, false); 434 ret = i915_gem_object_set_to_gtt_domain(obj, false);
427 if (ret) 435 if (ret)
428 return ret; 436 return ret;
@@ -465,7 +473,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
465 473
466 mutex_unlock(&dev->struct_mutex); 474 mutex_unlock(&dev->struct_mutex);
467 475
468 if (!prefaulted) { 476 if (likely(!i915_prefault_disable) && !prefaulted) {
469 ret = fault_in_multipages_writeable(user_data, remain); 477 ret = fault_in_multipages_writeable(user_data, remain);
470 /* Userspace is tricking us, but we've already clobbered 478 /* Userspace is tricking us, but we've already clobbered
471 * its pages with the prefault and promised to write the 479 * its pages with the prefault and promised to write the
@@ -594,7 +602,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
594 char __user *user_data; 602 char __user *user_data;
595 int page_offset, page_length, ret; 603 int page_offset, page_length, ret;
596 604
597 ret = i915_gem_object_pin(obj, 0, true, true); 605 ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
598 if (ret) 606 if (ret)
599 goto out; 607 goto out;
600 608
@@ -609,7 +617,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
609 user_data = to_user_ptr(args->data_ptr); 617 user_data = to_user_ptr(args->data_ptr);
610 remain = args->size; 618 remain = args->size;
611 619
612 offset = obj->gtt_offset + args->offset; 620 offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
613 621
614 while (remain > 0) { 622 while (remain > 0) {
615 /* Operation in this page 623 /* Operation in this page
@@ -737,19 +745,18 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
737 * write domain and manually flush cachelines (if required). This 745 * write domain and manually flush cachelines (if required). This
738 * optimizes for the case when the gpu will use the data 746 * optimizes for the case when the gpu will use the data
739 * right away and we therefore have to clflush anyway. */ 747 * right away and we therefore have to clflush anyway. */
740 if (obj->cache_level == I915_CACHE_NONE) 748 needs_clflush_after = cpu_write_needs_clflush(obj);
741 needs_clflush_after = 1; 749 if (i915_gem_obj_bound_any(obj)) {
742 if (obj->gtt_space) {
743 ret = i915_gem_object_set_to_gtt_domain(obj, true); 750 ret = i915_gem_object_set_to_gtt_domain(obj, true);
744 if (ret) 751 if (ret)
745 return ret; 752 return ret;
746 } 753 }
747 } 754 }
748 /* Same trick applies for invalidate partially written cachelines before 755 /* Same trick applies to invalidate partially written cachelines read
749 * writing. */ 756 * before writing. */
750 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU) 757 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
751 && obj->cache_level == I915_CACHE_NONE) 758 needs_clflush_before =
752 needs_clflush_before = 1; 759 !cpu_cache_is_coherent(dev, obj->cache_level);
753 760
754 ret = i915_gem_object_get_pages(obj); 761 ret = i915_gem_object_get_pages(obj);
755 if (ret) 762 if (ret)
@@ -828,8 +835,8 @@ out:
828 */ 835 */
829 if (!needs_clflush_after && 836 if (!needs_clflush_after &&
830 obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 837 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
831 i915_gem_clflush_object(obj); 838 if (i915_gem_clflush_object(obj, obj->pin_display))
832 i915_gem_chipset_flush(dev); 839 i915_gem_chipset_flush(dev);
833 } 840 }
834 } 841 }
835 842
@@ -860,10 +867,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
860 args->size)) 867 args->size))
861 return -EFAULT; 868 return -EFAULT;
862 869
863 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), 870 if (likely(!i915_prefault_disable)) {
864 args->size); 871 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
865 if (ret) 872 args->size);
866 return -EFAULT; 873 if (ret)
874 return -EFAULT;
875 }
867 876
868 ret = i915_mutex_lock_interruptible(dev); 877 ret = i915_mutex_lock_interruptible(dev);
869 if (ret) 878 if (ret)
@@ -904,9 +913,9 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
904 goto out; 913 goto out;
905 } 914 }
906 915
907 if (obj->cache_level == I915_CACHE_NONE && 916 if (obj->tiling_mode == I915_TILING_NONE &&
908 obj->tiling_mode == I915_TILING_NONE && 917 obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
909 obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 918 cpu_write_needs_clflush(obj)) {
910 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); 919 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
911 /* Note that the gtt paths might fail with non-page-backed user 920 /* Note that the gtt paths might fail with non-page-backed user
912 * pointers (e.g. gtt mappings when moving data between 921 * pointers (e.g. gtt mappings when moving data between
@@ -990,6 +999,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
990 bool wait_forever = true; 999 bool wait_forever = true;
991 int ret; 1000 int ret;
992 1001
1002 WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
1003
993 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) 1004 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
994 return 0; 1005 return 0;
995 1006
@@ -1255,8 +1266,8 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1255 } 1266 }
1256 1267
1257 /* Pinned buffers may be scanout, so flush the cache */ 1268 /* Pinned buffers may be scanout, so flush the cache */
1258 if (obj->pin_count) 1269 if (obj->pin_display)
1259 i915_gem_object_flush_cpu_write_domain(obj); 1270 i915_gem_object_flush_cpu_write_domain(obj, true);
1260 1271
1261 drm_gem_object_unreference(&obj->base); 1272 drm_gem_object_unreference(&obj->base);
1262unlock: 1273unlock:
@@ -1346,7 +1357,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1346 } 1357 }
1347 1358
1348 /* Now bind it into the GTT if needed */ 1359 /* Now bind it into the GTT if needed */
1349 ret = i915_gem_object_pin(obj, 0, true, false); 1360 ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
1350 if (ret) 1361 if (ret)
1351 goto unlock; 1362 goto unlock;
1352 1363
@@ -1360,8 +1371,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1360 1371
1361 obj->fault_mappable = true; 1372 obj->fault_mappable = true;
1362 1373
1363 pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) + 1374 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1364 page_offset; 1375 pfn >>= PAGE_SHIFT;
1376 pfn += page_offset;
1365 1377
1366 /* Finally, remap it using the new GTT offset */ 1378 /* Finally, remap it using the new GTT offset */
1367 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); 1379 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
@@ -1425,11 +1437,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1425 if (!obj->fault_mappable) 1437 if (!obj->fault_mappable)
1426 return; 1438 return;
1427 1439
1428 if (obj->base.dev->dev_mapping) 1440 drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
1429 unmap_mapping_range(obj->base.dev->dev_mapping,
1430 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1431 obj->base.size, 1);
1432
1433 obj->fault_mappable = false; 1441 obj->fault_mappable = false;
1434} 1442}
1435 1443
@@ -1485,7 +1493,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1485 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 1493 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1486 int ret; 1494 int ret;
1487 1495
1488 if (obj->base.map_list.map) 1496 if (drm_vma_node_has_offset(&obj->base.vma_node))
1489 return 0; 1497 return 0;
1490 1498
1491 dev_priv->mm.shrinker_no_lock_stealing = true; 1499 dev_priv->mm.shrinker_no_lock_stealing = true;
@@ -1516,9 +1524,6 @@ out:
1516 1524
1517static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) 1525static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1518{ 1526{
1519 if (!obj->base.map_list.map)
1520 return;
1521
1522 drm_gem_free_mmap_offset(&obj->base); 1527 drm_gem_free_mmap_offset(&obj->base);
1523} 1528}
1524 1529
@@ -1557,7 +1562,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
1557 if (ret) 1562 if (ret)
1558 goto out; 1563 goto out;
1559 1564
1560 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; 1565 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1561 1566
1562out: 1567out:
1563 drm_gem_object_unreference(&obj->base); 1568 drm_gem_object_unreference(&obj->base);
@@ -1632,7 +1637,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1632 * hope for the best. 1637 * hope for the best.
1633 */ 1638 */
1634 WARN_ON(ret != -EIO); 1639 WARN_ON(ret != -EIO);
1635 i915_gem_clflush_object(obj); 1640 i915_gem_clflush_object(obj, true);
1636 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; 1641 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1637 } 1642 }
1638 1643
@@ -1667,11 +1672,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1667 if (obj->pages == NULL) 1672 if (obj->pages == NULL)
1668 return 0; 1673 return 0;
1669 1674
1670 BUG_ON(obj->gtt_space);
1671
1672 if (obj->pages_pin_count) 1675 if (obj->pages_pin_count)
1673 return -EBUSY; 1676 return -EBUSY;
1674 1677
1678 BUG_ON(i915_gem_obj_bound_any(obj));
1679
1675 /* ->put_pages might need to allocate memory for the bit17 swizzle 1680 /* ->put_pages might need to allocate memory for the bit17 swizzle
1676 * array, hence protect them from being reaped by removing them from gtt 1681 * array, hence protect them from being reaped by removing them from gtt
1677 * lists early. */ 1682 * lists early. */
@@ -1704,12 +1709,18 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1704 } 1709 }
1705 } 1710 }
1706 1711
1707 list_for_each_entry_safe(obj, next, 1712 list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
1708 &dev_priv->mm.inactive_list, 1713 global_list) {
1709 mm_list) { 1714 struct i915_vma *vma, *v;
1710 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && 1715
1711 i915_gem_object_unbind(obj) == 0 && 1716 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1712 i915_gem_object_put_pages(obj) == 0) { 1717 continue;
1718
1719 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1720 if (i915_vma_unbind(vma))
1721 break;
1722
1723 if (!i915_gem_object_put_pages(obj)) {
1713 count += obj->base.size >> PAGE_SHIFT; 1724 count += obj->base.size >> PAGE_SHIFT;
1714 if (count >= target) 1725 if (count >= target)
1715 return count; 1726 return count;
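
The rewritten shrink loop above walks the bound list instead of the old inactive list, and for each eligible object first unbinds every VMA (bailing out of the inner loop on the first failure) before trying to drop the backing pages. A simplified standalone version of that two-level walk, using a plain linked list in place of the kernel's list_head machinery:

#include <stdio.h>

struct vma { struct vma *next; };
struct object {
	struct object *next;
	struct vma *vmas;
	int purgeable;
	long pages;	/* page count released when the object is shrunk */
};

static int vma_unbind(struct vma *v) { (void)v; return 0; /* 0 = success */ }

/* Release pages from purgeable objects until 'target' pages are freed. */
static long shrink(struct object *bound, long target, int purgeable_only)
{
	long count = 0;
	struct object *obj, *next;

	for (obj = bound; obj; obj = next) {
		struct vma *v, *vn;

		next = obj->next;	/* safe iteration: obj may go away */
		if (purgeable_only && !obj->purgeable)
			continue;

		/* Unbind every mapping first; stop on the first failure. */
		for (v = obj->vmas; v; v = vn) {
			vn = v->next;
			if (vma_unbind(v))
				break;
		}

		/* The real code only counts pages if put_pages() succeeded. */
		count += obj->pages;
		if (count >= target)
			return count;
	}
	return count;
}

int main(void)
{
	struct object b = { NULL, NULL, 1, 16 };
	struct object a = { &b, NULL, 1, 16 };

	printf("freed %ld pages\n", shrink(&a, 24, 1));
	return 0;
}
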
@@ -1892,8 +1903,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1892 obj->active = 1; 1903 obj->active = 1;
1893 } 1904 }
1894 1905
1895 /* Move from whatever list we were on to the tail of execution. */
1896 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1897 list_move_tail(&obj->ring_list, &ring->active_list); 1906 list_move_tail(&obj->ring_list, &ring->active_list);
1898 1907
1899 obj->last_read_seqno = seqno; 1908 obj->last_read_seqno = seqno;
@@ -1915,13 +1924,14 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1915static void 1924static void
1916i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) 1925i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1917{ 1926{
1918 struct drm_device *dev = obj->base.dev; 1927 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1919 struct drm_i915_private *dev_priv = dev->dev_private; 1928 struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
1929 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
1920 1930
1921 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); 1931 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1922 BUG_ON(!obj->active); 1932 BUG_ON(!obj->active);
1923 1933
1924 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 1934 list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
1925 1935
1926 list_del_init(&obj->ring_list); 1936 list_del_init(&obj->ring_list);
1927 obj->ring = NULL; 1937 obj->ring = NULL;
@@ -2085,11 +2095,9 @@ int __i915_add_request(struct intel_ring_buffer *ring,
2085 trace_i915_gem_request_add(ring, request->seqno); 2095 trace_i915_gem_request_add(ring, request->seqno);
2086 ring->outstanding_lazy_request = 0; 2096 ring->outstanding_lazy_request = 0;
2087 2097
2088 if (!dev_priv->mm.suspended) { 2098 if (!dev_priv->ums.mm_suspended) {
2089 if (i915_enable_hangcheck) { 2099 i915_queue_hangcheck(ring->dev);
2090 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 2100
2091 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2092 }
2093 if (was_empty) { 2101 if (was_empty) {
2094 queue_delayed_work(dev_priv->wq, 2102 queue_delayed_work(dev_priv->wq,
2095 &dev_priv->mm.retire_work, 2103 &dev_priv->mm.retire_work,
@@ -2119,10 +2127,11 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2119 spin_unlock(&file_priv->mm.lock); 2127 spin_unlock(&file_priv->mm.lock);
2120} 2128}
2121 2129
2122static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj) 2130static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
2131 struct i915_address_space *vm)
2123{ 2132{
2124 if (acthd >= obj->gtt_offset && 2133 if (acthd >= i915_gem_obj_offset(obj, vm) &&
2125 acthd < obj->gtt_offset + obj->base.size) 2134 acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
2126 return true; 2135 return true;
2127 2136
2128 return false; 2137 return false;
@@ -2145,6 +2154,17 @@ static bool i915_head_inside_request(const u32 acthd_unmasked,
2145 return false; 2154 return false;
2146} 2155}
2147 2156
2157static struct i915_address_space *
2158request_to_vm(struct drm_i915_gem_request *request)
2159{
2160 struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
2161 struct i915_address_space *vm;
2162
2163 vm = &dev_priv->gtt.base;
2164
2165 return vm;
2166}
2167
2148static bool i915_request_guilty(struct drm_i915_gem_request *request, 2168static bool i915_request_guilty(struct drm_i915_gem_request *request,
2149 const u32 acthd, bool *inside) 2169 const u32 acthd, bool *inside)
2150{ 2170{
@@ -2152,9 +2172,9 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request,
2152 * pointing inside the ring, matches the batch_obj address range. 2172 * pointing inside the ring, matches the batch_obj address range.
2153 * However this is extremely unlikely. 2173 * However this is extremely unlikely.
2154 */ 2174 */
2155
2156 if (request->batch_obj) { 2175 if (request->batch_obj) {
2157 if (i915_head_inside_object(acthd, request->batch_obj)) { 2176 if (i915_head_inside_object(acthd, request->batch_obj,
2177 request_to_vm(request))) {
2158 *inside = true; 2178 *inside = true;
2159 return true; 2179 return true;
2160 } 2180 }
@@ -2174,17 +2194,21 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
2174{ 2194{
2175 struct i915_ctx_hang_stats *hs = NULL; 2195 struct i915_ctx_hang_stats *hs = NULL;
2176 bool inside, guilty; 2196 bool inside, guilty;
2197 unsigned long offset = 0;
2177 2198
2178 /* Innocent until proven guilty */ 2199 /* Innocent until proven guilty */
2179 guilty = false; 2200 guilty = false;
2180 2201
2181 if (ring->hangcheck.action != wait && 2202 if (request->batch_obj)
2203 offset = i915_gem_obj_offset(request->batch_obj,
2204 request_to_vm(request));
2205
2206 if (ring->hangcheck.action != HANGCHECK_WAIT &&
2182 i915_request_guilty(request, acthd, &inside)) { 2207 i915_request_guilty(request, acthd, &inside)) {
2183 DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n", 2208 DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
2184 ring->name, 2209 ring->name,
2185 inside ? "inside" : "flushing", 2210 inside ? "inside" : "flushing",
2186 request->batch_obj ? 2211 offset,
2187 request->batch_obj->gtt_offset : 0,
2188 request->ctx ? request->ctx->id : 0, 2212 request->ctx ? request->ctx->id : 0,
2189 acthd); 2213 acthd);
2190 2214
@@ -2258,30 +2282,29 @@ void i915_gem_restore_fences(struct drm_device *dev)
2258 2282
2259 for (i = 0; i < dev_priv->num_fence_regs; i++) { 2283 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2260 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 2284 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2261 i915_gem_write_fence(dev, i, reg->obj); 2285
2286 /*
2287 * Commit delayed tiling changes if we have an object still
2288 * attached to the fence, otherwise just clear the fence.
2289 */
2290 if (reg->obj) {
2291 i915_gem_object_update_fence(reg->obj, reg,
2292 reg->obj->tiling_mode);
2293 } else {
2294 i915_gem_write_fence(dev, i, NULL);
2295 }
2262 } 2296 }
2263} 2297}
2264 2298
2265void i915_gem_reset(struct drm_device *dev) 2299void i915_gem_reset(struct drm_device *dev)
2266{ 2300{
2267 struct drm_i915_private *dev_priv = dev->dev_private; 2301 struct drm_i915_private *dev_priv = dev->dev_private;
2268 struct drm_i915_gem_object *obj;
2269 struct intel_ring_buffer *ring; 2302 struct intel_ring_buffer *ring;
2270 int i; 2303 int i;
2271 2304
2272 for_each_ring(ring, dev_priv, i) 2305 for_each_ring(ring, dev_priv, i)
2273 i915_gem_reset_ring_lists(dev_priv, ring); 2306 i915_gem_reset_ring_lists(dev_priv, ring);
2274 2307
2275 /* Move everything out of the GPU domains to ensure we do any
2276 * necessary invalidation upon reuse.
2277 */
2278 list_for_each_entry(obj,
2279 &dev_priv->mm.inactive_list,
2280 mm_list)
2281 {
2282 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2283 }
2284
2285 i915_gem_restore_fences(dev); 2308 i915_gem_restore_fences(dev);
2286} 2309}
2287 2310
@@ -2390,7 +2413,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
2390 idle &= list_empty(&ring->request_list); 2413 idle &= list_empty(&ring->request_list);
2391 } 2414 }
2392 2415
2393 if (!dev_priv->mm.suspended && !idle) 2416 if (!dev_priv->ums.mm_suspended && !idle)
2394 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 2417 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2395 round_jiffies_up_relative(HZ)); 2418 round_jiffies_up_relative(HZ));
2396 if (idle) 2419 if (idle)
@@ -2576,18 +2599,18 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2576 old_write_domain); 2599 old_write_domain);
2577} 2600}
2578 2601
2579/** 2602int i915_vma_unbind(struct i915_vma *vma)
2580 * Unbinds an object from the GTT aperture.
2581 */
2582int
2583i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2584{ 2603{
2604 struct drm_i915_gem_object *obj = vma->obj;
2585 drm_i915_private_t *dev_priv = obj->base.dev->dev_private; 2605 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2586 int ret; 2606 int ret;
2587 2607
2588 if (obj->gtt_space == NULL) 2608 if (list_empty(&vma->vma_link))
2589 return 0; 2609 return 0;
2590 2610
2611 if (!drm_mm_node_allocated(&vma->node))
2612 goto destroy;
2613
2591 if (obj->pin_count) 2614 if (obj->pin_count)
2592 return -EBUSY; 2615 return -EBUSY;
2593 2616
@@ -2608,7 +2631,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2608 if (ret) 2631 if (ret)
2609 return ret; 2632 return ret;
2610 2633
2611 trace_i915_gem_object_unbind(obj); 2634 trace_i915_vma_unbind(vma);
2612 2635
2613 if (obj->has_global_gtt_mapping) 2636 if (obj->has_global_gtt_mapping)
2614 i915_gem_gtt_unbind_object(obj); 2637 i915_gem_gtt_unbind_object(obj);
@@ -2619,18 +2642,46 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2619 i915_gem_gtt_finish_object(obj); 2642 i915_gem_gtt_finish_object(obj);
2620 i915_gem_object_unpin_pages(obj); 2643 i915_gem_object_unpin_pages(obj);
2621 2644
2622 list_del(&obj->mm_list); 2645 list_del(&vma->mm_list);
2623 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2624 /* Avoid an unnecessary call to unbind on rebind. */ 2646 /* Avoid an unnecessary call to unbind on rebind. */
2625 obj->map_and_fenceable = true; 2647 if (i915_is_ggtt(vma->vm))
2648 obj->map_and_fenceable = true;
2649
2650 drm_mm_remove_node(&vma->node);
2626 2651
2627 drm_mm_put_block(obj->gtt_space); 2652destroy:
2628 obj->gtt_space = NULL; 2653 i915_gem_vma_destroy(vma);
2629 obj->gtt_offset = 0; 2654
2655 /* Since the unbound list is global, only move to that list if
2656 * no more VMAs exist.
2657 * NB: Until we have real VMAs there will only ever be one */
2658 WARN_ON(!list_empty(&obj->vma_list));
2659 if (list_empty(&obj->vma_list))
2660 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2630 2661
2631 return 0; 2662 return 0;
2632} 2663}
2633 2664
2665/**
2666 * Unbinds an object from the global GTT aperture.
2667 */
2668int
2669i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2670{
2671 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2672 struct i915_address_space *ggtt = &dev_priv->gtt.base;
2673
2674 if (!i915_gem_obj_ggtt_bound(obj))
2675 return 0;
2676
2677 if (obj->pin_count)
2678 return -EBUSY;
2679
2680 BUG_ON(obj->pages == NULL);
2681
2682 return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2683}
2684
2634int i915_gpu_idle(struct drm_device *dev) 2685int i915_gpu_idle(struct drm_device *dev)
2635{ 2686{
2636 drm_i915_private_t *dev_priv = dev->dev_private; 2687 drm_i915_private_t *dev_priv = dev->dev_private;
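
In the hunks above, i915_gem_object_unbind() becomes i915_vma_unbind(), which tears down one object/VM binding at a time, and the new i915_gem_object_ggtt_unbind() preserves the old single-GTT semantics by looking up the object's GGTT VMA and delegating. The shape of that delegation, modeled with hypothetical types:

#include <stddef.h>
#include <stdio.h>

struct vm;				/* an address space */
struct vma { struct vm *vm; int bound; };
struct object {
	struct vma *ggtt_vma;		/* one binding per VM; GGTT shown */
	int pin_count;
	void *pages;
};

/* Per-binding teardown: the real version also flushes, unmaps, removes
 * the drm_mm node and moves the object to the unbound list when no
 * VMAs remain. */
static int vma_unbind(struct vma *vma)
{
	if (!vma->bound)
		return 0;
	vma->bound = 0;
	return 0;
}

/* Legacy entry point: resolve the GGTT binding, then delegate. */
static int object_ggtt_unbind(struct object *obj)
{
	if (!obj->ggtt_vma || !obj->ggtt_vma->bound)
		return 0;		/* not bound in the GGTT */
	if (obj->pin_count)
		return -16;		/* -EBUSY: still pinned */
	return vma_unbind(obj->ggtt_vma);
}

int main(void)
{
	struct vma v = { NULL, 1 };
	struct object o = { &v, 0, NULL };

	printf("unbind -> %d, bound=%d\n", object_ggtt_unbind(&o), v.bound);
	return 0;
}
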
@@ -2681,12 +2732,12 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
2681 POSTING_READ(fence_reg); 2732 POSTING_READ(fence_reg);
2682 2733
2683 if (obj) { 2734 if (obj) {
2684 u32 size = obj->gtt_space->size; 2735 u32 size = i915_gem_obj_ggtt_size(obj);
2685 uint64_t val; 2736 uint64_t val;
2686 2737
2687 val = (uint64_t)((obj->gtt_offset + size - 4096) & 2738 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
2688 0xfffff000) << 32; 2739 0xfffff000) << 32;
2689 val |= obj->gtt_offset & 0xfffff000; 2740 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
2690 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift; 2741 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2691 if (obj->tiling_mode == I915_TILING_Y) 2742 if (obj->tiling_mode == I915_TILING_Y)
2692 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 2743 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
@@ -2710,15 +2761,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
2710 u32 val; 2761 u32 val;
2711 2762
2712 if (obj) { 2763 if (obj) {
2713 u32 size = obj->gtt_space->size; 2764 u32 size = i915_gem_obj_ggtt_size(obj);
2714 int pitch_val; 2765 int pitch_val;
2715 int tile_width; 2766 int tile_width;
2716 2767
2717 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) || 2768 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
2718 (size & -size) != size || 2769 (size & -size) != size ||
2719 (obj->gtt_offset & (size - 1)), 2770 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2720 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", 2771 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2721 obj->gtt_offset, obj->map_and_fenceable, size); 2772 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
2722 2773
2723 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) 2774 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2724 tile_width = 128; 2775 tile_width = 128;
@@ -2729,7 +2780,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
2729 pitch_val = obj->stride / tile_width; 2780 pitch_val = obj->stride / tile_width;
2730 pitch_val = ffs(pitch_val) - 1; 2781 pitch_val = ffs(pitch_val) - 1;
2731 2782
2732 val = obj->gtt_offset; 2783 val = i915_gem_obj_ggtt_offset(obj);
2733 if (obj->tiling_mode == I915_TILING_Y) 2784 if (obj->tiling_mode == I915_TILING_Y)
2734 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 2785 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2735 val |= I915_FENCE_SIZE_BITS(size); 2786 val |= I915_FENCE_SIZE_BITS(size);
@@ -2754,19 +2805,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
2754 uint32_t val; 2805 uint32_t val;
2755 2806
2756 if (obj) { 2807 if (obj) {
2757 u32 size = obj->gtt_space->size; 2808 u32 size = i915_gem_obj_ggtt_size(obj);
2758 uint32_t pitch_val; 2809 uint32_t pitch_val;
2759 2810
2760 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) || 2811 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
2761 (size & -size) != size || 2812 (size & -size) != size ||
2762 (obj->gtt_offset & (size - 1)), 2813 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2763 "object 0x%08x not 512K or pot-size 0x%08x aligned\n", 2814 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2764 obj->gtt_offset, size); 2815 i915_gem_obj_ggtt_offset(obj), size);
2765 2816
2766 pitch_val = obj->stride / 128; 2817 pitch_val = obj->stride / 128;
2767 pitch_val = ffs(pitch_val) - 1; 2818 pitch_val = ffs(pitch_val) - 1;
2768 2819
2769 val = obj->gtt_offset; 2820 val = i915_gem_obj_ggtt_offset(obj);
2770 if (obj->tiling_mode == I915_TILING_Y) 2821 if (obj->tiling_mode == I915_TILING_Y)
2771 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 2822 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2772 val |= I830_FENCE_SIZE_BITS(size); 2823 val |= I830_FENCE_SIZE_BITS(size);
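
All three fence writers above switch from obj->gtt_space/obj->gtt_offset to the i915_gem_obj_ggtt_*() accessors without changing the register layout. For the i965 variant, the 64-bit fence value packs the end address into the high word and the start address into the low word, plus pitch and tiling bits. A standalone recomputation of that packing; the arithmetic is copied from the hunk, while the shift and valid-bit constants are taken on the assumption they match i915_reg.h (the pitch shift differs per platform, so it is a parameter here):

#include <stdint.h>
#include <stdio.h>

#define FENCE_TILING_Y_SHIFT	12	/* assumed I965_FENCE_TILING_Y_SHIFT */
#define FENCE_REG_VALID		1	/* assumed I965_FENCE_REG_VALID */

/* Packs a gen4+ fence register value, following i965_write_fence_reg():
 * end address in the high 32 bits, start in the low 32 bits, pitch in
 * units of 128 bytes, minus one. */
static uint64_t i965_fence_val(uint64_t offset, uint32_t size,
			       uint32_t stride, int tiling_y,
			       int pitch_shift)
{
	uint64_t val;

	val = ((offset + size - 4096) & 0xfffff000) << 32;
	val |= offset & 0xfffff000;
	val |= (uint64_t)((stride / 128) - 1) << pitch_shift;
	if (tiling_y)
		val |= 1ULL << FENCE_TILING_Y_SHIFT;
	val |= FENCE_REG_VALID;
	return val;
}

int main(void)
{
	/* 1 MiB object at GGTT offset 0x100000, 512-byte stride, Y-tiled;
	 * pitch shift 2 is the assumed pre-gen6 value. */
	printf("fence = 0x%016llx\n",
	       (unsigned long long)i965_fence_val(0x100000, 0x100000,
						  512, 1, 2));
	return 0;
}
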
@@ -2795,6 +2846,10 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
2795 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj)) 2846 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2796 mb(); 2847 mb();
2797 2848
2849 WARN(obj && (!obj->stride || !obj->tiling_mode),
2850 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2851 obj->stride, obj->tiling_mode);
2852
2798 switch (INTEL_INFO(dev)->gen) { 2853 switch (INTEL_INFO(dev)->gen) {
2799 case 7: 2854 case 7:
2800 case 6: 2855 case 6:
@@ -2836,6 +2891,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2836 fence->obj = NULL; 2891 fence->obj = NULL;
2837 list_del_init(&fence->lru_list); 2892 list_del_init(&fence->lru_list);
2838 } 2893 }
2894 obj->fence_dirty = false;
2839} 2895}
2840 2896
2841static int 2897static int
@@ -2965,7 +3021,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2965 return 0; 3021 return 0;
2966 3022
2967 i915_gem_object_update_fence(obj, reg, enable); 3023 i915_gem_object_update_fence(obj, reg, enable);
2968 obj->fence_dirty = false;
2969 3024
2970 return 0; 3025 return 0;
2971} 3026}
@@ -2983,7 +3038,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2983 if (HAS_LLC(dev)) 3038 if (HAS_LLC(dev))
2984 return true; 3039 return true;
2985 3040
2986 if (gtt_space == NULL) 3041 if (!drm_mm_node_allocated(gtt_space))
2987 return true; 3042 return true;
2988 3043
2989 if (list_empty(&gtt_space->node_list)) 3044 if (list_empty(&gtt_space->node_list))
@@ -3016,8 +3071,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
3016 3071
3017 if (obj->cache_level != obj->gtt_space->color) { 3072 if (obj->cache_level != obj->gtt_space->color) {
3018 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n", 3073 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3019 obj->gtt_space->start, 3074 i915_gem_obj_ggtt_offset(obj),
3020 obj->gtt_space->start + obj->gtt_space->size, 3075 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3021 obj->cache_level, 3076 obj->cache_level,
3022 obj->gtt_space->color); 3077 obj->gtt_space->color);
3023 err++; 3078 err++;
@@ -3028,8 +3083,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
3028 obj->gtt_space, 3083 obj->gtt_space,
3029 obj->cache_level)) { 3084 obj->cache_level)) {
3030 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n", 3085 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3031 obj->gtt_space->start, 3086 i915_gem_obj_ggtt_offset(obj),
3032 obj->gtt_space->start + obj->gtt_space->size, 3087 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3033 obj->cache_level); 3088 obj->cache_level);
3034 err++; 3089 err++;
3035 continue; 3090 continue;
@@ -3044,18 +3099,18 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
3044 * Finds free space in the GTT aperture and binds the object there. 3099 * Finds free space in the GTT aperture and binds the object there.
3045 */ 3100 */
3046static int 3101static int
3047i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, 3102i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3048 unsigned alignment, 3103 struct i915_address_space *vm,
3049 bool map_and_fenceable, 3104 unsigned alignment,
3050 bool nonblocking) 3105 bool map_and_fenceable,
3106 bool nonblocking)
3051{ 3107{
3052 struct drm_device *dev = obj->base.dev; 3108 struct drm_device *dev = obj->base.dev;
3053 drm_i915_private_t *dev_priv = dev->dev_private; 3109 drm_i915_private_t *dev_priv = dev->dev_private;
3054 struct drm_mm_node *node;
3055 u32 size, fence_size, fence_alignment, unfenced_alignment; 3110 u32 size, fence_size, fence_alignment, unfenced_alignment;
3056 bool mappable, fenceable; 3111 size_t gtt_max =
3057 size_t gtt_max = map_and_fenceable ? 3112 map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
3058 dev_priv->gtt.mappable_end : dev_priv->gtt.total; 3113 struct i915_vma *vma;
3059 int ret; 3114 int ret;
3060 3115
3061 fence_size = i915_gem_get_gtt_size(dev, 3116 fence_size = i915_gem_get_gtt_size(dev,
@@ -3096,77 +3151,89 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
3096 3151
3097 i915_gem_object_pin_pages(obj); 3152 i915_gem_object_pin_pages(obj);
3098 3153
3099 node = kzalloc(sizeof(*node), GFP_KERNEL); 3154 BUG_ON(!i915_is_ggtt(vm));
3100 if (node == NULL) { 3155
3101 i915_gem_object_unpin_pages(obj); 3156 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3102 return -ENOMEM; 3157 if (IS_ERR(vma)) {
3158 ret = PTR_ERR(vma);
3159 goto err_unpin;
3103 } 3160 }
3104 3161
3162 /* For now we only ever use 1 vma per object */
3163 WARN_ON(!list_is_singular(&obj->vma_list));
3164
3105search_free: 3165search_free:
3106 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node, 3166 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3107 size, alignment, 3167 size, alignment,
3108 obj->cache_level, 0, gtt_max); 3168 obj->cache_level, 0, gtt_max,
3169 DRM_MM_SEARCH_DEFAULT);
3109 if (ret) { 3170 if (ret) {
3110 ret = i915_gem_evict_something(dev, size, alignment, 3171 ret = i915_gem_evict_something(dev, vm, size, alignment,
3111 obj->cache_level, 3172 obj->cache_level,
3112 map_and_fenceable, 3173 map_and_fenceable,
3113 nonblocking); 3174 nonblocking);
3114 if (ret == 0) 3175 if (ret == 0)
3115 goto search_free; 3176 goto search_free;
3116 3177
3117 i915_gem_object_unpin_pages(obj); 3178 goto err_free_vma;
3118 kfree(node);
3119 return ret;
3120 } 3179 }
3121 if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) { 3180 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3122 i915_gem_object_unpin_pages(obj); 3181 obj->cache_level))) {
3123 drm_mm_put_block(node); 3182 ret = -EINVAL;
3124 return -EINVAL; 3183 goto err_remove_node;
3125 } 3184 }
3126 3185
3127 ret = i915_gem_gtt_prepare_object(obj); 3186 ret = i915_gem_gtt_prepare_object(obj);
3128 if (ret) { 3187 if (ret)
3129 i915_gem_object_unpin_pages(obj); 3188 goto err_remove_node;
3130 drm_mm_put_block(node);
3131 return ret;
3132 }
3133 3189
3134 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); 3190 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3135 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 3191 list_add_tail(&vma->mm_list, &vm->inactive_list);
3136 3192
3137 obj->gtt_space = node; 3193 if (i915_is_ggtt(vm)) {
3138 obj->gtt_offset = node->start; 3194 bool mappable, fenceable;
3139 3195
3140 fenceable = 3196 fenceable = (vma->node.size == fence_size &&
3141 node->size == fence_size && 3197 (vma->node.start & (fence_alignment - 1)) == 0);
3142 (node->start & (fence_alignment - 1)) == 0;
3143 3198
3144 mappable = 3199 mappable = (vma->node.start + obj->base.size <=
3145 obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end; 3200 dev_priv->gtt.mappable_end);
3146 3201
3147 obj->map_and_fenceable = mappable && fenceable; 3202 obj->map_and_fenceable = mappable && fenceable;
3203 }
3148 3204
3149 trace_i915_gem_object_bind(obj, map_and_fenceable); 3205 WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
3206
3207 trace_i915_vma_bind(vma, map_and_fenceable);
3150 i915_gem_verify_gtt(dev); 3208 i915_gem_verify_gtt(dev);
3151 return 0; 3209 return 0;
3210
3211err_remove_node:
3212 drm_mm_remove_node(&vma->node);
3213err_free_vma:
3214 i915_gem_vma_destroy(vma);
3215err_unpin:
3216 i915_gem_object_unpin_pages(obj);
3217 return ret;
3152} 3218}
3153 3219
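
The rewritten bind path above replaces the kzalloc'd drm_mm_node with a VMA looked up (or created) per address space, and converts the early-return error handling into a single goto-unwind ladder: remove the node, destroy the VMA, unpin the pages, in reverse order of acquisition. A compact model of the allocate-evict-retry loop with the same unwind structure; the allocator and eviction path are stubs, so this is illustrative only:

#include <stdio.h>

/* Stubs standing in for drm_mm, eviction and GTT setup. */
static int space_left;

static int mm_insert_node(void)	/* drm_mm_insert_node_in_range_generic */
{
	return space_left ? 0 : -28;	/* -ENOSPC while the range is full */
}

static int evict_something(void)	/* i915_gem_evict_something */
{
	space_left = 1;			/* pretend eviction freed room */
	return 0;
}

static int gtt_prepare(void)		/* i915_gem_gtt_prepare_object */
{
	return 0;
}

static int bind_to_vm(void)
{
	int ret;

	/* pin pages, look up or create the VMA (elided) */

search_free:
	ret = mm_insert_node();
	if (ret) {
		ret = evict_something();
		if (ret == 0)
			goto search_free;	/* retry after eviction */
		goto err_free_vma;
	}

	ret = gtt_prepare();
	if (ret)
		goto err_remove_node;

	return 0;		/* bound: move to bound/inactive lists */

err_remove_node:
	/* drm_mm_remove_node(&vma->node); */
err_free_vma:
	/* i915_gem_vma_destroy(vma); */
	/* i915_gem_object_unpin_pages(obj); */
	return ret;
}

int main(void)
{
	space_left = 0;			/* force one eviction round */
	printf("bind -> %d\n", bind_to_vm());
	return 0;
}
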
3154void 3220bool
3155i915_gem_clflush_object(struct drm_i915_gem_object *obj) 3221i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3222 bool force)
3156{ 3223{
3157 /* If we don't have a page list set up, then we're not pinned 3224 /* If we don't have a page list set up, then we're not pinned
3158 * to GPU, and we can ignore the cache flush because it'll happen 3225 * to GPU, and we can ignore the cache flush because it'll happen
3159 * again at bind time. 3226 * again at bind time.
3160 */ 3227 */
3161 if (obj->pages == NULL) 3228 if (obj->pages == NULL)
3162 return; 3229 return false;
3163 3230
3164 /* 3231 /*
3165 * Stolen memory is always coherent with the GPU as it is explicitly 3232 * Stolen memory is always coherent with the GPU as it is explicitly
3166 * marked as wc by the system, or the system is cache-coherent. 3233 * marked as wc by the system, or the system is cache-coherent.
3167 */ 3234 */
3168 if (obj->stolen) 3235 if (obj->stolen)
3169 return; 3236 return false;
3170 3237
3171 /* If the GPU is snooping the contents of the CPU cache, 3238 /* If the GPU is snooping the contents of the CPU cache,
3172 * we do not need to manually clear the CPU cache lines. However, 3239 * we do not need to manually clear the CPU cache lines. However,
@@ -3176,12 +3243,13 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
3176 * snooping behaviour occurs naturally as the result of our domain 3243 * snooping behaviour occurs naturally as the result of our domain
3177 * tracking. 3244 * tracking.
3178 */ 3245 */
3179 if (obj->cache_level != I915_CACHE_NONE) 3246 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3180 return; 3247 return false;
3181 3248
3182 trace_i915_gem_object_clflush(obj); 3249 trace_i915_gem_object_clflush(obj);
3183
3184 drm_clflush_sg(obj->pages); 3250 drm_clflush_sg(obj->pages);
3251
3252 return true;
3185} 3253}
3186 3254
3187/** Flushes the GTT write domain for the object if it's dirty. */ 3255/** Flushes the GTT write domain for the object if it's dirty. */
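
Changing i915_gem_clflush_object() to return bool lets every caller skip the chipset flush when nothing was actually flushed: the object has no pages yet, lives in always-coherent stolen memory, or is already coherent and force is not set. The call sites then read as if (clflush(obj, force)) chipset_flush(dev). A trimmed model of that contract:

#include <stdbool.h>
#include <stdio.h>

struct object { void *pages; bool stolen; bool coherent; };

static bool chipset_flushed;
static void chipset_flush(void) { chipset_flushed = true; }

/* Returns true only when cachelines were actually flushed, so the
 * caller can make the chipset flush conditional. */
static bool clflush_object(struct object *obj, bool force)
{
	if (!obj->pages)
		return false;	/* flush happens again at bind time */
	if (obj->stolen)
		return false;	/* stolen memory is always coherent */
	if (!force && obj->coherent)
		return false;	/* snooped: no manual flush needed */

	/* drm_clflush_sg(obj->pages) would run here */
	return true;
}

int main(void)
{
	struct object coherent_obj = { (void *)1, false, true };

	if (clflush_object(&coherent_obj, false))
		chipset_flush();
	printf("chipset flushed: %d\n", chipset_flushed);
	return 0;
}
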
@@ -3213,15 +3281,17 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3213 3281
3214/** Flushes the CPU write domain for the object if it's dirty. */ 3282/** Flushes the CPU write domain for the object if it's dirty. */
3215static void 3283static void
3216i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) 3284i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3285 bool force)
3217{ 3286{
3218 uint32_t old_write_domain; 3287 uint32_t old_write_domain;
3219 3288
3220 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) 3289 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3221 return; 3290 return;
3222 3291
3223 i915_gem_clflush_object(obj); 3292 if (i915_gem_clflush_object(obj, force))
3224 i915_gem_chipset_flush(obj->base.dev); 3293 i915_gem_chipset_flush(obj->base.dev);
3294
3225 old_write_domain = obj->base.write_domain; 3295 old_write_domain = obj->base.write_domain;
3226 obj->base.write_domain = 0; 3296 obj->base.write_domain = 0;
3227 3297
@@ -3244,7 +3314,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3244 int ret; 3314 int ret;
3245 3315
3246 /* Not valid to be called on unbound objects. */ 3316 /* Not valid to be called on unbound objects. */
3247 if (obj->gtt_space == NULL) 3317 if (!i915_gem_obj_bound_any(obj))
3248 return -EINVAL; 3318 return -EINVAL;
3249 3319
3250 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 3320 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3254,7 +3324,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3254 if (ret) 3324 if (ret)
3255 return ret; 3325 return ret;
3256 3326
3257 i915_gem_object_flush_cpu_write_domain(obj); 3327 i915_gem_object_flush_cpu_write_domain(obj, false);
3258 3328
3259 /* Serialise direct access to this object with the barriers for 3329 /* Serialise direct access to this object with the barriers for
3260 * coherent writes from the GPU, by effectively invalidating the 3330 * coherent writes from the GPU, by effectively invalidating the
@@ -3282,8 +3352,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3282 old_write_domain); 3352 old_write_domain);
3283 3353
3284 /* And bump the LRU for this access */ 3354 /* And bump the LRU for this access */
3285 if (i915_gem_object_is_inactive(obj)) 3355 if (i915_gem_object_is_inactive(obj)) {
3286 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 3356 struct i915_vma *vma = i915_gem_obj_to_vma(obj,
3357 &dev_priv->gtt.base);
3358 if (vma)
3359 list_move_tail(&vma->mm_list,
3360 &dev_priv->gtt.base.inactive_list);
3361
3362 }
3287 3363
3288 return 0; 3364 return 0;
3289} 3365}
@@ -3293,6 +3369,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3293{ 3369{
3294 struct drm_device *dev = obj->base.dev; 3370 struct drm_device *dev = obj->base.dev;
3295 drm_i915_private_t *dev_priv = dev->dev_private; 3371 drm_i915_private_t *dev_priv = dev->dev_private;
3372 struct i915_vma *vma;
3296 int ret; 3373 int ret;
3297 3374
3298 if (obj->cache_level == cache_level) 3375 if (obj->cache_level == cache_level)
@@ -3303,13 +3380,17 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3303 return -EBUSY; 3380 return -EBUSY;
3304 } 3381 }
3305 3382
3306 if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) { 3383 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3307 ret = i915_gem_object_unbind(obj); 3384 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3308 if (ret) 3385 ret = i915_vma_unbind(vma);
3309 return ret; 3386 if (ret)
3387 return ret;
3388
3389 break;
3390 }
3310 } 3391 }
3311 3392
3312 if (obj->gtt_space) { 3393 if (i915_gem_obj_bound_any(obj)) {
3313 ret = i915_gem_object_finish_gpu(obj); 3394 ret = i915_gem_object_finish_gpu(obj);
3314 if (ret) 3395 if (ret)
3315 return ret; 3396 return ret;
@@ -3331,11 +3412,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3331 if (obj->has_aliasing_ppgtt_mapping) 3412 if (obj->has_aliasing_ppgtt_mapping)
3332 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, 3413 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3333 obj, cache_level); 3414 obj, cache_level);
3334
3335 obj->gtt_space->color = cache_level;
3336 } 3415 }
3337 3416
3338 if (cache_level == I915_CACHE_NONE) { 3417 list_for_each_entry(vma, &obj->vma_list, vma_link)
3418 vma->node.color = cache_level;
3419 obj->cache_level = cache_level;
3420
3421 if (cpu_write_needs_clflush(obj)) {
3339 u32 old_read_domains, old_write_domain; 3422 u32 old_read_domains, old_write_domain;
3340 3423
3341 /* If we're coming from LLC cached, then we haven't 3424 /* If we're coming from LLC cached, then we haven't
@@ -3345,7 +3428,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3345 * Just set it to the CPU cache for now. 3428 * Just set it to the CPU cache for now.
3346 */ 3429 */
3347 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU); 3430 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3348 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3349 3431
3350 old_read_domains = obj->base.read_domains; 3432 old_read_domains = obj->base.read_domains;
3351 old_write_domain = obj->base.write_domain; 3433 old_write_domain = obj->base.write_domain;
@@ -3358,7 +3440,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3358 old_write_domain); 3440 old_write_domain);
3359 } 3441 }
3360 3442
3361 obj->cache_level = cache_level;
3362 i915_gem_verify_gtt(dev); 3443 i915_gem_verify_gtt(dev);
3363 return 0; 3444 return 0;
3364} 3445}
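Net effect of the set_cache_level() hunks above: a cache-level change now unbinds only those vmas whose drm_mm placement would become invalid for the new level, colours every surviving node with the level, and gates the CPU-domain fixup on cpu_write_needs_clflush() rather than on I915_CACHE_NONE alone. Callers are unchanged; the context-setup path later in this diff is a representative use:

	ret = i915_gem_object_set_cache_level(ctx->obj, I915_CACHE_L3_LLC);
	/* Failure shouldn't ever happen this early */
	if (WARN_ON(ret))
		goto err_out;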
@@ -3380,7 +3461,20 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3380 goto unlock; 3461 goto unlock;
3381 } 3462 }
3382 3463
3383 args->caching = obj->cache_level != I915_CACHE_NONE; 3464 switch (obj->cache_level) {
3465 case I915_CACHE_LLC:
3466 case I915_CACHE_L3_LLC:
3467 args->caching = I915_CACHING_CACHED;
3468 break;
3469
3470 case I915_CACHE_WT:
3471 args->caching = I915_CACHING_DISPLAY;
3472 break;
3473
3474 default:
3475 args->caching = I915_CACHING_NONE;
3476 break;
3477 }
3384 3478
3385 drm_gem_object_unreference(&obj->base); 3479 drm_gem_object_unreference(&obj->base);
3386unlock: 3480unlock:
@@ -3403,6 +3497,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3403 case I915_CACHING_CACHED: 3497 case I915_CACHING_CACHED:
3404 level = I915_CACHE_LLC; 3498 level = I915_CACHE_LLC;
3405 break; 3499 break;
3500 case I915_CACHING_DISPLAY:
3501 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3502 break;
3406 default: 3503 default:
3407 return -EINVAL; 3504 return -EINVAL;
3408 } 3505 }
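Taken together, the two ioctl hunks above turn the caching interface from a boolean into a tri-state: LLC and L3-LLC report as CACHED, the new write-through level as DISPLAY, and everything else as NONE. A minimal userspace-style sketch of reading the mode back, assuming libdrm's drmIoctl() and struct drm_i915_gem_caching from i915_drm.h (fd and bo_handle are hypothetical):

	struct drm_i915_gem_caching arg = { .handle = bo_handle };

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg) == 0) {
		/* arg.caching is now I915_CACHING_NONE,
		 * I915_CACHING_CACHED or I915_CACHING_DISPLAY */
	}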
@@ -3425,6 +3522,22 @@ unlock:
3425 return ret; 3522 return ret;
3426} 3523}
3427 3524
3525static bool is_pin_display(struct drm_i915_gem_object *obj)
3526{
3527 /* There are 3 sources that pin objects:
3528 * 1. The display engine (scanouts, sprites, cursors);
3529 * 2. Reservations for execbuffer;
3530 * 3. The user.
3531 *
3532 * We can ignore reservations as we hold the struct_mutex and
3533 * are only called outside of the reservation path. The user
3534 * can only increment pin_count once, and so if after
3535 * subtracting the potential reference by the user, any pin_count
3536 * remains, it must be due to another use by the display engine.
3537 */
3538 return obj->pin_count - !!obj->user_pin_count;
3539}
3540
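The comment in is_pin_display() relies on an accounting invariant worth spelling out: outside the execbuffer reservation path and under struct_mutex, pin_count is the sum of display-engine pins plus at most one user pin. The same arithmetic in isolation, as a standalone sketch (the function name is illustrative, not part of the driver):

	/* After subtracting the at-most-one user reference, any pins
	 * left over must belong to the display engine. */
	static bool pin_display_sketch(unsigned int pin_count,
				       unsigned int user_pin_count)
	{
		return (pin_count - (user_pin_count ? 1 : 0)) != 0;
	}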
3428/* 3541/*
3429 * Prepare buffer for display plane (scanout, cursors, etc). 3542 * Prepare buffer for display plane (scanout, cursors, etc).
3430 * Can be called from an uninterruptible phase (modesetting) and allows 3543 * Can be called from an uninterruptible phase (modesetting) and allows
@@ -3444,6 +3557,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3444 return ret; 3557 return ret;
3445 } 3558 }
3446 3559
3560 /* Mark the pin_display early so that we account for the
3561 * display coherency whilst setting up the cache domains.
3562 */
3563 obj->pin_display = true;
3564
3447 /* The display engine is not coherent with the LLC cache on gen6. As 3565 /* The display engine is not coherent with the LLC cache on gen6. As
3448 * a result, we make sure that the pinning that is about to occur is 3566 * a result, we make sure that the pinning that is about to occur is
3449 * done with uncached PTEs. This is lowest common denominator for all 3567 * done with uncached PTEs. This is lowest common denominator for all
@@ -3453,19 +3571,20 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3453 * of uncaching, which would allow us to flush all the LLC-cached data 3571 * of uncaching, which would allow us to flush all the LLC-cached data
3454 * with that bit in the PTE to main memory with just one PIPE_CONTROL. 3572 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3455 */ 3573 */
3456 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE); 3574 ret = i915_gem_object_set_cache_level(obj,
3575 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3457 if (ret) 3576 if (ret)
3458 return ret; 3577 goto err_unpin_display;
3459 3578
3460 /* As the user may map the buffer once pinned in the display plane 3579 /* As the user may map the buffer once pinned in the display plane
3461 * (e.g. libkms for the bootup splash), we have to ensure that we 3580 * (e.g. libkms for the bootup splash), we have to ensure that we
3462 * always use map_and_fenceable for all scanout buffers. 3581 * always use map_and_fenceable for all scanout buffers.
3463 */ 3582 */
3464 ret = i915_gem_object_pin(obj, alignment, true, false); 3583 ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
3465 if (ret) 3584 if (ret)
3466 return ret; 3585 goto err_unpin_display;
3467 3586
3468 i915_gem_object_flush_cpu_write_domain(obj); 3587 i915_gem_object_flush_cpu_write_domain(obj, true);
3469 3588
3470 old_write_domain = obj->base.write_domain; 3589 old_write_domain = obj->base.write_domain;
3471 old_read_domains = obj->base.read_domains; 3590 old_read_domains = obj->base.read_domains;
@@ -3481,6 +3600,17 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3481 old_write_domain); 3600 old_write_domain);
3482 3601
3483 return 0; 3602 return 0;
3603
3604err_unpin_display:
3605 obj->pin_display = is_pin_display(obj);
3606 return ret;
3607}
3608
3609void
3610i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3611{
3612 i915_gem_object_unpin(obj);
3613 obj->pin_display = is_pin_display(obj);
3484} 3614}
3485 3615
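With the err_unpin_display unwind path and the new unpin helper, display pinning is now a symmetric pair. A hedged usage sketch, assuming the alignment and pipelined-ring parameters the function already carried before this diff:

	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		return ret;

	/* ... point the plane, cursor or sprite at the object ... */

	i915_gem_object_unpin_from_display_plane(obj);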
3486int 3616int
@@ -3526,7 +3656,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3526 3656
3527 /* Flush the CPU cache if it's still invalid. */ 3657 /* Flush the CPU cache if it's still invalid. */
3528 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { 3658 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3529 i915_gem_clflush_object(obj); 3659 i915_gem_clflush_object(obj, false);
3530 3660
3531 obj->base.read_domains |= I915_GEM_DOMAIN_CPU; 3661 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3532 } 3662 }
@@ -3604,37 +3734,44 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3604 3734
3605int 3735int
3606i915_gem_object_pin(struct drm_i915_gem_object *obj, 3736i915_gem_object_pin(struct drm_i915_gem_object *obj,
3737 struct i915_address_space *vm,
3607 uint32_t alignment, 3738 uint32_t alignment,
3608 bool map_and_fenceable, 3739 bool map_and_fenceable,
3609 bool nonblocking) 3740 bool nonblocking)
3610{ 3741{
3742 struct i915_vma *vma;
3611 int ret; 3743 int ret;
3612 3744
3613 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) 3745 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3614 return -EBUSY; 3746 return -EBUSY;
3615 3747
3616 if (obj->gtt_space != NULL) { 3748 WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3617 if ((alignment && obj->gtt_offset & (alignment - 1)) || 3749
3750 vma = i915_gem_obj_to_vma(obj, vm);
3751
3752 if (vma) {
3753 if ((alignment &&
3754 vma->node.start & (alignment - 1)) ||
3618 (map_and_fenceable && !obj->map_and_fenceable)) { 3755 (map_and_fenceable && !obj->map_and_fenceable)) {
3619 WARN(obj->pin_count, 3756 WARN(obj->pin_count,
3620 "bo is already pinned with incorrect alignment:" 3757 "bo is already pinned with incorrect alignment:"
3621 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," 3758 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3622 " obj->map_and_fenceable=%d\n", 3759 " obj->map_and_fenceable=%d\n",
3623 obj->gtt_offset, alignment, 3760 i915_gem_obj_offset(obj, vm), alignment,
3624 map_and_fenceable, 3761 map_and_fenceable,
3625 obj->map_and_fenceable); 3762 obj->map_and_fenceable);
3626 ret = i915_gem_object_unbind(obj); 3763 ret = i915_vma_unbind(vma);
3627 if (ret) 3764 if (ret)
3628 return ret; 3765 return ret;
3629 } 3766 }
3630 } 3767 }
3631 3768
3632 if (obj->gtt_space == NULL) { 3769 if (!i915_gem_obj_bound(obj, vm)) {
3633 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3770 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3634 3771
3635 ret = i915_gem_object_bind_to_gtt(obj, alignment, 3772 ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
3636 map_and_fenceable, 3773 map_and_fenceable,
3637 nonblocking); 3774 nonblocking);
3638 if (ret) 3775 if (ret)
3639 return ret; 3776 return ret;
3640 3777
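i915_gem_object_pin() now names its target address space explicitly; the i915_gem_obj_ggtt_pin() calls threaded through the rest of this diff are the global-GTT convenience wrapper. A sketch of the two spellings, assuming the wrapper simply passes &dev_priv->gtt.base:

	/* Explicit VM, e.g. a per-context PPGTT once those exist: */
	ret = i915_gem_object_pin(obj, vm, alignment,
				  false /* map_and_fenceable */,
				  false /* nonblocking */);

	/* Global-GTT shorthand used throughout this series: */
	ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);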
@@ -3655,7 +3792,7 @@ void
3655i915_gem_object_unpin(struct drm_i915_gem_object *obj) 3792i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3656{ 3793{
3657 BUG_ON(obj->pin_count == 0); 3794 BUG_ON(obj->pin_count == 0);
3658 BUG_ON(obj->gtt_space == NULL); 3795 BUG_ON(!i915_gem_obj_bound_any(obj));
3659 3796
3660 if (--obj->pin_count == 0) 3797 if (--obj->pin_count == 0)
3661 obj->pin_mappable = false; 3798 obj->pin_mappable = false;
@@ -3693,7 +3830,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3693 } 3830 }
3694 3831
3695 if (obj->user_pin_count == 0) { 3832 if (obj->user_pin_count == 0) {
3696 ret = i915_gem_object_pin(obj, args->alignment, true, false); 3833 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
3697 if (ret) 3834 if (ret)
3698 goto out; 3835 goto out;
3699 } 3836 }
@@ -3701,11 +3838,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3701 obj->user_pin_count++; 3838 obj->user_pin_count++;
3702 obj->pin_filp = file; 3839 obj->pin_filp = file;
3703 3840
3704 /* XXX - flush the CPU caches for pinned objects 3841 args->offset = i915_gem_obj_ggtt_offset(obj);
3705 * as the X server doesn't manage domains yet
3706 */
3707 i915_gem_object_flush_cpu_write_domain(obj);
3708 args->offset = obj->gtt_offset;
3709out: 3842out:
3710 drm_gem_object_unreference(&obj->base); 3843 drm_gem_object_unreference(&obj->base);
3711unlock: 3844unlock:
@@ -3844,10 +3977,11 @@ unlock:
3844void i915_gem_object_init(struct drm_i915_gem_object *obj, 3977void i915_gem_object_init(struct drm_i915_gem_object *obj,
3845 const struct drm_i915_gem_object_ops *ops) 3978 const struct drm_i915_gem_object_ops *ops)
3846{ 3979{
3847 INIT_LIST_HEAD(&obj->mm_list);
3848 INIT_LIST_HEAD(&obj->global_list); 3980 INIT_LIST_HEAD(&obj->global_list);
3849 INIT_LIST_HEAD(&obj->ring_list); 3981 INIT_LIST_HEAD(&obj->ring_list);
3850 INIT_LIST_HEAD(&obj->exec_list); 3982 INIT_LIST_HEAD(&obj->exec_list);
3983 INIT_LIST_HEAD(&obj->obj_exec_link);
3984 INIT_LIST_HEAD(&obj->vma_list);
3851 3985
3852 obj->ops = ops; 3986 obj->ops = ops;
3853 3987
@@ -3912,6 +4046,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3912 } else 4046 } else
3913 obj->cache_level = I915_CACHE_NONE; 4047 obj->cache_level = I915_CACHE_NONE;
3914 4048
4049 trace_i915_gem_object_create(obj);
4050
3915 return obj; 4051 return obj;
3916} 4052}
3917 4053
@@ -3927,6 +4063,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
3927 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 4063 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3928 struct drm_device *dev = obj->base.dev; 4064 struct drm_device *dev = obj->base.dev;
3929 drm_i915_private_t *dev_priv = dev->dev_private; 4065 drm_i915_private_t *dev_priv = dev->dev_private;
4066 struct i915_vma *vma, *next;
3930 4067
3931 trace_i915_gem_object_destroy(obj); 4068 trace_i915_gem_object_destroy(obj);
3932 4069
@@ -3934,15 +4071,21 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
3934 i915_gem_detach_phys_object(dev, obj); 4071 i915_gem_detach_phys_object(dev, obj);
3935 4072
3936 obj->pin_count = 0; 4073 obj->pin_count = 0;
3937 if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) { 4074 /* NB: 0 or 1 elements */
3938 bool was_interruptible; 4075 WARN_ON(!list_empty(&obj->vma_list) &&
4076 !list_is_singular(&obj->vma_list));
4077 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4078 int ret = i915_vma_unbind(vma);
4079 if (WARN_ON(ret == -ERESTARTSYS)) {
4080 bool was_interruptible;
3939 4081
3940 was_interruptible = dev_priv->mm.interruptible; 4082 was_interruptible = dev_priv->mm.interruptible;
3941 dev_priv->mm.interruptible = false; 4083 dev_priv->mm.interruptible = false;
3942 4084
3943 WARN_ON(i915_gem_object_unbind(obj)); 4085 WARN_ON(i915_vma_unbind(vma));
3944 4086
3945 dev_priv->mm.interruptible = was_interruptible; 4087 dev_priv->mm.interruptible = was_interruptible;
4088 }
3946 } 4089 }
3947 4090
3948 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up 4091 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
@@ -3968,15 +4111,42 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
3968 i915_gem_object_free(obj); 4111 i915_gem_object_free(obj);
3969} 4112}
3970 4113
4114struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
4115 struct i915_address_space *vm)
4116{
4117 struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
4118 if (vma == NULL)
4119 return ERR_PTR(-ENOMEM);
4120
4121 INIT_LIST_HEAD(&vma->vma_link);
4122 INIT_LIST_HEAD(&vma->mm_list);
4123 INIT_LIST_HEAD(&vma->exec_list);
4124 vma->vm = vm;
4125 vma->obj = obj;
4126
4127 /* Keep GGTT vmas first to make debug easier */
4128 if (i915_is_ggtt(vm))
4129 list_add(&vma->vma_link, &obj->vma_list);
4130 else
4131 list_add_tail(&vma->vma_link, &obj->vma_list);
4132
4133 return vma;
4134}
4135
4136void i915_gem_vma_destroy(struct i915_vma *vma)
4137{
4138 WARN_ON(vma->node.allocated);
4139 list_del(&vma->vma_link);
4140 kfree(vma);
4141}
4142
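i915_gem_vma_create() and i915_gem_vma_destroy() bracket the life of one (object, address space) binding: the WARN_ON insists that the vma's drm_mm node has been released (via i915_vma_unbind()) before the vma may be freed, and GGTT vmas are deliberately kept at the head of obj->vma_list. A hedged sketch of the intended entry point, using the lookup-or-create helper defined at the end of this file:

	struct i915_vma *vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* ... allocate vma->node in vm->mm and bind the object's pages ... */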
3971int 4143int
3972i915_gem_idle(struct drm_device *dev) 4144i915_gem_idle(struct drm_device *dev)
3973{ 4145{
3974 drm_i915_private_t *dev_priv = dev->dev_private; 4146 drm_i915_private_t *dev_priv = dev->dev_private;
3975 int ret; 4147 int ret;
3976 4148
3977 mutex_lock(&dev->struct_mutex); 4149 if (dev_priv->ums.mm_suspended) {
3978
3979 if (dev_priv->mm.suspended) {
3980 mutex_unlock(&dev->struct_mutex); 4150 mutex_unlock(&dev->struct_mutex);
3981 return 0; 4151 return 0;
3982 } 4152 }
@@ -3992,18 +4162,11 @@ i915_gem_idle(struct drm_device *dev)
3992 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 4162 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3993 i915_gem_evict_everything(dev); 4163 i915_gem_evict_everything(dev);
3994 4164
3995 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3996 * We need to replace this with a semaphore, or something.
3997 * And not confound mm.suspended!
3998 */
3999 dev_priv->mm.suspended = 1;
4000 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); 4165 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4001 4166
4002 i915_kernel_lost_context(dev); 4167 i915_kernel_lost_context(dev);
4003 i915_gem_cleanup_ringbuffer(dev); 4168 i915_gem_cleanup_ringbuffer(dev);
4004 4169
4005 mutex_unlock(&dev->struct_mutex);
4006
4007 /* Cancel the retire work handler, which should be idle now. */ 4170 /* Cancel the retire work handler, which should be idle now. */
4008 cancel_delayed_work_sync(&dev_priv->mm.retire_work); 4171 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4009 4172
@@ -4136,8 +4299,8 @@ i915_gem_init_hw(struct drm_device *dev)
4136 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) 4299 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4137 return -EIO; 4300 return -EIO;
4138 4301
4139 if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) 4302 if (dev_priv->ellc_size)
4140 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000); 4303 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4141 4304
4142 if (HAS_PCH_NOP(dev)) { 4305 if (HAS_PCH_NOP(dev)) {
4143 u32 temp = I915_READ(GEN7_MSG_CTL); 4306 u32 temp = I915_READ(GEN7_MSG_CTL);
@@ -4213,7 +4376,7 @@ int
4213i915_gem_entervt_ioctl(struct drm_device *dev, void *data, 4376i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4214 struct drm_file *file_priv) 4377 struct drm_file *file_priv)
4215{ 4378{
4216 drm_i915_private_t *dev_priv = dev->dev_private; 4379 struct drm_i915_private *dev_priv = dev->dev_private;
4217 int ret; 4380 int ret;
4218 4381
4219 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4382 if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -4225,7 +4388,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4225 } 4388 }
4226 4389
4227 mutex_lock(&dev->struct_mutex); 4390 mutex_lock(&dev->struct_mutex);
4228 dev_priv->mm.suspended = 0; 4391 dev_priv->ums.mm_suspended = 0;
4229 4392
4230 ret = i915_gem_init_hw(dev); 4393 ret = i915_gem_init_hw(dev);
4231 if (ret != 0) { 4394 if (ret != 0) {
@@ -4233,7 +4396,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4233 return ret; 4396 return ret;
4234 } 4397 }
4235 4398
4236 BUG_ON(!list_empty(&dev_priv->mm.active_list)); 4399 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4237 mutex_unlock(&dev->struct_mutex); 4400 mutex_unlock(&dev->struct_mutex);
4238 4401
4239 ret = drm_irq_install(dev); 4402 ret = drm_irq_install(dev);
@@ -4245,7 +4408,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4245cleanup_ringbuffer: 4408cleanup_ringbuffer:
4246 mutex_lock(&dev->struct_mutex); 4409 mutex_lock(&dev->struct_mutex);
4247 i915_gem_cleanup_ringbuffer(dev); 4410 i915_gem_cleanup_ringbuffer(dev);
4248 dev_priv->mm.suspended = 1; 4411 dev_priv->ums.mm_suspended = 1;
4249 mutex_unlock(&dev->struct_mutex); 4412 mutex_unlock(&dev->struct_mutex);
4250 4413
4251 return ret; 4414 return ret;
@@ -4255,11 +4418,26 @@ int
4255i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 4418i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4256 struct drm_file *file_priv) 4419 struct drm_file *file_priv)
4257{ 4420{
4421 struct drm_i915_private *dev_priv = dev->dev_private;
4422 int ret;
4423
4258 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4424 if (drm_core_check_feature(dev, DRIVER_MODESET))
4259 return 0; 4425 return 0;
4260 4426
4261 drm_irq_uninstall(dev); 4427 drm_irq_uninstall(dev);
4262 return i915_gem_idle(dev); 4428
4429 mutex_lock(&dev->struct_mutex);
4430 ret = i915_gem_idle(dev);
4431
4432 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4433 * We need to replace this with a semaphore, or something.
4434 * And not confound ums.mm_suspended!
4435 */
4436 if (ret != 0)
4437 dev_priv->ums.mm_suspended = 1;
4438 mutex_unlock(&dev->struct_mutex);
4439
4440 return ret;
4263} 4441}
4264 4442
4265void 4443void
@@ -4270,9 +4448,11 @@ i915_gem_lastclose(struct drm_device *dev)
4270 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4448 if (drm_core_check_feature(dev, DRIVER_MODESET))
4271 return; 4449 return;
4272 4450
4451 mutex_lock(&dev->struct_mutex);
4273 ret = i915_gem_idle(dev); 4452 ret = i915_gem_idle(dev);
4274 if (ret) 4453 if (ret)
4275 DRM_ERROR("failed to idle hardware: %d\n", ret); 4454 DRM_ERROR("failed to idle hardware: %d\n", ret);
4455 mutex_unlock(&dev->struct_mutex);
4276} 4456}
4277 4457
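A side effect of the i915_gem_idle() rework above is a new locking contract: the function no longer takes struct_mutex itself, so every caller must bracket it, exactly as the leavevt and lastclose paths now do. The pattern in isolation:

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_idle(dev);	/* callee now expects the mutex held */
	mutex_unlock(&dev->struct_mutex);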
4278static void 4458static void
@@ -4282,6 +4462,16 @@ init_ring_lists(struct intel_ring_buffer *ring)
4282 INIT_LIST_HEAD(&ring->request_list); 4462 INIT_LIST_HEAD(&ring->request_list);
4283} 4463}
4284 4464
4465static void i915_init_vm(struct drm_i915_private *dev_priv,
4466 struct i915_address_space *vm)
4467{
4468 vm->dev = dev_priv->dev;
4469 INIT_LIST_HEAD(&vm->active_list);
4470 INIT_LIST_HEAD(&vm->inactive_list);
4471 INIT_LIST_HEAD(&vm->global_link);
4472 list_add(&vm->global_link, &dev_priv->vm_list);
4473}
4474
4285void 4475void
4286i915_gem_load(struct drm_device *dev) 4476i915_gem_load(struct drm_device *dev)
4287{ 4477{
@@ -4294,8 +4484,9 @@ i915_gem_load(struct drm_device *dev)
4294 SLAB_HWCACHE_ALIGN, 4484 SLAB_HWCACHE_ALIGN,
4295 NULL); 4485 NULL);
4296 4486
4297 INIT_LIST_HEAD(&dev_priv->mm.active_list); 4487 INIT_LIST_HEAD(&dev_priv->vm_list);
4298 INIT_LIST_HEAD(&dev_priv->mm.inactive_list); 4488 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4489
4299 INIT_LIST_HEAD(&dev_priv->mm.unbound_list); 4490 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4300 INIT_LIST_HEAD(&dev_priv->mm.bound_list); 4491 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4301 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4492 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4594,11 +4785,101 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4594 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) 4785 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
4595 if (obj->pages_pin_count == 0) 4786 if (obj->pages_pin_count == 0)
4596 cnt += obj->base.size >> PAGE_SHIFT; 4787 cnt += obj->base.size >> PAGE_SHIFT;
4597 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) 4788
4789 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4790 if (obj->active)
4791 continue;
4792
4598 if (obj->pin_count == 0 && obj->pages_pin_count == 0) 4793 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4599 cnt += obj->base.size >> PAGE_SHIFT; 4794 cnt += obj->base.size >> PAGE_SHIFT;
4795 }
4600 4796
4601 if (unlock) 4797 if (unlock)
4602 mutex_unlock(&dev->struct_mutex); 4798 mutex_unlock(&dev->struct_mutex);
4603 return cnt; 4799 return cnt;
4604} 4800}
4801
4802/* Query helpers for the new per-VM (struct i915_address_space) layout */
4803unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
4804 struct i915_address_space *vm)
4805{
4806 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4807 struct i915_vma *vma;
4808
4809 if (vm == &dev_priv->mm.aliasing_ppgtt->base)
4810 vm = &dev_priv->gtt.base;
4811
4812 BUG_ON(list_empty(&o->vma_list));
4813 list_for_each_entry(vma, &o->vma_list, vma_link) {
4814 if (vma->vm == vm)
4815 return vma->node.start;
4816
4817 }
4818 return -1;
4819}
4820
4821bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4822 struct i915_address_space *vm)
4823{
4824 struct i915_vma *vma;
4825
4826 list_for_each_entry(vma, &o->vma_list, vma_link)
4827 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
4828 return true;
4829
4830 return false;
4831}
4832
4833bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
4834{
4835 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4836 struct i915_address_space *vm;
4837
4838 list_for_each_entry(vm, &dev_priv->vm_list, global_link)
4839 if (i915_gem_obj_bound(o, vm))
4840 return true;
4841
4842 return false;
4843}
4844
4845unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
4846 struct i915_address_space *vm)
4847{
4848 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4849 struct i915_vma *vma;
4850
4851 if (vm == &dev_priv->mm.aliasing_ppgtt->base)
4852 vm = &dev_priv->gtt.base;
4853
4854 BUG_ON(list_empty(&o->vma_list));
4855
4856 list_for_each_entry(vma, &o->vma_list, vma_link)
4857 if (vma->vm == vm)
4858 return vma->node.size;
4859
4860 return 0;
4861}
4862
4863struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4864 struct i915_address_space *vm)
4865{
4866 struct i915_vma *vma;
4867 list_for_each_entry(vma, &obj->vma_list, vma_link)
4868 if (vma->vm == vm)
4869 return vma;
4870
4871 return NULL;
4872}
4873
4874struct i915_vma *
4875i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
4876 struct i915_address_space *vm)
4877{
4878 struct i915_vma *vma;
4879
4880 vma = i915_gem_obj_to_vma(obj, vm);
4881 if (!vma)
4882 vma = i915_gem_vma_create(obj, vm);
4883
4884 return vma;
4885}
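The block above is the query side of the VMA rework. Two details are easy to miss: the aliasing PPGTT is redirected to dev_priv->gtt.base because it shares the global GTT layout, and i915_gem_obj_offset() returns -1, which wraps to ~0UL in the unsigned return type, when no vma in that VM is found. A hedged sketch of typical use:

	if (i915_gem_obj_bound(obj, vm)) {
		unsigned long start = i915_gem_obj_offset(obj, vm);
		unsigned long size = i915_gem_obj_size(obj, vm);

		/* start/size describe this object's node in vm's drm_mm */
	}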
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 51b7a2171cae..403309c2a7d6 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -155,7 +155,7 @@ create_hw_context(struct drm_device *dev,
155 155
156 if (INTEL_INFO(dev)->gen >= 7) { 156 if (INTEL_INFO(dev)->gen >= 7) {
157 ret = i915_gem_object_set_cache_level(ctx->obj, 157 ret = i915_gem_object_set_cache_level(ctx->obj,
158 I915_CACHE_LLC_MLC); 158 I915_CACHE_L3_LLC);
159 /* Failure shouldn't ever happen this early */ 159 /* Failure shouldn't ever happen this early */
160 if (WARN_ON(ret)) 160 if (WARN_ON(ret))
161 goto err_out; 161 goto err_out;
@@ -214,7 +214,7 @@ static int create_default_context(struct drm_i915_private *dev_priv)
214 * default context. 214 * default context.
215 */ 215 */
216 dev_priv->ring[RCS].default_context = ctx; 216 dev_priv->ring[RCS].default_context = ctx;
217 ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false); 217 ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
218 if (ret) { 218 if (ret) {
219 DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); 219 DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
220 goto err_destroy; 220 goto err_destroy;
@@ -304,31 +304,24 @@ static int context_idr_cleanup(int id, void *p, void *data)
304} 304}
305 305
306struct i915_ctx_hang_stats * 306struct i915_ctx_hang_stats *
307i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring, 307i915_gem_context_get_hang_stats(struct drm_device *dev,
308 struct drm_file *file, 308 struct drm_file *file,
309 u32 id) 309 u32 id)
310{ 310{
311 struct drm_i915_private *dev_priv = ring->dev->dev_private; 311 struct drm_i915_private *dev_priv = dev->dev_private;
312 struct drm_i915_file_private *file_priv = file->driver_priv; 312 struct drm_i915_file_private *file_priv = file->driver_priv;
313 struct i915_hw_context *to; 313 struct i915_hw_context *ctx;
314
315 if (dev_priv->hw_contexts_disabled)
316 return ERR_PTR(-ENOENT);
317
318 if (ring->id != RCS)
319 return ERR_PTR(-EINVAL);
320
321 if (file == NULL)
322 return ERR_PTR(-EINVAL);
323 314
324 if (id == DEFAULT_CONTEXT_ID) 315 if (id == DEFAULT_CONTEXT_ID)
325 return &file_priv->hang_stats; 316 return &file_priv->hang_stats;
326 317
327 to = i915_gem_context_get(file->driver_priv, id); 318 ctx = NULL;
328 if (to == NULL) 319 if (!dev_priv->hw_contexts_disabled)
320 ctx = i915_gem_context_get(file->driver_priv, id);
321 if (ctx == NULL)
329 return ERR_PTR(-ENOENT); 322 return ERR_PTR(-ENOENT);
330 323
331 return &to->hang_stats; 324 return &ctx->hang_stats;
332} 325}
333 326
334void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) 327void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
@@ -377,7 +370,7 @@ mi_set_context(struct intel_ring_buffer *ring,
377 370
378 intel_ring_emit(ring, MI_NOOP); 371 intel_ring_emit(ring, MI_NOOP);
379 intel_ring_emit(ring, MI_SET_CONTEXT); 372 intel_ring_emit(ring, MI_SET_CONTEXT);
380 intel_ring_emit(ring, new_context->obj->gtt_offset | 373 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
381 MI_MM_SPACE_GTT | 374 MI_MM_SPACE_GTT |
382 MI_SAVE_EXT_STATE_EN | 375 MI_SAVE_EXT_STATE_EN |
383 MI_RESTORE_EXT_STATE_EN | 376 MI_RESTORE_EXT_STATE_EN |
@@ -407,7 +400,7 @@ static int do_switch(struct i915_hw_context *to)
407 if (from == to) 400 if (from == to)
408 return 0; 401 return 0;
409 402
410 ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false); 403 ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
411 if (ret) 404 if (ret)
412 return ret; 405 return ret;
413 406
@@ -443,7 +436,10 @@ static int do_switch(struct i915_hw_context *to)
443 * MI_SET_CONTEXT instead of when the next seqno has completed. 436 * MI_SET_CONTEXT instead of when the next seqno has completed.
444 */ 437 */
445 if (from != NULL) { 438 if (from != NULL) {
439 struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
440 struct i915_address_space *ggtt = &dev_priv->gtt.base;
446 from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; 441 from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
442 list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
447 i915_gem_object_move_to_active(from->obj, ring); 443 i915_gem_object_move_to_active(from->obj, ring);
448 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the 444 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
449 * whole damn pipeline, we don't need to explicitly mark the 445 * whole damn pipeline, we don't need to explicitly mark the
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 582e6a5f3dac..775d506b3208 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -97,7 +97,7 @@ i915_verify_lists(struct drm_device *dev)
97 } 97 }
98 } 98 }
99 99
100 list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) { 100 list_for_each_entry(obj, &i915_gtt_vm->inactive_list, list) {
101 if (obj->base.dev != dev || 101 if (obj->base.dev != dev ||
102 !atomic_read(&obj->base.refcount.refcount)) { 102 !atomic_read(&obj->base.refcount.refcount)) {
103 DRM_ERROR("freed inactive %p\n", obj); 103 DRM_ERROR("freed inactive %p\n", obj);
@@ -115,73 +115,4 @@ i915_verify_lists(struct drm_device *dev)
115 115
116 return warned = err; 116 return warned = err;
117} 117}
118#endif /* WATCH_INACTIVE */ 118#endif /* WATCH_LIST */
119
120#if WATCH_COHERENCY
121void
122i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
123{
124 struct drm_device *dev = obj->base.dev;
125 int page;
126 uint32_t *gtt_mapping;
127 uint32_t *backing_map = NULL;
128 int bad_count = 0;
129
130 DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
131 __func__, obj, obj->gtt_offset, handle,
132 obj->size / 1024);
133
134 gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
135 obj->base.size);
136 if (gtt_mapping == NULL) {
137 DRM_ERROR("failed to map GTT space\n");
138 return;
139 }
140
141 for (page = 0; page < obj->size / PAGE_SIZE; page++) {
142 int i;
143
144 backing_map = kmap_atomic(obj->pages[page]);
145
146 if (backing_map == NULL) {
147 DRM_ERROR("failed to map backing page\n");
148 goto out;
149 }
150
151 for (i = 0; i < PAGE_SIZE / 4; i++) {
152 uint32_t cpuval = backing_map[i];
153 uint32_t gttval = readl(gtt_mapping +
154 page * 1024 + i);
155
156 if (cpuval != gttval) {
157 DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
158 "0x%08x vs 0x%08x\n",
159 (int)(obj->gtt_offset +
160 page * PAGE_SIZE + i * 4),
161 cpuval, gttval);
162 if (bad_count++ >= 8) {
163 DRM_INFO("...\n");
164 goto out;
165 }
166 }
167 }
168 kunmap_atomic(backing_map);
169 backing_map = NULL;
170 }
171
172 out:
173 if (backing_map != NULL)
174 kunmap_atomic(backing_map);
175 iounmap(gtt_mapping);
176
177 /* give syslog time to catch up */
178 msleep(1);
179
180 /* Directly flush the object, since we just loaded values with the CPU
181 * from the backing pages and we don't want to disturb the cache
182 * management that we're trying to observe.
183 */
184
185 i915_gem_clflush_object(obj);
186}
187#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index dc53a527126b..e918b05fcbdd 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -27,10 +27,15 @@
27#include "i915_drv.h" 27#include "i915_drv.h"
28#include <linux/dma-buf.h> 28#include <linux/dma-buf.h>
29 29
30static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
31{
32 return to_intel_bo(buf->priv);
33}
34
30static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment, 35static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
31 enum dma_data_direction dir) 36 enum dma_data_direction dir)
32{ 37{
33 struct drm_i915_gem_object *obj = attachment->dmabuf->priv; 38 struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
34 struct sg_table *st; 39 struct sg_table *st;
35 struct scatterlist *src, *dst; 40 struct scatterlist *src, *dst;
36 int ret, i; 41 int ret, i;
@@ -85,25 +90,22 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
85 struct sg_table *sg, 90 struct sg_table *sg,
86 enum dma_data_direction dir) 91 enum dma_data_direction dir)
87{ 92{
93 struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
94
95 mutex_lock(&obj->base.dev->struct_mutex);
96
88 dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); 97 dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
89 sg_free_table(sg); 98 sg_free_table(sg);
90 kfree(sg); 99 kfree(sg);
91}
92 100
93static void i915_gem_dmabuf_release(struct dma_buf *dma_buf) 101 i915_gem_object_unpin_pages(obj);
94{
95 struct drm_i915_gem_object *obj = dma_buf->priv;
96 102
97 if (obj->base.export_dma_buf == dma_buf) { 103 mutex_unlock(&obj->base.dev->struct_mutex);
98 /* drop the reference on the export fd holds */
99 obj->base.export_dma_buf = NULL;
100 drm_gem_object_unreference_unlocked(&obj->base);
101 }
102} 104}
103 105
104static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) 106static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
105{ 107{
106 struct drm_i915_gem_object *obj = dma_buf->priv; 108 struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
107 struct drm_device *dev = obj->base.dev; 109 struct drm_device *dev = obj->base.dev;
108 struct sg_page_iter sg_iter; 110 struct sg_page_iter sg_iter;
109 struct page **pages; 111 struct page **pages;
@@ -151,7 +153,7 @@ error:
151 153
152static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) 154static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
153{ 155{
154 struct drm_i915_gem_object *obj = dma_buf->priv; 156 struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
155 struct drm_device *dev = obj->base.dev; 157 struct drm_device *dev = obj->base.dev;
156 int ret; 158 int ret;
157 159
@@ -194,7 +196,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
194 196
195static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction) 197static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
196{ 198{
197 struct drm_i915_gem_object *obj = dma_buf->priv; 199 struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
198 struct drm_device *dev = obj->base.dev; 200 struct drm_device *dev = obj->base.dev;
199 int ret; 201 int ret;
200 bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE); 202 bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
@@ -211,7 +213,7 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size
211static const struct dma_buf_ops i915_dmabuf_ops = { 213static const struct dma_buf_ops i915_dmabuf_ops = {
212 .map_dma_buf = i915_gem_map_dma_buf, 214 .map_dma_buf = i915_gem_map_dma_buf,
213 .unmap_dma_buf = i915_gem_unmap_dma_buf, 215 .unmap_dma_buf = i915_gem_unmap_dma_buf,
214 .release = i915_gem_dmabuf_release, 216 .release = drm_gem_dmabuf_release,
215 .kmap = i915_gem_dmabuf_kmap, 217 .kmap = i915_gem_dmabuf_kmap,
216 .kmap_atomic = i915_gem_dmabuf_kmap_atomic, 218 .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
217 .kunmap = i915_gem_dmabuf_kunmap, 219 .kunmap = i915_gem_dmabuf_kunmap,
@@ -225,9 +227,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
225struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 227struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
226 struct drm_gem_object *gem_obj, int flags) 228 struct drm_gem_object *gem_obj, int flags)
227{ 229{
228 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 230 return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
229
230 return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
231} 231}
232 232
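Because dma_buf_export() is now handed the embedded drm_gem_object directly, and release is delegated to the core drm_gem_dmabuf_release(), dma_buf->priv round-trips cleanly through the dma_buf_to_obj() wrapper added at the top of the file. A hedged call sketch (flags as the caller received them):

	struct dma_buf *buf = i915_gem_prime_export(dev, &obj->base, flags);
	if (IS_ERR(buf))
		return PTR_ERR(buf);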
233static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) 233static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -264,7 +264,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
264 264
265 /* is this one of own objects? */ 265 /* is this one of own objects? */
266 if (dma_buf->ops == &i915_dmabuf_ops) { 266 if (dma_buf->ops == &i915_dmabuf_ops) {
267 obj = dma_buf->priv; 267 obj = dma_buf_to_obj(dma_buf);
268 /* is it from our device? */ 268 /* is it from our device? */
269 if (obj->base.dev == dev) { 269 if (obj->base.dev == dev) {
270 /* 270 /*
@@ -289,12 +289,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
289 goto fail_detach; 289 goto fail_detach;
290 } 290 }
291 291
292 ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size); 292 drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
293 if (ret) {
294 i915_gem_object_free(obj);
295 goto fail_detach;
296 }
297
298 i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops); 293 i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
299 obj->base.import_attach = attach; 294 obj->base.import_attach = attach;
300 295
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index c86d5d9356fd..91b700155850 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -32,23 +32,23 @@
32#include "i915_trace.h" 32#include "i915_trace.h"
33 33
34static bool 34static bool
35mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) 35mark_free(struct i915_vma *vma, struct list_head *unwind)
36{ 36{
37 if (obj->pin_count) 37 if (vma->obj->pin_count)
38 return false; 38 return false;
39 39
40 list_add(&obj->exec_list, unwind); 40 list_add(&vma->exec_list, unwind);
41 return drm_mm_scan_add_block(obj->gtt_space); 41 return drm_mm_scan_add_block(&vma->node);
42} 42}
43 43
44int 44int
45i915_gem_evict_something(struct drm_device *dev, int min_size, 45i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
46 unsigned alignment, unsigned cache_level, 46 int min_size, unsigned alignment, unsigned cache_level,
47 bool mappable, bool nonblocking) 47 bool mappable, bool nonblocking)
48{ 48{
49 drm_i915_private_t *dev_priv = dev->dev_private; 49 drm_i915_private_t *dev_priv = dev->dev_private;
50 struct list_head eviction_list, unwind_list; 50 struct list_head eviction_list, unwind_list;
51 struct drm_i915_gem_object *obj; 51 struct i915_vma *vma;
52 int ret = 0; 52 int ret = 0;
53 53
54 trace_i915_gem_evict(dev, min_size, alignment, mappable); 54 trace_i915_gem_evict(dev, min_size, alignment, mappable);
@@ -77,17 +77,17 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
77 */ 77 */
78 78
79 INIT_LIST_HEAD(&unwind_list); 79 INIT_LIST_HEAD(&unwind_list);
80 if (mappable) 80 if (mappable) {
81 drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, 81 BUG_ON(!i915_is_ggtt(vm));
82 min_size, alignment, cache_level, 82 drm_mm_init_scan_with_range(&vm->mm, min_size,
83 0, dev_priv->gtt.mappable_end); 83 alignment, cache_level, 0,
84 else 84 dev_priv->gtt.mappable_end);
85 drm_mm_init_scan(&dev_priv->mm.gtt_space, 85 } else
86 min_size, alignment, cache_level); 86 drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
87 87
88 /* First see if there is a large enough contiguous idle region... */ 88 /* First see if there is a large enough contiguous idle region... */
89 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { 89 list_for_each_entry(vma, &vm->inactive_list, mm_list) {
90 if (mark_free(obj, &unwind_list)) 90 if (mark_free(vma, &unwind_list))
91 goto found; 91 goto found;
92 } 92 }
93 93
@@ -95,22 +95,21 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
95 goto none; 95 goto none;
96 96
97 /* Now merge in the soon-to-be-expired objects... */ 97 /* Now merge in the soon-to-be-expired objects... */
98 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 98 list_for_each_entry(vma, &vm->active_list, mm_list) {
99 if (mark_free(obj, &unwind_list)) 99 if (mark_free(vma, &unwind_list))
100 goto found; 100 goto found;
101 } 101 }
102 102
103none: 103none:
104 /* Nothing found, clean up and bail out! */ 104 /* Nothing found, clean up and bail out! */
105 while (!list_empty(&unwind_list)) { 105 while (!list_empty(&unwind_list)) {
106 obj = list_first_entry(&unwind_list, 106 vma = list_first_entry(&unwind_list,
107 struct drm_i915_gem_object, 107 struct i915_vma,
108 exec_list); 108 exec_list);
109 109 ret = drm_mm_scan_remove_block(&vma->node);
110 ret = drm_mm_scan_remove_block(obj->gtt_space);
111 BUG_ON(ret); 110 BUG_ON(ret);
112 111
113 list_del_init(&obj->exec_list); 112 list_del_init(&vma->exec_list);
114 } 113 }
115 114
116 /* We expect the caller to unpin, evict all and try again, or give up. 115 /* We expect the caller to unpin, evict all and try again, or give up.
@@ -124,27 +123,30 @@ found:
124 * temporary list. */ 123 * temporary list. */
125 INIT_LIST_HEAD(&eviction_list); 124 INIT_LIST_HEAD(&eviction_list);
126 while (!list_empty(&unwind_list)) { 125 while (!list_empty(&unwind_list)) {
127 obj = list_first_entry(&unwind_list, 126 vma = list_first_entry(&unwind_list,
128 struct drm_i915_gem_object, 127 struct i915_vma,
129 exec_list); 128 exec_list);
130 if (drm_mm_scan_remove_block(obj->gtt_space)) { 129 if (drm_mm_scan_remove_block(&vma->node)) {
131 list_move(&obj->exec_list, &eviction_list); 130 list_move(&vma->exec_list, &eviction_list);
132 drm_gem_object_reference(&obj->base); 131 drm_gem_object_reference(&vma->obj->base);
133 continue; 132 continue;
134 } 133 }
135 list_del_init(&obj->exec_list); 134 list_del_init(&vma->exec_list);
136 } 135 }
137 136
138 /* Unbinding will emit any required flushes */ 137 /* Unbinding will emit any required flushes */
139 while (!list_empty(&eviction_list)) { 138 while (!list_empty(&eviction_list)) {
140 obj = list_first_entry(&eviction_list, 139 struct drm_gem_object *obj;
141 struct drm_i915_gem_object, 140 vma = list_first_entry(&eviction_list,
141 struct i915_vma,
142 exec_list); 142 exec_list);
143
144 obj = &vma->obj->base;
145 list_del_init(&vma->exec_list);
143 if (ret == 0) 146 if (ret == 0)
144 ret = i915_gem_object_unbind(obj); 147 ret = i915_vma_unbind(vma);
145 148
146 list_del_init(&obj->exec_list); 149 drm_gem_object_unreference(obj);
147 drm_gem_object_unreference(&obj->base);
148 } 150 }
149 151
150 return ret; 152 return ret;
@@ -154,12 +156,18 @@ int
154i915_gem_evict_everything(struct drm_device *dev) 156i915_gem_evict_everything(struct drm_device *dev)
155{ 157{
156 drm_i915_private_t *dev_priv = dev->dev_private; 158 drm_i915_private_t *dev_priv = dev->dev_private;
157 struct drm_i915_gem_object *obj, *next; 159 struct i915_address_space *vm;
158 bool lists_empty; 160 struct i915_vma *vma, *next;
161 bool lists_empty = true;
159 int ret; 162 int ret;
160 163
161 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 164 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
162 list_empty(&dev_priv->mm.active_list)); 165 lists_empty = (list_empty(&vm->inactive_list) &&
166 list_empty(&vm->active_list));
167 if (!lists_empty)
168 break;
169 }
170
163 if (lists_empty) 171 if (lists_empty)
164 return -ENOSPC; 172 return -ENOSPC;
165 173
@@ -176,10 +184,11 @@ i915_gem_evict_everything(struct drm_device *dev)
176 i915_gem_retire_requests(dev); 184 i915_gem_retire_requests(dev);
177 185
178 /* Having flushed everything, unbind() should never raise an error */ 186 /* Having flushed everything, unbind() should never raise an error */
179 list_for_each_entry_safe(obj, next, 187 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
180 &dev_priv->mm.inactive_list, mm_list) 188 list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
181 if (obj->pin_count == 0) 189 if (vma->obj->pin_count == 0)
182 WARN_ON(i915_gem_object_unbind(obj)); 190 WARN_ON(i915_vma_unbind(vma));
191 }
183 192
184 return 0; 193 return 0;
185} 194}
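Eviction is now scoped to a single address space: the scan walks vm->inactive_list and then vm->active_list of i915_vma nodes instead of the old global object lists, so callers name the VM they need room in. A hedged call sketch matching the new signature:

	ret = i915_gem_evict_something(dev, vm, min_size, alignment,
				       cache_level, mappable, nonblocking);
	if (ret)	/* e.g. -ENOSPC when nothing in vm is evictable */
		return ret;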
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 87a3227e5179..792c52a235ee 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -172,9 +172,60 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
172} 172}
173 173
174static int 174static int
175relocate_entry_cpu(struct drm_i915_gem_object *obj,
176 struct drm_i915_gem_relocation_entry *reloc)
177{
178 uint32_t page_offset = offset_in_page(reloc->offset);
179 char *vaddr;
180 int ret;
181
182 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
183 if (ret)
184 return ret;
185
186 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
187 reloc->offset >> PAGE_SHIFT));
188 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
189 kunmap_atomic(vaddr);
190
191 return 0;
192}
193
194static int
195relocate_entry_gtt(struct drm_i915_gem_object *obj,
196 struct drm_i915_gem_relocation_entry *reloc)
197{
198 struct drm_device *dev = obj->base.dev;
199 struct drm_i915_private *dev_priv = dev->dev_private;
200 uint32_t __iomem *reloc_entry;
201 void __iomem *reloc_page;
202 int ret;
203
204 ret = i915_gem_object_set_to_gtt_domain(obj, true);
205 if (ret)
206 return ret;
207
208 ret = i915_gem_object_put_fence(obj);
209 if (ret)
210 return ret;
211
212 /* Map the page containing the relocation we're going to perform. */
213 reloc->offset += i915_gem_obj_ggtt_offset(obj);
214 reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
215 reloc->offset & PAGE_MASK);
216 reloc_entry = (uint32_t __iomem *)
217 (reloc_page + offset_in_page(reloc->offset));
218 iowrite32(reloc->delta, reloc_entry);
219 io_mapping_unmap_atomic(reloc_page);
220
221 return 0;
222}
223
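relocate_entry_cpu() and relocate_entry_gtt() are straight extractions of the two patch paths that were previously inlined (removed further down); both consume the same userspace-supplied record. For reference, one relocation roughly looks like this on the way in; field names are from struct drm_i915_gem_relocation_entry in i915_drm.h, values are illustrative:

	struct drm_i915_gem_relocation_entry reloc = {
		.target_handle = target_handle,	/* bo the batch points at */
		.offset = patch_offset,		/* byte to patch in this bo */
		.delta = 0,			/* offset inside the target bo */
		.presumed_offset = 0,		/* guess, corrected on return */
		.read_domains = I915_GEM_DOMAIN_RENDER,
		.write_domain = 0,
	};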
224static int
175i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, 225i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
176 struct eb_objects *eb, 226 struct eb_objects *eb,
177 struct drm_i915_gem_relocation_entry *reloc) 227 struct drm_i915_gem_relocation_entry *reloc,
228 struct i915_address_space *vm)
178{ 229{
179 struct drm_device *dev = obj->base.dev; 230 struct drm_device *dev = obj->base.dev;
180 struct drm_gem_object *target_obj; 231 struct drm_gem_object *target_obj;
@@ -188,7 +239,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
188 return -ENOENT; 239 return -ENOENT;
189 240
190 target_i915_obj = to_intel_bo(target_obj); 241 target_i915_obj = to_intel_bo(target_obj);
191 target_offset = target_i915_obj->gtt_offset; 242 target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
192 243
193 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and 244 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
194 * pipe_control writes because the gpu doesn't properly redirect them 245 * pipe_control writes because the gpu doesn't properly redirect them
@@ -254,40 +305,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
254 return -EFAULT; 305 return -EFAULT;
255 306
256 reloc->delta += target_offset; 307 reloc->delta += target_offset;
257 if (use_cpu_reloc(obj)) { 308 if (use_cpu_reloc(obj))
258 uint32_t page_offset = reloc->offset & ~PAGE_MASK; 309 ret = relocate_entry_cpu(obj, reloc);
259 char *vaddr; 310 else
260 311 ret = relocate_entry_gtt(obj, reloc);
261 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
262 if (ret)
263 return ret;
264
265 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
266 reloc->offset >> PAGE_SHIFT));
267 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
268 kunmap_atomic(vaddr);
269 } else {
270 struct drm_i915_private *dev_priv = dev->dev_private;
271 uint32_t __iomem *reloc_entry;
272 void __iomem *reloc_page;
273
274 ret = i915_gem_object_set_to_gtt_domain(obj, true);
275 if (ret)
276 return ret;
277
278 ret = i915_gem_object_put_fence(obj);
279 if (ret)
280 return ret;
281
282 /* Map the page containing the relocation we're going to perform. */
283 reloc->offset += obj->gtt_offset;
284 reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
285 reloc->offset & PAGE_MASK);
286 reloc_entry = (uint32_t __iomem *)
287 (reloc_page + (reloc->offset & ~PAGE_MASK));
288 iowrite32(reloc->delta, reloc_entry);
289 io_mapping_unmap_atomic(reloc_page);
290 }
291 312
292 /* and update the user's relocation entry */ 313 /* and update the user's relocation entry */
293 reloc->presumed_offset = target_offset; 314 reloc->presumed_offset = target_offset;
@@ -297,7 +318,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
297 318
298static int 319static int
299i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, 320i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
300 struct eb_objects *eb) 321 struct eb_objects *eb,
322 struct i915_address_space *vm)
301{ 323{
302#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) 324#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
303 struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)]; 325 struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
@@ -321,7 +343,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
321 do { 343 do {
322 u64 offset = r->presumed_offset; 344 u64 offset = r->presumed_offset;
323 345
324 ret = i915_gem_execbuffer_relocate_entry(obj, eb, r); 346 ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
347 vm);
325 if (ret) 348 if (ret)
326 return ret; 349 return ret;
327 350
@@ -344,13 +367,15 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
344static int 367static int
345i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, 368i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
346 struct eb_objects *eb, 369 struct eb_objects *eb,
347 struct drm_i915_gem_relocation_entry *relocs) 370 struct drm_i915_gem_relocation_entry *relocs,
371 struct i915_address_space *vm)
348{ 372{
349 const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 373 const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
350 int i, ret; 374 int i, ret;
351 375
352 for (i = 0; i < entry->relocation_count; i++) { 376 for (i = 0; i < entry->relocation_count; i++) {
353 ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]); 377 ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
378 vm);
354 if (ret) 379 if (ret)
355 return ret; 380 return ret;
356 } 381 }
@@ -359,7 +384,8 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
359} 384}
360 385
361static int 386static int
362i915_gem_execbuffer_relocate(struct eb_objects *eb) 387i915_gem_execbuffer_relocate(struct eb_objects *eb,
388 struct i915_address_space *vm)
363{ 389{
364 struct drm_i915_gem_object *obj; 390 struct drm_i915_gem_object *obj;
365 int ret = 0; 391 int ret = 0;
@@ -373,7 +399,7 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb)
373 */ 399 */
374 pagefault_disable(); 400 pagefault_disable();
375 list_for_each_entry(obj, &eb->objects, exec_list) { 401 list_for_each_entry(obj, &eb->objects, exec_list) {
376 ret = i915_gem_execbuffer_relocate_object(obj, eb); 402 ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
377 if (ret) 403 if (ret)
378 break; 404 break;
379 } 405 }
@@ -395,6 +421,7 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
395static int 421static int
396i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, 422i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
397 struct intel_ring_buffer *ring, 423 struct intel_ring_buffer *ring,
424 struct i915_address_space *vm,
398 bool *need_reloc) 425 bool *need_reloc)
399{ 426{
400 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 427 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@@ -409,7 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
409 obj->tiling_mode != I915_TILING_NONE; 436 obj->tiling_mode != I915_TILING_NONE;
410 need_mappable = need_fence || need_reloc_mappable(obj); 437 need_mappable = need_fence || need_reloc_mappable(obj);
411 438
412 ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false); 439 ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
440 false);
413 if (ret) 441 if (ret)
414 return ret; 442 return ret;
415 443
@@ -436,8 +464,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
436 obj->has_aliasing_ppgtt_mapping = 1; 464 obj->has_aliasing_ppgtt_mapping = 1;
437 } 465 }
438 466
439 if (entry->offset != obj->gtt_offset) { 467 if (entry->offset != i915_gem_obj_offset(obj, vm)) {
440 entry->offset = obj->gtt_offset; 468 entry->offset = i915_gem_obj_offset(obj, vm);
441 *need_reloc = true; 469 *need_reloc = true;
442 } 470 }
443 471
@@ -458,7 +486,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
458{ 486{
459 struct drm_i915_gem_exec_object2 *entry; 487 struct drm_i915_gem_exec_object2 *entry;
460 488
461 if (!obj->gtt_space) 489 if (!i915_gem_obj_bound_any(obj))
462 return; 490 return;
463 491
464 entry = obj->exec_entry; 492 entry = obj->exec_entry;
@@ -475,6 +503,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
475static int 503static int
476i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, 504i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
477 struct list_head *objects, 505 struct list_head *objects,
506 struct i915_address_space *vm,
478 bool *need_relocs) 507 bool *need_relocs)
479{ 508{
480 struct drm_i915_gem_object *obj; 509 struct drm_i915_gem_object *obj;
@@ -529,31 +558,37 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
529 list_for_each_entry(obj, objects, exec_list) { 558 list_for_each_entry(obj, objects, exec_list) {
530 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 559 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
531 bool need_fence, need_mappable; 560 bool need_fence, need_mappable;
561 u32 obj_offset;
532 562
533 if (!obj->gtt_space) 563 if (!i915_gem_obj_bound(obj, vm))
534 continue; 564 continue;
535 565
566 obj_offset = i915_gem_obj_offset(obj, vm);
536 need_fence = 567 need_fence =
537 has_fenced_gpu_access && 568 has_fenced_gpu_access &&
538 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 569 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
539 obj->tiling_mode != I915_TILING_NONE; 570 obj->tiling_mode != I915_TILING_NONE;
540 need_mappable = need_fence || need_reloc_mappable(obj); 571 need_mappable = need_fence || need_reloc_mappable(obj);
541 572
542 if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || 573 WARN_ON((need_mappable || need_fence) &&
574 !i915_is_ggtt(vm));
575
576 if ((entry->alignment &&
577 obj_offset & (entry->alignment - 1)) ||
543 (need_mappable && !obj->map_and_fenceable)) 578 (need_mappable && !obj->map_and_fenceable))
544 ret = i915_gem_object_unbind(obj); 579 ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
545 else 580 else
546 ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); 581 ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
547 if (ret) 582 if (ret)
548 goto err; 583 goto err;
549 } 584 }
550 585
551 /* Bind fresh objects */ 586 /* Bind fresh objects */
552 list_for_each_entry(obj, objects, exec_list) { 587 list_for_each_entry(obj, objects, exec_list) {
553 if (obj->gtt_space) 588 if (i915_gem_obj_bound(obj, vm))
554 continue; 589 continue;
555 590
556 ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); 591 ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
557 if (ret) 592 if (ret)
558 goto err; 593 goto err;
559 } 594 }
@@ -577,7 +612,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
577 struct drm_file *file, 612 struct drm_file *file,
578 struct intel_ring_buffer *ring, 613 struct intel_ring_buffer *ring,
579 struct eb_objects *eb, 614 struct eb_objects *eb,
580 struct drm_i915_gem_exec_object2 *exec) 615 struct drm_i915_gem_exec_object2 *exec,
616 struct i915_address_space *vm)
581{ 617{
582 struct drm_i915_gem_relocation_entry *reloc; 618 struct drm_i915_gem_relocation_entry *reloc;
583 struct drm_i915_gem_object *obj; 619 struct drm_i915_gem_object *obj;
@@ -661,14 +697,15 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
661 goto err; 697 goto err;
662 698
663 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 699 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
664 ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs); 700 ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
665 if (ret) 701 if (ret)
666 goto err; 702 goto err;
667 703
668 list_for_each_entry(obj, &eb->objects, exec_list) { 704 list_for_each_entry(obj, &eb->objects, exec_list) {
669 int offset = obj->exec_entry - exec; 705 int offset = obj->exec_entry - exec;
670 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, 706 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
671 reloc + reloc_offset[offset]); 707 reloc + reloc_offset[offset],
708 vm);
672 if (ret) 709 if (ret)
673 goto err; 710 goto err;
674 } 711 }
@@ -691,6 +728,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
691{ 728{
692 struct drm_i915_gem_object *obj; 729 struct drm_i915_gem_object *obj;
693 uint32_t flush_domains = 0; 730 uint32_t flush_domains = 0;
731 bool flush_chipset = false;
694 int ret; 732 int ret;
695 733
696 list_for_each_entry(obj, objects, exec_list) { 734 list_for_each_entry(obj, objects, exec_list) {
@@ -699,12 +737,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
699 return ret; 737 return ret;
700 738
701 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) 739 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
702 i915_gem_clflush_object(obj); 740 flush_chipset |= i915_gem_clflush_object(obj, false);
703 741
704 flush_domains |= obj->base.write_domain; 742 flush_domains |= obj->base.write_domain;
705 } 743 }
706 744
707 if (flush_domains & I915_GEM_DOMAIN_CPU) 745 if (flush_chipset)
708 i915_gem_chipset_flush(ring->dev); 746 i915_gem_chipset_flush(ring->dev);
709 747
710 if (flush_domains & I915_GEM_DOMAIN_GTT) 748 if (flush_domains & I915_GEM_DOMAIN_GTT)
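
Note the flush logic change in this hunk: instead of keying the chipset flush off I915_GEM_DOMAIN_CPU, the loop now ORs together the per-object return value of i915_gem_clflush_object() and flushes at most once afterwards. A toy sketch of that accumulate-then-flush idiom, with invented names:

/* Accumulate-then-flush: OR together per-object "needs flush" results and
 * perform the expensive global flush at most once. Illustrative only. */
#include <stdbool.h>
#include <stdio.h>

static bool clflush_object(int obj)
{
	/* Pretend odd-numbered objects had dirty CPU cachelines. */
	return obj & 1;
}

static void chipset_flush(void)
{
	puts("chipset flush (issued once)");
}

int main(void)
{
	bool flush_chipset = false;

	for (int obj = 0; obj < 8; obj++)
		flush_chipset |= clflush_object(obj);

	if (flush_chipset)
		chipset_flush();
	return 0;
}
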
@@ -758,8 +796,10 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
758 if (!access_ok(VERIFY_WRITE, ptr, length)) 796 if (!access_ok(VERIFY_WRITE, ptr, length))
759 return -EFAULT; 797 return -EFAULT;
760 798
761 if (fault_in_multipages_readable(ptr, length)) 799 if (likely(!i915_prefault_disable)) {
762 return -EFAULT; 800 if (fault_in_multipages_readable(ptr, length))
801 return -EFAULT;
802 }
763 } 803 }
764 804
765 return 0; 805 return 0;
@@ -767,6 +807,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
767 807
768static void 808static void
769i915_gem_execbuffer_move_to_active(struct list_head *objects, 809i915_gem_execbuffer_move_to_active(struct list_head *objects,
810 struct i915_address_space *vm,
770 struct intel_ring_buffer *ring) 811 struct intel_ring_buffer *ring)
771{ 812{
772 struct drm_i915_gem_object *obj; 813 struct drm_i915_gem_object *obj;
@@ -781,6 +822,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
781 obj->base.read_domains = obj->base.pending_read_domains; 822 obj->base.read_domains = obj->base.pending_read_domains;
782 obj->fenced_gpu_access = obj->pending_fenced_gpu_access; 823 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
783 824
825 /* FIXME: This lookup gets fixed later <-- danvet */
826 list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
784 i915_gem_object_move_to_active(obj, ring); 827 i915_gem_object_move_to_active(obj, ring);
785 if (obj->base.write_domain) { 828 if (obj->base.write_domain) {
786 obj->dirty = 1; 829 obj->dirty = 1;
@@ -835,7 +878,8 @@ static int
835i915_gem_do_execbuffer(struct drm_device *dev, void *data, 878i915_gem_do_execbuffer(struct drm_device *dev, void *data,
836 struct drm_file *file, 879 struct drm_file *file,
837 struct drm_i915_gem_execbuffer2 *args, 880 struct drm_i915_gem_execbuffer2 *args,
838 struct drm_i915_gem_exec_object2 *exec) 881 struct drm_i915_gem_exec_object2 *exec,
882 struct i915_address_space *vm)
839{ 883{
840 drm_i915_private_t *dev_priv = dev->dev_private; 884 drm_i915_private_t *dev_priv = dev->dev_private;
841 struct eb_objects *eb; 885 struct eb_objects *eb;
@@ -872,7 +916,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
872 break; 916 break;
873 case I915_EXEC_BSD: 917 case I915_EXEC_BSD:
874 ring = &dev_priv->ring[VCS]; 918 ring = &dev_priv->ring[VCS];
875 if (ctx_id != 0) { 919 if (ctx_id != DEFAULT_CONTEXT_ID) {
876 DRM_DEBUG("Ring %s doesn't support contexts\n", 920 DRM_DEBUG("Ring %s doesn't support contexts\n",
877 ring->name); 921 ring->name);
878 return -EPERM; 922 return -EPERM;
@@ -880,7 +924,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
880 break; 924 break;
881 case I915_EXEC_BLT: 925 case I915_EXEC_BLT:
882 ring = &dev_priv->ring[BCS]; 926 ring = &dev_priv->ring[BCS];
883 if (ctx_id != 0) { 927 if (ctx_id != DEFAULT_CONTEXT_ID) {
884 DRM_DEBUG("Ring %s doesn't support contexts\n", 928 DRM_DEBUG("Ring %s doesn't support contexts\n",
885 ring->name); 929 ring->name);
886 return -EPERM; 930 return -EPERM;
@@ -888,7 +932,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
888 break; 932 break;
889 case I915_EXEC_VEBOX: 933 case I915_EXEC_VEBOX:
890 ring = &dev_priv->ring[VECS]; 934 ring = &dev_priv->ring[VECS];
891 if (ctx_id != 0) { 935 if (ctx_id != DEFAULT_CONTEXT_ID) {
892 DRM_DEBUG("Ring %s doesn't support contexts\n", 936 DRM_DEBUG("Ring %s doesn't support contexts\n",
893 ring->name); 937 ring->name);
894 return -EPERM; 938 return -EPERM;
@@ -972,7 +1016,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
972 if (ret) 1016 if (ret)
973 goto pre_mutex_err; 1017 goto pre_mutex_err;
974 1018
975 if (dev_priv->mm.suspended) { 1019 if (dev_priv->ums.mm_suspended) {
976 mutex_unlock(&dev->struct_mutex); 1020 mutex_unlock(&dev->struct_mutex);
977 ret = -EBUSY; 1021 ret = -EBUSY;
978 goto pre_mutex_err; 1022 goto pre_mutex_err;
@@ -997,17 +1041,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
997 1041
998 /* Move the objects en-masse into the GTT, evicting if necessary. */ 1042 /* Move the objects en-masse into the GTT, evicting if necessary. */
999 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 1043 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1000 ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs); 1044 ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
1001 if (ret) 1045 if (ret)
1002 goto err; 1046 goto err;
1003 1047
1004 /* The objects are in their final locations, apply the relocations. */ 1048 /* The objects are in their final locations, apply the relocations. */
1005 if (need_relocs) 1049 if (need_relocs)
1006 ret = i915_gem_execbuffer_relocate(eb); 1050 ret = i915_gem_execbuffer_relocate(eb, vm);
1007 if (ret) { 1051 if (ret) {
1008 if (ret == -EFAULT) { 1052 if (ret == -EFAULT) {
1009 ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, 1053 ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
1010 eb, exec); 1054 eb, exec, vm);
1011 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 1055 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1012 } 1056 }
1013 if (ret) 1057 if (ret)
@@ -1058,7 +1102,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1058 goto err; 1102 goto err;
1059 } 1103 }
1060 1104
1061 exec_start = batch_obj->gtt_offset + args->batch_start_offset; 1105 exec_start = i915_gem_obj_offset(batch_obj, vm) +
1106 args->batch_start_offset;
1062 exec_len = args->batch_len; 1107 exec_len = args->batch_len;
1063 if (cliprects) { 1108 if (cliprects) {
1064 for (i = 0; i < args->num_cliprects; i++) { 1109 for (i = 0; i < args->num_cliprects; i++) {
@@ -1083,7 +1128,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1083 1128
1084 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); 1129 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
1085 1130
1086 i915_gem_execbuffer_move_to_active(&eb->objects, ring); 1131 i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
1087 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); 1132 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1088 1133
1089err: 1134err:
@@ -1104,6 +1149,7 @@ int
1104i915_gem_execbuffer(struct drm_device *dev, void *data, 1149i915_gem_execbuffer(struct drm_device *dev, void *data,
1105 struct drm_file *file) 1150 struct drm_file *file)
1106{ 1151{
1152 struct drm_i915_private *dev_priv = dev->dev_private;
1107 struct drm_i915_gem_execbuffer *args = data; 1153 struct drm_i915_gem_execbuffer *args = data;
1108 struct drm_i915_gem_execbuffer2 exec2; 1154 struct drm_i915_gem_execbuffer2 exec2;
1109 struct drm_i915_gem_exec_object *exec_list = NULL; 1155 struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -1159,7 +1205,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1159 exec2.flags = I915_EXEC_RENDER; 1205 exec2.flags = I915_EXEC_RENDER;
1160 i915_execbuffer2_set_context_id(exec2, 0); 1206 i915_execbuffer2_set_context_id(exec2, 0);
1161 1207
1162 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); 1208 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
1209 &dev_priv->gtt.base);
1163 if (!ret) { 1210 if (!ret) {
1164 /* Copy the new buffer offsets back to the user's exec list. */ 1211 /* Copy the new buffer offsets back to the user's exec list. */
1165 for (i = 0; i < args->buffer_count; i++) 1212 for (i = 0; i < args->buffer_count; i++)
@@ -1185,6 +1232,7 @@ int
1185i915_gem_execbuffer2(struct drm_device *dev, void *data, 1232i915_gem_execbuffer2(struct drm_device *dev, void *data,
1186 struct drm_file *file) 1233 struct drm_file *file)
1187{ 1234{
1235 struct drm_i915_private *dev_priv = dev->dev_private;
1188 struct drm_i915_gem_execbuffer2 *args = data; 1236 struct drm_i915_gem_execbuffer2 *args = data;
1189 struct drm_i915_gem_exec_object2 *exec2_list = NULL; 1237 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1190 int ret; 1238 int ret;
@@ -1215,7 +1263,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1215 return -EFAULT; 1263 return -EFAULT;
1216 } 1264 }
1217 1265
1218 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); 1266 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
1267 &dev_priv->gtt.base);
1219 if (!ret) { 1268 if (!ret) {
1220 /* Copy the new buffer offsets back to the user's exec list. */ 1269 /* Copy the new buffer offsets back to the user's exec list. */
1221 ret = copy_to_user(to_user_ptr(args->buffers_ptr), 1270 ret = copy_to_user(to_user_ptr(args->buffers_ptr),
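
The net effect of this file's changes is that an explicit i915_address_space is threaded through the whole execbuffer path; both ioctls currently pass &dev_priv->gtt.base, so behaviour is unchanged, but per-context address spaces can later be substituted without touching the helpers. A stand-alone sketch of the same refactor shape, using hypothetical types rather than the driver's:

/* Threading an explicit address-space argument through a call chain that
 * previously assumed one global space. All names here are hypothetical. */
#include <stdint.h>
#include <stdio.h>

struct address_space {
	const char *name;
	uint64_t base;
};

/* Before: helpers implicitly used a single global space. After: the space
 * is a parameter, so the caller decides which one applies. */
static uint64_t obj_offset(uint64_t obj_local, struct address_space *vm)
{
	return vm->base + obj_local;
}

static void do_execbuffer(uint64_t batch, struct address_space *vm)
{
	printf("dispatch batch at 0x%llx in %s\n",
	       (unsigned long long)obj_offset(batch, vm), vm->name);
}

int main(void)
{
	struct address_space ggtt = { .name = "ggtt", .base = 0x00000000 };
	struct address_space ppgtt = { .name = "ppgtt", .base = 0x80000000 };

	/* Today every caller passes the global GTT ... */
	do_execbuffer(0x1000, &ggtt);
	/* ... but a per-context space drops in without touching the helpers. */
	do_execbuffer(0x1000, &ppgtt);
	return 0;
}
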
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5101ab6869b4..212f6d8c35ec 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,8 +28,12 @@
28#include "i915_trace.h" 28#include "i915_trace.h"
29#include "intel_drv.h" 29#include "intel_drv.h"
30 30
31#define GEN6_PPGTT_PD_ENTRIES 512
32#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
33
31/* PPGTT stuff */ 34/* PPGTT stuff */
32#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) 35#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
36#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
33 37
34#define GEN6_PDE_VALID (1 << 0) 38#define GEN6_PDE_VALID (1 << 0)
35/* gen6+ has bit 11-4 for physical addr bit 39-32 */ 39/* gen6+ has bit 11-4 for physical addr bit 39-32 */
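
Assuming 4K pages and the 32-bit gen6_gtt_pte_t, the two constants added above fix the gen6 PPGTT geometry: 1024 PTEs per page table and 512 page-directory entries, i.e. 2 GiB of per-process address space. A quick recomputation of that arithmetic:

/* Gen6 PPGTT geometry implied by the two constants above, assuming a 4K
 * page size and 32-bit PTEs. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE		4096u
#define GEN6_PPGTT_PD_ENTRIES	512u
#define PT_ENTRIES		(PAGE_SIZE / sizeof(uint32_t))	/* 1024 */

int main(void)
{
	uint64_t total = (uint64_t)GEN6_PPGTT_PD_ENTRIES * PT_ENTRIES * PAGE_SIZE;

	printf("PTEs per table: %zu\n", (size_t)PT_ENTRIES);
	printf("addressable:    %llu MiB\n",
	       (unsigned long long)(total >> 20));	/* 2048 MiB = 2 GiB */
	return 0;
}
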
@@ -39,19 +43,50 @@
39#define GEN6_PTE_UNCACHED (1 << 1) 43#define GEN6_PTE_UNCACHED (1 << 1)
40#define HSW_PTE_UNCACHED (0) 44#define HSW_PTE_UNCACHED (0)
41#define GEN6_PTE_CACHE_LLC (2 << 1) 45#define GEN6_PTE_CACHE_LLC (2 << 1)
42#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) 46#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
43#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) 47#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
48#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
44 49
45static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev, 50/* Cacheability Control is a 4-bit value. The low three bits are stored in *
46 dma_addr_t addr, 51 * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
47 enum i915_cache_level level) 52 */
53#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
54 (((bits) & 0x8) << (11 - 3)))
55#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
56#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
57#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
58#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
59
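
HSW_CACHEABILITY_CONTROL() scatters a 4-bit cacheability value across non-contiguous PTE bits: the low three bits land in PTE bits 3:1 and the fourth in PTE bit 11, hence the shift by (11 - 3) on a value already sitting at bit 3. Worked through for two of the values defined above:

/* Recompute HSW_CACHEABILITY_CONTROL() to show how the 4-bit field is
 * split: low three bits -> PTE bits 3:1, fourth bit -> PTE bit 11. */
#include <stdio.h>

#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | (((bits) & 0x8) << (11 - 3)))

int main(void)
{
	/* 0x2 -> 0b010 << 1 = 0x004, no high bit        -> 0x004 */
	printf("0x2 -> 0x%03x\n", HSW_CACHEABILITY_CONTROL(0x2));
	/* 0xb -> 0b011 << 1 = 0x006, bit 3 << 8 = 0x800 -> 0x806 */
	printf("0xb -> 0x%03x\n", HSW_CACHEABILITY_CONTROL(0xb));
	return 0;
}
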
60static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
61 enum i915_cache_level level)
48{ 62{
49 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 63 gen6_gtt_pte_t pte = GEN6_PTE_VALID;
50 pte |= GEN6_PTE_ADDR_ENCODE(addr); 64 pte |= GEN6_PTE_ADDR_ENCODE(addr);
51 65
52 switch (level) { 66 switch (level) {
53 case I915_CACHE_LLC_MLC: 67 case I915_CACHE_L3_LLC:
54 pte |= GEN6_PTE_CACHE_LLC_MLC; 68 case I915_CACHE_LLC:
69 pte |= GEN6_PTE_CACHE_LLC;
70 break;
71 case I915_CACHE_NONE:
72 pte |= GEN6_PTE_UNCACHED;
73 break;
74 default:
75 WARN_ON(1);
76 }
77
78 return pte;
79}
80
81static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
82 enum i915_cache_level level)
83{
84 gen6_gtt_pte_t pte = GEN6_PTE_VALID;
85 pte |= GEN6_PTE_ADDR_ENCODE(addr);
86
87 switch (level) {
88 case I915_CACHE_L3_LLC:
89 pte |= GEN7_PTE_CACHE_L3_LLC;
55 break; 90 break;
56 case I915_CACHE_LLC: 91 case I915_CACHE_LLC:
57 pte |= GEN6_PTE_CACHE_LLC; 92 pte |= GEN6_PTE_CACHE_LLC;
@@ -60,7 +95,7 @@ static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
60 pte |= GEN6_PTE_UNCACHED; 95 pte |= GEN6_PTE_UNCACHED;
61 break; 96 break;
62 default: 97 default:
63 BUG(); 98 WARN_ON(1);
64 } 99 }
65 100
66 return pte; 101 return pte;
@@ -69,8 +104,7 @@ static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
69#define BYT_PTE_WRITEABLE (1 << 1) 104#define BYT_PTE_WRITEABLE (1 << 1)
70#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2) 105#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
71 106
72static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev, 107static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
73 dma_addr_t addr,
74 enum i915_cache_level level) 108 enum i915_cache_level level)
75{ 109{
76 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 110 gen6_gtt_pte_t pte = GEN6_PTE_VALID;
@@ -87,22 +121,41 @@ static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
87 return pte; 121 return pte;
88} 122}
89 123
90static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev, 124static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
91 dma_addr_t addr,
92 enum i915_cache_level level) 125 enum i915_cache_level level)
93{ 126{
94 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 127 gen6_gtt_pte_t pte = GEN6_PTE_VALID;
95 pte |= GEN6_PTE_ADDR_ENCODE(addr); 128 pte |= HSW_PTE_ADDR_ENCODE(addr);
96 129
97 if (level != I915_CACHE_NONE) 130 if (level != I915_CACHE_NONE)
98 pte |= GEN6_PTE_CACHE_LLC; 131 pte |= HSW_WB_LLC_AGE3;
132
133 return pte;
134}
135
136static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
137 enum i915_cache_level level)
138{
139 gen6_gtt_pte_t pte = GEN6_PTE_VALID;
140 pte |= HSW_PTE_ADDR_ENCODE(addr);
141
142 switch (level) {
143 case I915_CACHE_NONE:
144 break;
145 case I915_CACHE_WT:
146 pte |= HSW_WT_ELLC_LLC_AGE0;
147 break;
148 default:
149 pte |= HSW_WB_ELLC_LLC_AGE0;
150 break;
151 }
99 152
100 return pte; 153 return pte;
101} 154}
102 155
103static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt) 156static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
104{ 157{
105 struct drm_i915_private *dev_priv = ppgtt->dev->dev_private; 158 struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
106 gen6_gtt_pte_t __iomem *pd_addr; 159 gen6_gtt_pte_t __iomem *pd_addr;
107 uint32_t pd_entry; 160 uint32_t pd_entry;
108 int i; 161 int i;
@@ -181,18 +234,18 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
181} 234}
182 235
183/* PPGTT support for Sandybridge/Gen6 and later */ 236/* PPGTT support for Sandybridge/Gen6 and later */
184static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, 237static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
185 unsigned first_entry, 238 unsigned first_entry,
186 unsigned num_entries) 239 unsigned num_entries)
187{ 240{
241 struct i915_hw_ppgtt *ppgtt =
242 container_of(vm, struct i915_hw_ppgtt, base);
188 gen6_gtt_pte_t *pt_vaddr, scratch_pte; 243 gen6_gtt_pte_t *pt_vaddr, scratch_pte;
189 unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; 244 unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
190 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 245 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
191 unsigned last_pte, i; 246 unsigned last_pte, i;
192 247
193 scratch_pte = ppgtt->pte_encode(ppgtt->dev, 248 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
194 ppgtt->scratch_page_dma_addr,
195 I915_CACHE_LLC);
196 249
197 while (num_entries) { 250 while (num_entries) {
198 last_pte = first_pte + num_entries; 251 last_pte = first_pte + num_entries;
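
With clear_range/insert_entries/cleanup now taking the embedded i915_address_space, each PPGTT callback recovers its enclosing i915_hw_ppgtt via container_of(). A self-contained illustration of that idiom, with generic names:

/* container_of(): recover the enclosing structure from a pointer to one of
 * its members. Same idiom the vm callbacks above use. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct address_space {
	unsigned long total;
};

struct hw_ppgtt {
	int num_pd_entries;
	struct address_space base;	/* embedded, passed to callbacks */
};

static void clear_range(struct address_space *vm)
{
	/* The callback only sees &ppgtt->base, but can get back to the
	 * full ppgtt because it knows where base lives inside it. */
	struct hw_ppgtt *ppgtt = container_of(vm, struct hw_ppgtt, base);

	printf("ppgtt with %d PD entries, %lu total\n",
	       ppgtt->num_pd_entries, vm->total);
}

int main(void)
{
	struct hw_ppgtt ppgtt = { .num_pd_entries = 512, .base.total = 1 << 20 };

	clear_range(&ppgtt.base);
	return 0;
}
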
@@ -212,11 +265,13 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
212 } 265 }
213} 266}
214 267
215static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt, 268static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
216 struct sg_table *pages, 269 struct sg_table *pages,
217 unsigned first_entry, 270 unsigned first_entry,
218 enum i915_cache_level cache_level) 271 enum i915_cache_level cache_level)
219{ 272{
273 struct i915_hw_ppgtt *ppgtt =
274 container_of(vm, struct i915_hw_ppgtt, base);
220 gen6_gtt_pte_t *pt_vaddr; 275 gen6_gtt_pte_t *pt_vaddr;
221 unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; 276 unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
222 unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; 277 unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
@@ -227,8 +282,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
227 dma_addr_t page_addr; 282 dma_addr_t page_addr;
228 283
229 page_addr = sg_page_iter_dma_address(&sg_iter); 284 page_addr = sg_page_iter_dma_address(&sg_iter);
230 pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr, 285 pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
231 cache_level);
232 if (++act_pte == I915_PPGTT_PT_ENTRIES) { 286 if (++act_pte == I915_PPGTT_PT_ENTRIES) {
233 kunmap_atomic(pt_vaddr); 287 kunmap_atomic(pt_vaddr);
234 act_pt++; 288 act_pt++;
@@ -240,13 +294,17 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
240 kunmap_atomic(pt_vaddr); 294 kunmap_atomic(pt_vaddr);
241} 295}
242 296
243static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt) 297static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
244{ 298{
299 struct i915_hw_ppgtt *ppgtt =
300 container_of(vm, struct i915_hw_ppgtt, base);
245 int i; 301 int i;
246 302
303 drm_mm_takedown(&ppgtt->base.mm);
304
247 if (ppgtt->pt_dma_addr) { 305 if (ppgtt->pt_dma_addr) {
248 for (i = 0; i < ppgtt->num_pd_entries; i++) 306 for (i = 0; i < ppgtt->num_pd_entries; i++)
249 pci_unmap_page(ppgtt->dev->pdev, 307 pci_unmap_page(ppgtt->base.dev->pdev,
250 ppgtt->pt_dma_addr[i], 308 ppgtt->pt_dma_addr[i],
251 4096, PCI_DMA_BIDIRECTIONAL); 309 4096, PCI_DMA_BIDIRECTIONAL);
252 } 310 }
@@ -260,7 +318,7 @@ static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
260 318
261static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) 319static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
262{ 320{
263 struct drm_device *dev = ppgtt->dev; 321 struct drm_device *dev = ppgtt->base.dev;
264 struct drm_i915_private *dev_priv = dev->dev_private; 322 struct drm_i915_private *dev_priv = dev->dev_private;
265 unsigned first_pd_entry_in_global_pt; 323 unsigned first_pd_entry_in_global_pt;
266 int i; 324 int i;
@@ -271,18 +329,13 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
271 * now. */ 329 * now. */
272 first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt); 330 first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
273 331
274 if (IS_HASWELL(dev)) { 332 ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
275 ppgtt->pte_encode = hsw_pte_encode; 333 ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
276 } else if (IS_VALLEYVIEW(dev)) {
277 ppgtt->pte_encode = byt_pte_encode;
278 } else {
279 ppgtt->pte_encode = gen6_pte_encode;
280 }
281 ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
282 ppgtt->enable = gen6_ppgtt_enable; 334 ppgtt->enable = gen6_ppgtt_enable;
283 ppgtt->clear_range = gen6_ppgtt_clear_range; 335 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
284 ppgtt->insert_entries = gen6_ppgtt_insert_entries; 336 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
285 ppgtt->cleanup = gen6_ppgtt_cleanup; 337 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
338 ppgtt->base.scratch = dev_priv->gtt.base.scratch;
286 ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, 339 ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
287 GFP_KERNEL); 340 GFP_KERNEL);
288 if (!ppgtt->pt_pages) 341 if (!ppgtt->pt_pages)
@@ -313,8 +366,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
313 ppgtt->pt_dma_addr[i] = pt_addr; 366 ppgtt->pt_dma_addr[i] = pt_addr;
314 } 367 }
315 368
316 ppgtt->clear_range(ppgtt, 0, 369 ppgtt->base.clear_range(&ppgtt->base, 0,
317 ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); 370 ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);
318 371
319 ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); 372 ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
320 373
@@ -347,8 +400,7 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
347 if (!ppgtt) 400 if (!ppgtt)
348 return -ENOMEM; 401 return -ENOMEM;
349 402
350 ppgtt->dev = dev; 403 ppgtt->base.dev = dev;
351 ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
352 404
353 if (INTEL_INFO(dev)->gen < 8) 405 if (INTEL_INFO(dev)->gen < 8)
354 ret = gen6_ppgtt_init(ppgtt); 406 ret = gen6_ppgtt_init(ppgtt);
@@ -357,8 +409,11 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
357 409
358 if (ret) 410 if (ret)
359 kfree(ppgtt); 411 kfree(ppgtt);
360 else 412 else {
361 dev_priv->mm.aliasing_ppgtt = ppgtt; 413 dev_priv->mm.aliasing_ppgtt = ppgtt;
414 drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
415 ppgtt->base.total);
416 }
362 417
363 return ret; 418 return ret;
364} 419}
@@ -371,7 +426,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
371 if (!ppgtt) 426 if (!ppgtt)
372 return; 427 return;
373 428
374 ppgtt->cleanup(ppgtt); 429 ppgtt->base.cleanup(&ppgtt->base);
375 dev_priv->mm.aliasing_ppgtt = NULL; 430 dev_priv->mm.aliasing_ppgtt = NULL;
376} 431}
377 432
@@ -379,17 +434,17 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
379 struct drm_i915_gem_object *obj, 434 struct drm_i915_gem_object *obj,
380 enum i915_cache_level cache_level) 435 enum i915_cache_level cache_level)
381{ 436{
382 ppgtt->insert_entries(ppgtt, obj->pages, 437 ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
383 obj->gtt_space->start >> PAGE_SHIFT, 438 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
384 cache_level); 439 cache_level);
385} 440}
386 441
387void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, 442void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
388 struct drm_i915_gem_object *obj) 443 struct drm_i915_gem_object *obj)
389{ 444{
390 ppgtt->clear_range(ppgtt, 445 ppgtt->base.clear_range(&ppgtt->base,
391 obj->gtt_space->start >> PAGE_SHIFT, 446 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
392 obj->base.size >> PAGE_SHIFT); 447 obj->base.size >> PAGE_SHIFT);
393} 448}
394 449
395extern int intel_iommu_gfx_mapped; 450extern int intel_iommu_gfx_mapped;
@@ -436,11 +491,12 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
436 struct drm_i915_gem_object *obj; 491 struct drm_i915_gem_object *obj;
437 492
438 /* First fill our portion of the GTT with scratch pages */ 493 /* First fill our portion of the GTT with scratch pages */
439 dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE, 494 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
440 dev_priv->gtt.total / PAGE_SIZE); 495 dev_priv->gtt.base.start / PAGE_SIZE,
496 dev_priv->gtt.base.total / PAGE_SIZE);
441 497
442 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 498 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
443 i915_gem_clflush_object(obj); 499 i915_gem_clflush_object(obj, obj->pin_display);
444 i915_gem_gtt_bind_object(obj, obj->cache_level); 500 i915_gem_gtt_bind_object(obj, obj->cache_level);
445 } 501 }
446 502
@@ -466,12 +522,12 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
466 * within the global GTT as well as accessible by the GPU through the GMADR 522 * within the global GTT as well as accessible by the GPU through the GMADR
467 * mapped BAR (dev_priv->mm.gtt->gtt). 523 * mapped BAR (dev_priv->mm.gtt->gtt).
468 */ 524 */
469static void gen6_ggtt_insert_entries(struct drm_device *dev, 525static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
470 struct sg_table *st, 526 struct sg_table *st,
471 unsigned int first_entry, 527 unsigned int first_entry,
472 enum i915_cache_level level) 528 enum i915_cache_level level)
473{ 529{
474 struct drm_i915_private *dev_priv = dev->dev_private; 530 struct drm_i915_private *dev_priv = vm->dev->dev_private;
475 gen6_gtt_pte_t __iomem *gtt_entries = 531 gen6_gtt_pte_t __iomem *gtt_entries =
476 (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; 532 (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
477 int i = 0; 533 int i = 0;
@@ -480,8 +536,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
480 536
481 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 537 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
482 addr = sg_page_iter_dma_address(&sg_iter); 538 addr = sg_page_iter_dma_address(&sg_iter);
483 iowrite32(dev_priv->gtt.pte_encode(dev, addr, level), 539 iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
484 &gtt_entries[i]);
485 i++; 540 i++;
486 } 541 }
487 542
@@ -492,8 +547,8 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
492 * hardware should work, we must keep this posting read for paranoia. 547 * hardware should work, we must keep this posting read for paranoia.
493 */ 548 */
494 if (i != 0) 549 if (i != 0)
495 WARN_ON(readl(&gtt_entries[i-1]) 550 WARN_ON(readl(&gtt_entries[i-1]) !=
496 != dev_priv->gtt.pte_encode(dev, addr, level)); 551 vm->pte_encode(addr, level));
497 552
498 /* This next bit makes the above posting read even more important. We 553 /* This next bit makes the above posting read even more important. We
499 * want to flush the TLBs only after we're certain all the PTE updates 554 * want to flush the TLBs only after we're certain all the PTE updates
@@ -503,11 +558,11 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
503 POSTING_READ(GFX_FLSH_CNTL_GEN6); 558 POSTING_READ(GFX_FLSH_CNTL_GEN6);
504} 559}
505 560
506static void gen6_ggtt_clear_range(struct drm_device *dev, 561static void gen6_ggtt_clear_range(struct i915_address_space *vm,
507 unsigned int first_entry, 562 unsigned int first_entry,
508 unsigned int num_entries) 563 unsigned int num_entries)
509{ 564{
510 struct drm_i915_private *dev_priv = dev->dev_private; 565 struct drm_i915_private *dev_priv = vm->dev->dev_private;
511 gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = 566 gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
512 (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; 567 (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
513 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; 568 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
@@ -518,16 +573,14 @@ static void gen6_ggtt_clear_range(struct drm_device *dev,
518 first_entry, num_entries, max_entries)) 573 first_entry, num_entries, max_entries))
519 num_entries = max_entries; 574 num_entries = max_entries;
520 575
521 scratch_pte = dev_priv->gtt.pte_encode(dev, 576 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
522 dev_priv->gtt.scratch_page_dma,
523 I915_CACHE_LLC);
524 for (i = 0; i < num_entries; i++) 577 for (i = 0; i < num_entries; i++)
525 iowrite32(scratch_pte, &gtt_base[i]); 578 iowrite32(scratch_pte, &gtt_base[i]);
526 readl(gtt_base); 579 readl(gtt_base);
527} 580}
528 581
529 582
530static void i915_ggtt_insert_entries(struct drm_device *dev, 583static void i915_ggtt_insert_entries(struct i915_address_space *vm,
531 struct sg_table *st, 584 struct sg_table *st,
532 unsigned int pg_start, 585 unsigned int pg_start,
533 enum i915_cache_level cache_level) 586 enum i915_cache_level cache_level)
@@ -539,7 +592,7 @@ static void i915_ggtt_insert_entries(struct drm_device *dev,
539 592
540} 593}
541 594
542static void i915_ggtt_clear_range(struct drm_device *dev, 595static void i915_ggtt_clear_range(struct i915_address_space *vm,
543 unsigned int first_entry, 596 unsigned int first_entry,
544 unsigned int num_entries) 597 unsigned int num_entries)
545{ 598{
@@ -552,10 +605,11 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
552{ 605{
553 struct drm_device *dev = obj->base.dev; 606 struct drm_device *dev = obj->base.dev;
554 struct drm_i915_private *dev_priv = dev->dev_private; 607 struct drm_i915_private *dev_priv = dev->dev_private;
608 const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
555 609
556 dev_priv->gtt.gtt_insert_entries(dev, obj->pages, 610 dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
557 obj->gtt_space->start >> PAGE_SHIFT, 611 entry,
558 cache_level); 612 cache_level);
559 613
560 obj->has_global_gtt_mapping = 1; 614 obj->has_global_gtt_mapping = 1;
561} 615}
@@ -564,10 +618,11 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
564{ 618{
565 struct drm_device *dev = obj->base.dev; 619 struct drm_device *dev = obj->base.dev;
566 struct drm_i915_private *dev_priv = dev->dev_private; 620 struct drm_i915_private *dev_priv = dev->dev_private;
621 const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
567 622
568 dev_priv->gtt.gtt_clear_range(obj->base.dev, 623 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
569 obj->gtt_space->start >> PAGE_SHIFT, 624 entry,
570 obj->base.size >> PAGE_SHIFT); 625 obj->base.size >> PAGE_SHIFT);
571 626
572 obj->has_global_gtt_mapping = 0; 627 obj->has_global_gtt_mapping = 0;
573} 628}
@@ -618,7 +673,8 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
618 * aperture. One page should be enough to keep any prefetching inside 673 * aperture. One page should be enough to keep any prefetching inside
619 * of the aperture. 674 * of the aperture.
620 */ 675 */
621 drm_i915_private_t *dev_priv = dev->dev_private; 676 struct drm_i915_private *dev_priv = dev->dev_private;
677 struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
622 struct drm_mm_node *entry; 678 struct drm_mm_node *entry;
623 struct drm_i915_gem_object *obj; 679 struct drm_i915_gem_object *obj;
624 unsigned long hole_start, hole_end; 680 unsigned long hole_start, hole_end;
@@ -626,37 +682,38 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
626 BUG_ON(mappable_end > end); 682 BUG_ON(mappable_end > end);
627 683
628 /* Subtract the guard page ... */ 684 /* Subtract the guard page ... */
629 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); 685 drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
630 if (!HAS_LLC(dev)) 686 if (!HAS_LLC(dev))
631 dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust; 687 dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
632 688
633 /* Mark any preallocated objects as occupied */ 689 /* Mark any preallocated objects as occupied */
634 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 690 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
635 DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n", 691 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
636 obj->gtt_offset, obj->base.size); 692 int ret;
637 693 DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
638 BUG_ON(obj->gtt_space != I915_GTT_RESERVED); 694 i915_gem_obj_ggtt_offset(obj), obj->base.size);
639 obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space, 695
640 obj->gtt_offset, 696 WARN_ON(i915_gem_obj_ggtt_bound(obj));
641 obj->base.size, 697 ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
642 false); 698 if (ret)
699 DRM_DEBUG_KMS("Reservation failed\n");
643 obj->has_global_gtt_mapping = 1; 700 obj->has_global_gtt_mapping = 1;
701 list_add(&vma->vma_link, &obj->vma_list);
644 } 702 }
645 703
646 dev_priv->gtt.start = start; 704 dev_priv->gtt.base.start = start;
647 dev_priv->gtt.total = end - start; 705 dev_priv->gtt.base.total = end - start;
648 706
649 /* Clear any non-preallocated blocks */ 707 /* Clear any non-preallocated blocks */
650 drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space, 708 drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
651 hole_start, hole_end) { 709 const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
652 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", 710 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
653 hole_start, hole_end); 711 hole_start, hole_end);
654 dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE, 712 ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
655 (hole_end-hole_start) / PAGE_SIZE);
656 } 713 }
657 714
658 /* And finally clear the reserved guard page */ 715 /* And finally clear the reserved guard page */
659 dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1); 716 ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
660} 717}
661 718
662static bool 719static bool
@@ -679,7 +736,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
679 struct drm_i915_private *dev_priv = dev->dev_private; 736 struct drm_i915_private *dev_priv = dev->dev_private;
680 unsigned long gtt_size, mappable_size; 737 unsigned long gtt_size, mappable_size;
681 738
682 gtt_size = dev_priv->gtt.total; 739 gtt_size = dev_priv->gtt.base.total;
683 mappable_size = dev_priv->gtt.mappable_end; 740 mappable_size = dev_priv->gtt.mappable_end;
684 741
685 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { 742 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
@@ -688,7 +745,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
688 if (INTEL_INFO(dev)->gen <= 7) { 745 if (INTEL_INFO(dev)->gen <= 7) {
689 /* PPGTT pdes are stolen from global gtt ptes, so shrink the 746 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
690 * aperture accordingly when using aliasing ppgtt. */ 747 * aperture accordingly when using aliasing ppgtt. */
691 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; 748 gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
692 } 749 }
693 750
694 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 751 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
@@ -698,8 +755,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
698 return; 755 return;
699 756
700 DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); 757 DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
701 drm_mm_takedown(&dev_priv->mm.gtt_space); 758 drm_mm_takedown(&dev_priv->gtt.base.mm);
702 gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE; 759 gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
703 } 760 }
704 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 761 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
705} 762}
@@ -724,8 +781,8 @@ static int setup_scratch_page(struct drm_device *dev)
724#else 781#else
725 dma_addr = page_to_phys(page); 782 dma_addr = page_to_phys(page);
726#endif 783#endif
727 dev_priv->gtt.scratch_page = page; 784 dev_priv->gtt.base.scratch.page = page;
728 dev_priv->gtt.scratch_page_dma = dma_addr; 785 dev_priv->gtt.base.scratch.addr = dma_addr;
729 786
730 return 0; 787 return 0;
731} 788}
@@ -733,11 +790,13 @@ static int setup_scratch_page(struct drm_device *dev)
733static void teardown_scratch_page(struct drm_device *dev) 790static void teardown_scratch_page(struct drm_device *dev)
734{ 791{
735 struct drm_i915_private *dev_priv = dev->dev_private; 792 struct drm_i915_private *dev_priv = dev->dev_private;
736 set_pages_wb(dev_priv->gtt.scratch_page, 1); 793 struct page *page = dev_priv->gtt.base.scratch.page;
737 pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma, 794
795 set_pages_wb(page, 1);
796 pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
738 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 797 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
739 put_page(dev_priv->gtt.scratch_page); 798 put_page(page);
740 __free_page(dev_priv->gtt.scratch_page); 799 __free_page(page);
741} 800}
742 801
743static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) 802static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -800,17 +859,18 @@ static int gen6_gmch_probe(struct drm_device *dev,
800 if (ret) 859 if (ret)
801 DRM_ERROR("Scratch setup failed\n"); 860 DRM_ERROR("Scratch setup failed\n");
802 861
803 dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range; 862 dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
804 dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries; 863 dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
805 864
806 return ret; 865 return ret;
807} 866}
808 867
809static void gen6_gmch_remove(struct drm_device *dev) 868static void gen6_gmch_remove(struct i915_address_space *vm)
810{ 869{
811 struct drm_i915_private *dev_priv = dev->dev_private; 870
812 iounmap(dev_priv->gtt.gsm); 871 struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
813 teardown_scratch_page(dev_priv->dev); 872 iounmap(gtt->gsm);
873 teardown_scratch_page(vm->dev);
814} 874}
815 875
816static int i915_gmch_probe(struct drm_device *dev, 876static int i915_gmch_probe(struct drm_device *dev,
@@ -831,13 +891,13 @@ static int i915_gmch_probe(struct drm_device *dev,
831 intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end); 891 intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
832 892
833 dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); 893 dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
834 dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range; 894 dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
835 dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries; 895 dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
836 896
837 return 0; 897 return 0;
838} 898}
839 899
840static void i915_gmch_remove(struct drm_device *dev) 900static void i915_gmch_remove(struct i915_address_space *vm)
841{ 901{
842 intel_gmch_remove(); 902 intel_gmch_remove();
843} 903}
@@ -849,34 +909,35 @@ int i915_gem_gtt_init(struct drm_device *dev)
849 int ret; 909 int ret;
850 910
851 if (INTEL_INFO(dev)->gen <= 5) { 911 if (INTEL_INFO(dev)->gen <= 5) {
852 dev_priv->gtt.gtt_probe = i915_gmch_probe; 912 gtt->gtt_probe = i915_gmch_probe;
853 dev_priv->gtt.gtt_remove = i915_gmch_remove; 913 gtt->base.cleanup = i915_gmch_remove;
854 } else { 914 } else {
855 dev_priv->gtt.gtt_probe = gen6_gmch_probe; 915 gtt->gtt_probe = gen6_gmch_probe;
856 dev_priv->gtt.gtt_remove = gen6_gmch_remove; 916 gtt->base.cleanup = gen6_gmch_remove;
857 if (IS_HASWELL(dev)) { 917 if (IS_HASWELL(dev) && dev_priv->ellc_size)
858 dev_priv->gtt.pte_encode = hsw_pte_encode; 918 gtt->base.pte_encode = iris_pte_encode;
859 } else if (IS_VALLEYVIEW(dev)) { 919 else if (IS_HASWELL(dev))
860 dev_priv->gtt.pte_encode = byt_pte_encode; 920 gtt->base.pte_encode = hsw_pte_encode;
861 } else { 921 else if (IS_VALLEYVIEW(dev))
862 dev_priv->gtt.pte_encode = gen6_pte_encode; 922 gtt->base.pte_encode = byt_pte_encode;
863 } 923 else if (INTEL_INFO(dev)->gen >= 7)
924 gtt->base.pte_encode = ivb_pte_encode;
925 else
926 gtt->base.pte_encode = snb_pte_encode;
864 } 927 }
865 928
866 ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total, 929 ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
867 &dev_priv->gtt.stolen_size, 930 &gtt->mappable_base, &gtt->mappable_end);
868 &gtt->mappable_base,
869 &gtt->mappable_end);
870 if (ret) 931 if (ret)
871 return ret; 932 return ret;
872 933
934 gtt->base.dev = dev;
935
873 /* GMADR is the PCI mmio aperture into the global GTT. */ 936 /* GMADR is the PCI mmio aperture into the global GTT. */
874 DRM_INFO("Memory usable by graphics device = %zdM\n", 937 DRM_INFO("Memory usable by graphics device = %zdM\n",
875 dev_priv->gtt.total >> 20); 938 gtt->base.total >> 20);
876 DRM_DEBUG_DRIVER("GMADR size = %ldM\n", 939 DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
877 dev_priv->gtt.mappable_end >> 20); 940 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
878 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
879 dev_priv->gtt.stolen_size >> 20);
880 941
881 return 0; 942 return 0;
882} 943}
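
After this hunk the platform-specific PTE encoder is chosen exactly once at probe time and stored in gtt->base.pte_encode, which the PPGTT later inherits, rather than being re-selected inside gen6_ppgtt_init. A toy version of that select-once, call-through-a-pointer shape; the platform flag and encodings here are made up:

/* Per-platform PTE encoding selected once at init time through a function
 * pointer, instead of branching inside every encode call. Illustrative. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t (*pte_encode_fn)(uint64_t addr, int cache_level);

static uint32_t snb_encode(uint64_t addr, int level)
{
	(void)level;
	return (uint32_t)(addr | 1);		/* valid bit only, toy encoding */
}

static uint32_t byt_encode(uint64_t addr, int level)
{
	return (uint32_t)(addr | 1 | (level ? 4 : 0));	/* toy snoop bit */
}

struct gtt {
	pte_encode_fn pte_encode;
};

static void gtt_init(struct gtt *gtt, int is_valleyview)
{
	/* Decide once; hot paths then call gtt->pte_encode() blindly. */
	gtt->pte_encode = is_valleyview ? byt_encode : snb_encode;
}

int main(void)
{
	struct gtt gtt;

	gtt_init(&gtt, 1);
	printf("pte = 0x%x\n", gtt.pte_encode(0x1000, 1));
	return 0;
}
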
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 982d4732cecf..9969d10b80f5 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -45,49 +45,48 @@
45static unsigned long i915_stolen_to_physical(struct drm_device *dev) 45static unsigned long i915_stolen_to_physical(struct drm_device *dev)
46{ 46{
47 struct drm_i915_private *dev_priv = dev->dev_private; 47 struct drm_i915_private *dev_priv = dev->dev_private;
48 struct pci_dev *pdev = dev_priv->bridge_dev; 48 struct resource *r;
49 u32 base; 49 u32 base;
50 50
51 /* On the machines I have tested the Graphics Base of Stolen Memory 51 /* Almost universally we can find the Graphics Base of Stolen Memory
52 * is unreliable, so on those compute the base by subtracting the 52 * at offset 0x5c in the igfx configuration space. On a few (desktop)
53 * stolen memory from the Top of Low Usable DRAM which is where the 53 * machines this is also mirrored in the bridge device at different
54 * BIOS places the graphics stolen memory. 54 * locations, or in the MCHBAR. On gen2, the layout is again slightly
55 * different with the Graphics Segment immediately following Top of
56 * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
57 * reported by 865g, so we just use the top of memory as determined
58 * by the e820 probe.
55 * 59 *
56 * On gen2, the layout is slightly different with the Graphics Segment 60 * XXX However gen2 requires an unavailable symbol.
57 * immediately following Top of Memory (or Top of Usable DRAM). Note
58 * it appears that TOUD is only reported by 865g, so we just use the
59 * top of memory as determined by the e820 probe.
60 *
61 * XXX gen2 requires an unavailable symbol and 945gm fails with
62 * its value of TOLUD.
63 */ 61 */
64 base = 0; 62 base = 0;
65 if (IS_VALLEYVIEW(dev)) { 63 if (INTEL_INFO(dev)->gen >= 3) {
64 /* Read Graphics Base of Stolen Memory directly */
66 pci_read_config_dword(dev->pdev, 0x5c, &base); 65 pci_read_config_dword(dev->pdev, 0x5c, &base);
67 base &= ~((1<<20) - 1); 66 base &= ~((1<<20) - 1);
68 } else if (INTEL_INFO(dev)->gen >= 6) { 67 } else { /* GEN2 */
69 /* Read Base Data of Stolen Memory Register (BDSM) directly.
70 * Note that there is also a MCHBAR mirror at 0x1080c0 or
71 * we could use device 2:0x5c instead.
72 */
73 pci_read_config_dword(pdev, 0xB0, &base);
74 base &= ~4095; /* lower bits used for locking register */
75 } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
76 /* Read Graphics Base of Stolen Memory directly */
77 pci_read_config_dword(pdev, 0xA4, &base);
78#if 0 68#if 0
79 } else if (IS_GEN3(dev)) {
80 u8 val;
81 /* Stolen is immediately below Top of Low Usable DRAM */
82 pci_read_config_byte(pdev, 0x9c, &val);
83 base = val >> 3 << 27;
84 base -= dev_priv->mm.gtt->stolen_size;
85 } else {
86 /* Stolen is immediately above Top of Memory */ 69 /* Stolen is immediately above Top of Memory */
87 base = max_low_pfn_mapped << PAGE_SHIFT; 70 base = max_low_pfn_mapped << PAGE_SHIFT;
88#endif 71#endif
89 } 72 }
90 73
74 if (base == 0)
75 return 0;
76
77 /* Verify that nothing else uses this physical address. Stolen
78 * memory should be reserved by the BIOS and hidden from the
79 * kernel. So if the region is already marked as busy, something
80 * is seriously wrong.
81 */
82 r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
83 "Graphics Stolen Memory");
84 if (r == NULL) {
85 DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
86 base, base + (uint32_t)dev_priv->gtt.stolen_size);
87 base = 0;
88 }
89
91 return base; 90 return base;
92} 91}
93 92
@@ -95,32 +94,37 @@ static int i915_setup_compression(struct drm_device *dev, int size)
95{ 94{
96 struct drm_i915_private *dev_priv = dev->dev_private; 95 struct drm_i915_private *dev_priv = dev->dev_private;
97 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); 96 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
97 int ret;
98 98
99 /* Try to over-allocate to reduce reallocations and fragmentation */ 99 compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
100 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
101 size <<= 1, 4096, 0);
102 if (!compressed_fb) 100 if (!compressed_fb)
103 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, 101 goto err_llb;
104 size >>= 1, 4096, 0); 102
105 if (compressed_fb) 103 /* Try to over-allocate to reduce reallocations and fragmentation */
106 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); 104 ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
107 if (!compressed_fb) 105 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
108 goto err; 106 if (ret)
107 ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
108 size >>= 1, 4096,
109 DRM_MM_SEARCH_DEFAULT);
110 if (ret)
111 goto err_llb;
109 112
110 if (HAS_PCH_SPLIT(dev)) 113 if (HAS_PCH_SPLIT(dev))
111 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); 114 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
112 else if (IS_GM45(dev)) { 115 else if (IS_GM45(dev)) {
113 I915_WRITE(DPFC_CB_BASE, compressed_fb->start); 116 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
114 } else { 117 } else {
115 compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, 118 compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
116 4096, 4096, 0);
117 if (compressed_llb)
118 compressed_llb = drm_mm_get_block(compressed_llb,
119 4096, 4096);
120 if (!compressed_llb) 119 if (!compressed_llb)
121 goto err_fb; 120 goto err_fb;
122 121
123 dev_priv->compressed_llb = compressed_llb; 122 ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
123 4096, 4096, DRM_MM_SEARCH_DEFAULT);
124 if (ret)
125 goto err_fb;
126
127 dev_priv->fbc.compressed_llb = compressed_llb;
124 128
125 I915_WRITE(FBC_CFB_BASE, 129 I915_WRITE(FBC_CFB_BASE,
126 dev_priv->mm.stolen_base + compressed_fb->start); 130 dev_priv->mm.stolen_base + compressed_fb->start);
@@ -128,8 +132,8 @@ static int i915_setup_compression(struct drm_device *dev, int size)
128 dev_priv->mm.stolen_base + compressed_llb->start); 132 dev_priv->mm.stolen_base + compressed_llb->start);
129 } 133 }
130 134
131 dev_priv->compressed_fb = compressed_fb; 135 dev_priv->fbc.compressed_fb = compressed_fb;
132 dev_priv->cfb_size = size; 136 dev_priv->fbc.size = size;
133 137
134 DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", 138 DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
135 size); 139 size);
@@ -137,8 +141,10 @@ static int i915_setup_compression(struct drm_device *dev, int size)
137 return 0; 141 return 0;
138 142
139err_fb: 143err_fb:
140 drm_mm_put_block(compressed_fb); 144 kfree(compressed_llb);
141err: 145 drm_mm_remove_node(compressed_fb);
146err_llb:
147 kfree(compressed_fb);
142 pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); 148 pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
143 return -ENOSPC; 149 return -ENOSPC;
144} 150}
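
The drm_mm conversion in this function swaps the old search_free/get_block/put_block triple for caller-allocated nodes: kzalloc a drm_mm_node, drm_mm_insert_node() to place it, and drm_mm_remove_node() plus kfree() to release it. A sketch of that caller-owns-the-node lifecycle against a trivial bump allocator, which stands in for the real drm_mm:

/* Caller-allocated node lifecycle mirroring the drm_mm API change above:
 * the allocator no longer hands out nodes, it fills in one you own. */
#include <stdio.h>
#include <stdlib.h>

struct mm_node {
	unsigned long start;
	unsigned long size;
};

static unsigned long next_free;	/* toy stand-in for the real range manager */

static int mm_insert_node(struct mm_node *node, unsigned long size,
			  unsigned long align)
{
	next_free = (next_free + align - 1) & ~(align - 1);
	node->start = next_free;
	node->size = size;
	next_free += size;
	return 0;
}

static void mm_remove_node(struct mm_node *node)
{
	/* The real version returns the range to the manager. */
	node->start = node->size = 0;
}

int main(void)
{
	struct mm_node *node = calloc(1, sizeof(*node));

	if (!node)
		return 1;
	if (mm_insert_node(node, 4096, 4096)) {	/* error path: free the node */
		free(node);
		return 1;
	}
	printf("placed at 0x%lx\n", node->start);

	mm_remove_node(node);	/* release the range ... */
	free(node);		/* ... then the node memory, in that order */
	return 0;
}
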
@@ -150,7 +156,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
150 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 156 if (!drm_mm_initialized(&dev_priv->mm.stolen))
151 return -ENODEV; 157 return -ENODEV;
152 158
153 if (size < dev_priv->cfb_size) 159 if (size < dev_priv->fbc.size)
154 return 0; 160 return 0;
155 161
156 /* Release any current block */ 162 /* Release any current block */
@@ -163,16 +169,20 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
163{ 169{
164 struct drm_i915_private *dev_priv = dev->dev_private; 170 struct drm_i915_private *dev_priv = dev->dev_private;
165 171
166 if (dev_priv->cfb_size == 0) 172 if (dev_priv->fbc.size == 0)
167 return; 173 return;
168 174
169 if (dev_priv->compressed_fb) 175 if (dev_priv->fbc.compressed_fb) {
170 drm_mm_put_block(dev_priv->compressed_fb); 176 drm_mm_remove_node(dev_priv->fbc.compressed_fb);
177 kfree(dev_priv->fbc.compressed_fb);
178 }
171 179
172 if (dev_priv->compressed_llb) 180 if (dev_priv->fbc.compressed_llb) {
173 drm_mm_put_block(dev_priv->compressed_llb); 181 drm_mm_remove_node(dev_priv->fbc.compressed_llb);
182 kfree(dev_priv->fbc.compressed_llb);
183 }
174 184
175 dev_priv->cfb_size = 0; 185 dev_priv->fbc.size = 0;
176} 186}
177 187
178void i915_gem_cleanup_stolen(struct drm_device *dev) 188void i915_gem_cleanup_stolen(struct drm_device *dev)
@@ -201,6 +211,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
201 if (IS_VALLEYVIEW(dev)) 211 if (IS_VALLEYVIEW(dev))
202 bios_reserved = 1024*1024; /* top 1M on VLV/BYT */ 212 bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
203 213
214 if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
215 return 0;
216
204 /* Basic memrange allocator for stolen space */ 217 /* Basic memrange allocator for stolen space */
205 drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size - 218 drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
206 bios_reserved); 219 bios_reserved);
@@ -271,9 +284,7 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
271 if (obj == NULL) 284 if (obj == NULL)
272 return NULL; 285 return NULL;
273 286
274 if (drm_gem_private_object_init(dev, &obj->base, stolen->size)) 287 drm_gem_private_object_init(dev, &obj->base, stolen->size);
275 goto cleanup;
276
277 i915_gem_object_init(obj, &i915_gem_object_stolen_ops); 288 i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
278 289
279 obj->pages = i915_pages_create_for_stolen(dev, 290 obj->pages = i915_pages_create_for_stolen(dev,
@@ -285,9 +296,8 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
285 i915_gem_object_pin_pages(obj); 296 i915_gem_object_pin_pages(obj);
286 obj->stolen = stolen; 297 obj->stolen = stolen;
287 298
288 obj->base.write_domain = I915_GEM_DOMAIN_GTT; 299 obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
289 obj->base.read_domains = I915_GEM_DOMAIN_GTT; 300 obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
290 obj->cache_level = I915_CACHE_NONE;
291 301
292 return obj; 302 return obj;
293 303
@@ -302,6 +312,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
302 struct drm_i915_private *dev_priv = dev->dev_private; 312 struct drm_i915_private *dev_priv = dev->dev_private;
303 struct drm_i915_gem_object *obj; 313 struct drm_i915_gem_object *obj;
304 struct drm_mm_node *stolen; 314 struct drm_mm_node *stolen;
315 int ret;
305 316
306 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 317 if (!drm_mm_initialized(&dev_priv->mm.stolen))
307 return NULL; 318 return NULL;
@@ -310,17 +321,23 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
310 if (size == 0) 321 if (size == 0)
311 return NULL; 322 return NULL;
312 323
313 stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); 324 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
314 if (stolen) 325 if (!stolen)
315 stolen = drm_mm_get_block(stolen, size, 4096); 326 return NULL;
316 if (stolen == NULL) 327
328 ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
329 4096, DRM_MM_SEARCH_DEFAULT);
330 if (ret) {
331 kfree(stolen);
317 return NULL; 332 return NULL;
333 }
318 334
319 obj = _i915_gem_object_create_stolen(dev, stolen); 335 obj = _i915_gem_object_create_stolen(dev, stolen);
320 if (obj) 336 if (obj)
321 return obj; 337 return obj;
322 338
323 drm_mm_put_block(stolen); 339 drm_mm_remove_node(stolen);
340 kfree(stolen);
324 return NULL; 341 return NULL;
325} 342}
326 343
@@ -331,8 +348,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
331 u32 size) 348 u32 size)
332{ 349{
333 struct drm_i915_private *dev_priv = dev->dev_private; 350 struct drm_i915_private *dev_priv = dev->dev_private;
351 struct i915_address_space *ggtt = &dev_priv->gtt.base;
334 struct drm_i915_gem_object *obj; 352 struct drm_i915_gem_object *obj;
335 struct drm_mm_node *stolen; 353 struct drm_mm_node *stolen;
354 struct i915_vma *vma;
355 int ret;
336 356
337 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 357 if (!drm_mm_initialized(&dev_priv->mm.stolen))
338 return NULL; 358 return NULL;
@@ -347,56 +367,74 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
347 if (WARN_ON(size == 0)) 367 if (WARN_ON(size == 0))
348 return NULL; 368 return NULL;
349 369
350 stolen = drm_mm_create_block(&dev_priv->mm.stolen, 370 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
351 stolen_offset, size, 371 if (!stolen)
352 false); 372 return NULL;
353 if (stolen == NULL) { 373
374 stolen->start = stolen_offset;
375 stolen->size = size;
376 ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
377 if (ret) {
354 DRM_DEBUG_KMS("failed to allocate stolen space\n"); 378 DRM_DEBUG_KMS("failed to allocate stolen space\n");
379 kfree(stolen);
355 return NULL; 380 return NULL;
356 } 381 }
357 382
358 obj = _i915_gem_object_create_stolen(dev, stolen); 383 obj = _i915_gem_object_create_stolen(dev, stolen);
359 if (obj == NULL) { 384 if (obj == NULL) {
360 DRM_DEBUG_KMS("failed to allocate stolen object\n"); 385 DRM_DEBUG_KMS("failed to allocate stolen object\n");
361 drm_mm_put_block(stolen); 386 drm_mm_remove_node(stolen);
387 kfree(stolen);
362 return NULL; 388 return NULL;
363 } 389 }
364 390
365 /* Some objects just need physical mem from stolen space */ 391 /* Some objects just need physical mem from stolen space */
366 if (gtt_offset == -1) 392 if (gtt_offset == I915_GTT_OFFSET_NONE)
367 return obj; 393 return obj;
368 394
395 vma = i915_gem_vma_create(obj, ggtt);
396 if (IS_ERR(vma)) {
397 ret = PTR_ERR(vma);
398 goto err_out;
399 }
400
369 /* To simplify the initialisation sequence between KMS and GTT, 401 /* To simplify the initialisation sequence between KMS and GTT,
370 * we allow construction of the stolen object prior to 402 * we allow construction of the stolen object prior to
371 * setting up the GTT space. The actual reservation will occur 403 * setting up the GTT space. The actual reservation will occur
372 * later. 404 * later.
373 */ 405 */
374 if (drm_mm_initialized(&dev_priv->mm.gtt_space)) { 406 vma->node.start = gtt_offset;
375 obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space, 407 vma->node.size = size;
376 gtt_offset, size, 408 if (drm_mm_initialized(&ggtt->mm)) {
377 false); 409 ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
378 if (obj->gtt_space == NULL) { 410 if (ret) {
379 DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); 411 DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
380 drm_gem_object_unreference(&obj->base); 412 goto err_vma;
381 return NULL;
382 } 413 }
383 } else 414 }
384 obj->gtt_space = I915_GTT_RESERVED;
385 415
386 obj->gtt_offset = gtt_offset;
387 obj->has_global_gtt_mapping = 1; 416 obj->has_global_gtt_mapping = 1;
388 417
389 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); 418 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
390 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 419 list_add_tail(&vma->mm_list, &ggtt->inactive_list);
391 420
392 return obj; 421 return obj;
422
423err_vma:
424 i915_gem_vma_destroy(vma);
425err_out:
426 drm_mm_remove_node(stolen);
427 kfree(stolen);
428 drm_gem_object_unreference(&obj->base);
429 return NULL;
393} 430}
394 431
395void 432void
396i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) 433i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
397{ 434{
398 if (obj->stolen) { 435 if (obj->stolen) {
399 drm_mm_put_block(obj->stolen); 436 drm_mm_remove_node(obj->stolen);
437 kfree(obj->stolen);
400 obj->stolen = NULL; 438 obj->stolen = NULL;
401 } 439 }
402} 440}
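
For the preallocated case the placement is dictated by the caller: node->start and node->size are filled in and drm_mm_reserve_node() either claims exactly that range or fails, with no searching. A toy single-range manager showing those reserve-at-fixed-offset semantics:

/* Reserve-at-fixed-offset: the caller dictates start/size and the manager
 * only succeeds if that exact range is free. Toy single-range manager. */
#include <stdbool.h>
#include <stdio.h>

struct mm_node {
	unsigned long start;
	unsigned long size;
	bool allocated;
};

static struct mm_node taken = { .start = 0x1000, .size = 0x1000, .allocated = true };

static int mm_reserve_node(struct mm_node *node)
{
	/* Overlap test against the single existing allocation. */
	if (taken.allocated &&
	    node->start < taken.start + taken.size &&
	    taken.start < node->start + node->size)
		return -28;	/* -ENOSPC */
	node->allocated = true;
	return 0;
}

int main(void)
{
	struct mm_node a = { .start = 0x1000, .size = 0x1000 };	/* collides */
	struct mm_node b = { .start = 0x4000, .size = 0x1000 };	/* free */

	printf("reserve a: %d\n", mm_reserve_node(&a));
	printf("reserve b: %d\n", mm_reserve_node(&b));
	return 0;
}
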
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 537545be69db..032e9ef9c896 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 		return true;
 
 	if (INTEL_INFO(obj->base.dev)->gen == 3) {
-		if (obj->gtt_offset & ~I915_FENCE_START_MASK)
+		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
 			return false;
 	} else {
-		if (obj->gtt_offset & ~I830_FENCE_START_MASK)
+		if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
 			return false;
 	}
 
 	size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
-	if (obj->gtt_space->size != size)
+	if (i915_gem_obj_ggtt_size(obj) != size)
 		return false;
 
-	if (obj->gtt_offset & (size - 1))
+	if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
 		return false;
 
 	return true;
@@ -359,18 +359,19 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	 */
 
 	obj->map_and_fenceable =
-		obj->gtt_space == NULL ||
-		(obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
+		!i915_gem_obj_ggtt_bound(obj) ||
+		(i915_gem_obj_ggtt_offset(obj) +
+		 obj->base.size <= dev_priv->gtt.mappable_end &&
 		 i915_gem_object_fence_ok(obj, args->tiling_mode));
 
 	/* Rebind if we need a change of alignment */
 	if (!obj->map_and_fenceable) {
-		u32 unfenced_alignment =
+		u32 unfenced_align =
 			i915_gem_get_gtt_alignment(dev, obj->base.size,
 						   args->tiling_mode,
 						   false);
-		if (obj->gtt_offset & (unfenced_alignment - 1))
-			ret = i915_gem_object_unbind(obj);
+		if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
+			ret = i915_gem_object_ggtt_unbind(obj);
 	}
 
 	if (ret == 0) {
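
All three fence checks above lean on the same invariant: fenced GTT regions are power-of-two sized and naturally aligned, so offset & (size - 1) tests alignment and offset & ~START_MASK tests that the offset fits in the register field. A tiny self-contained illustration of the alignment idiom (hypothetical values):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Natural-alignment test as used by i915_gem_object_fence_ok(): only
 * meaningful when size is a power of two, as fence regions are. */
static bool naturally_aligned(uint64_t offset, uint64_t size)
{
	assert(size != 0 && (size & (size - 1)) == 0);	/* power of two */
	return (offset & (size - 1)) == 0;
}

int main(void)
{
	assert(naturally_aligned(0x100000, 0x100000));	/* 1 MiB at 1 MiB */
	assert(!naturally_aligned(0x180000, 0x100000));	/* misaligned */
	return 0;
}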
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
new file mode 100644
index 000000000000..558e568d5b45
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -0,0 +1,1019 @@
1/*
2 * Copyright (c) 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 * Mika Kuoppala <mika.kuoppala@intel.com>
27 *
28 */
29
30#include <generated/utsrelease.h>
31#include "i915_drv.h"
32
33static const char *yesno(int v)
34{
35 return v ? "yes" : "no";
36}
37
38static const char *ring_str(int ring)
39{
40 switch (ring) {
41 case RCS: return "render";
42 case VCS: return "bsd";
43 case BCS: return "blt";
44 case VECS: return "vebox";
45 default: return "";
46 }
47}
48
49static const char *pin_flag(int pinned)
50{
51 if (pinned > 0)
52 return " P";
53 else if (pinned < 0)
54 return " p";
55 else
56 return "";
57}
58
59static const char *tiling_flag(int tiling)
60{
61 switch (tiling) {
62 default:
63 case I915_TILING_NONE: return "";
64 case I915_TILING_X: return " X";
65 case I915_TILING_Y: return " Y";
66 }
67}
68
69static const char *dirty_flag(int dirty)
70{
71 return dirty ? " dirty" : "";
72}
73
74static const char *purgeable_flag(int purgeable)
75{
76 return purgeable ? " purgeable" : "";
77}
78
79static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
80{
81
82 if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
83 e->err = -ENOSPC;
84 return false;
85 }
86
87 if (e->bytes == e->size - 1 || e->err)
88 return false;
89
90 return true;
91}
92
93static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
94 unsigned len)
95{
96 if (e->pos + len <= e->start) {
97 e->pos += len;
98 return false;
99 }
100
101 /* First vsnprintf needs to fit in its entirety for memmove */
102 if (len >= e->size) {
103 e->err = -EIO;
104 return false;
105 }
106
107 return true;
108}
109
110static void __i915_error_advance(struct drm_i915_error_state_buf *e,
111 unsigned len)
112{
113 /* If this is first printf in this window, adjust it so that
114 * start position matches start of the buffer
115 */
116
117 if (e->pos < e->start) {
118 const size_t off = e->start - e->pos;
119
120 /* Should not happen but be paranoid */
121 if (off > len || e->bytes) {
122 e->err = -EIO;
123 return;
124 }
125
126 memmove(e->buf, e->buf + off, len - off);
127 e->bytes = len - off;
128 e->pos = e->start;
129 return;
130 }
131
132 e->bytes += len;
133 e->pos += len;
134}
135
136static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
137 const char *f, va_list args)
138{
139 unsigned len;
140
141 if (!__i915_error_ok(e))
142 return;
143
144 /* Seek the first printf which is hits start position */
145 if (e->pos < e->start) {
146 len = vsnprintf(NULL, 0, f, args);
147 if (!__i915_error_seek(e, len))
148 return;
149 }
150
151 len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
152 if (len >= e->size - e->bytes)
153 len = e->size - e->bytes - 1;
154
155 __i915_error_advance(e, len);
156}
157
158static void i915_error_puts(struct drm_i915_error_state_buf *e,
159 const char *str)
160{
161 unsigned len;
162
163 if (!__i915_error_ok(e))
164 return;
165
166 len = strlen(str);
167
168 /* Seek the first printf which is hits start position */
169 if (e->pos < e->start) {
170 if (!__i915_error_seek(e, len))
171 return;
172 }
173
174 if (len >= e->size - e->bytes)
175 len = e->size - e->bytes - 1;
176 memcpy(e->buf + e->bytes, str, len);
177
178 __i915_error_advance(e, len);
179}
180
181#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
182#define err_puts(e, s) i915_error_puts(e, s)
183
184static void print_error_buffers(struct drm_i915_error_state_buf *m,
185 const char *name,
186 struct drm_i915_error_buffer *err,
187 int count)
188{
189 err_printf(m, "%s [%d]:\n", name, count);
190
191 while (count--) {
192 err_printf(m, " %08x %8u %02x %02x %x %x",
193 err->gtt_offset,
194 err->size,
195 err->read_domains,
196 err->write_domain,
197 err->rseqno, err->wseqno);
198 err_puts(m, pin_flag(err->pinned));
199 err_puts(m, tiling_flag(err->tiling));
200 err_puts(m, dirty_flag(err->dirty));
201 err_puts(m, purgeable_flag(err->purgeable));
202 err_puts(m, err->ring != -1 ? " " : "");
203 err_puts(m, ring_str(err->ring));
204 err_puts(m, i915_cache_level_str(err->cache_level));
205
206 if (err->name)
207 err_printf(m, " (name: %d)", err->name);
208 if (err->fence_reg != I915_FENCE_REG_NONE)
209 err_printf(m, " (fence: %d)", err->fence_reg);
210
211 err_puts(m, "\n");
212 err++;
213 }
214}
215
216static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
217 struct drm_device *dev,
218 struct drm_i915_error_state *error,
219 unsigned ring)
220{
221 BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
222 err_printf(m, "%s command stream:\n", ring_str(ring));
223 err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
224 err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
225 err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
226 err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
227 err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
228 err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
229 err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
230 if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
231 err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
232
233 if (INTEL_INFO(dev)->gen >= 4)
234 err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
235 err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
236 err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
237 if (INTEL_INFO(dev)->gen >= 6) {
238 err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
239 err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
240 err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
241 error->semaphore_mboxes[ring][0],
242 error->semaphore_seqno[ring][0]);
243 err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
244 error->semaphore_mboxes[ring][1],
245 error->semaphore_seqno[ring][1]);
246 if (HAS_VEBOX(dev)) {
247 err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
248 error->semaphore_mboxes[ring][2],
249 error->semaphore_seqno[ring][2]);
250 }
251 }
252 err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
253 err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
254 err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
255 err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
256}
257
258void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
259{
260 va_list args;
261
262 va_start(args, f);
263 i915_error_vprintf(e, f, args);
264 va_end(args);
265}
266
267int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
268 const struct i915_error_state_file_priv *error_priv)
269{
270 struct drm_device *dev = error_priv->dev;
271 drm_i915_private_t *dev_priv = dev->dev_private;
272 struct drm_i915_error_state *error = error_priv->error;
273 struct intel_ring_buffer *ring;
274 int i, j, page, offset, elt;
275
276 if (!error) {
277 err_printf(m, "no error state collected\n");
278 goto out;
279 }
280
281 err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
282 error->time.tv_usec);
283 err_printf(m, "Kernel: " UTS_RELEASE "\n");
284 err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
285 err_printf(m, "EIR: 0x%08x\n", error->eir);
286 err_printf(m, "IER: 0x%08x\n", error->ier);
287 err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
288 err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
289 err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
290 err_printf(m, "CCID: 0x%08x\n", error->ccid);
291
292 for (i = 0; i < dev_priv->num_fence_regs; i++)
293 err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
294
295 for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
296 err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
297 error->extra_instdone[i]);
298
299 if (INTEL_INFO(dev)->gen >= 6) {
300 err_printf(m, "ERROR: 0x%08x\n", error->error);
301 err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
302 }
303
304 if (INTEL_INFO(dev)->gen == 7)
305 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
306
307 for_each_ring(ring, dev_priv, i)
308 i915_ring_error_state(m, dev, error, i);
309
310 if (error->active_bo)
311 print_error_buffers(m, "Active",
312 error->active_bo[0],
313 error->active_bo_count[0]);
314
315 if (error->pinned_bo)
316 print_error_buffers(m, "Pinned",
317 error->pinned_bo[0],
318 error->pinned_bo_count[0]);
319
320 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
321 struct drm_i915_error_object *obj;
322
323 if ((obj = error->ring[i].batchbuffer)) {
324 err_printf(m, "%s --- gtt_offset = 0x%08x\n",
325 dev_priv->ring[i].name,
326 obj->gtt_offset);
327 offset = 0;
328 for (page = 0; page < obj->page_count; page++) {
329 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
330 err_printf(m, "%08x : %08x\n", offset,
331 obj->pages[page][elt]);
332 offset += 4;
333 }
334 }
335 }
336
337 if (error->ring[i].num_requests) {
338 err_printf(m, "%s --- %d requests\n",
339 dev_priv->ring[i].name,
340 error->ring[i].num_requests);
341 for (j = 0; j < error->ring[i].num_requests; j++) {
342 err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
343 error->ring[i].requests[j].seqno,
344 error->ring[i].requests[j].jiffies,
345 error->ring[i].requests[j].tail);
346 }
347 }
348
349 if ((obj = error->ring[i].ringbuffer)) {
350 err_printf(m, "%s --- ringbuffer = 0x%08x\n",
351 dev_priv->ring[i].name,
352 obj->gtt_offset);
353 offset = 0;
354 for (page = 0; page < obj->page_count; page++) {
355 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
356 err_printf(m, "%08x : %08x\n",
357 offset,
358 obj->pages[page][elt]);
359 offset += 4;
360 }
361 }
362 }
363
364 obj = error->ring[i].ctx;
365 if (obj) {
366 err_printf(m, "%s --- HW Context = 0x%08x\n",
367 dev_priv->ring[i].name,
368 obj->gtt_offset);
369 offset = 0;
370 for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
371 err_printf(m, "[%04x] %08x %08x %08x %08x\n",
372 offset,
373 obj->pages[0][elt],
374 obj->pages[0][elt+1],
375 obj->pages[0][elt+2],
376 obj->pages[0][elt+3]);
377 offset += 16;
378 }
379 }
380 }
381
382 if (error->overlay)
383 intel_overlay_print_error_state(m, error->overlay);
384
385 if (error->display)
386 intel_display_print_error_state(m, dev, error->display);
387
388out:
389 if (m->bytes == 0 && m->err)
390 return m->err;
391
392 return 0;
393}
394
395int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
396 size_t count, loff_t pos)
397{
398 memset(ebuf, 0, sizeof(*ebuf));
399
400 /* We need to have enough room to store any i915_error_state printf
401 * so that we can move it to start position.
402 */
403 ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
404 ebuf->buf = kmalloc(ebuf->size,
405 GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
406
407 if (ebuf->buf == NULL) {
408 ebuf->size = PAGE_SIZE;
409 ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
410 }
411
412 if (ebuf->buf == NULL) {
413 ebuf->size = 128;
414 ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
415 }
416
417 if (ebuf->buf == NULL)
418 return -ENOMEM;
419
420 ebuf->start = pos;
421
422 return 0;
423}
424
425static void i915_error_object_free(struct drm_i915_error_object *obj)
426{
427 int page;
428
429 if (obj == NULL)
430 return;
431
432 for (page = 0; page < obj->page_count; page++)
433 kfree(obj->pages[page]);
434
435 kfree(obj);
436}
437
438static void i915_error_state_free(struct kref *error_ref)
439{
440 struct drm_i915_error_state *error = container_of(error_ref,
441 typeof(*error), ref);
442 int i;
443
444 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
445 i915_error_object_free(error->ring[i].batchbuffer);
446 i915_error_object_free(error->ring[i].ringbuffer);
447 i915_error_object_free(error->ring[i].ctx);
448 kfree(error->ring[i].requests);
449 }
450
451 kfree(error->active_bo);
452 kfree(error->overlay);
453 kfree(error->display);
454 kfree(error);
455}
456
457static struct drm_i915_error_object *
458i915_error_object_create_sized(struct drm_i915_private *dev_priv,
459 struct drm_i915_gem_object *src,
460 const int num_pages)
461{
462 struct drm_i915_error_object *dst;
463 int i;
464 u32 reloc_offset;
465
466 if (src == NULL || src->pages == NULL)
467 return NULL;
468
469 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
470 if (dst == NULL)
471 return NULL;
472
473 reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
474 for (i = 0; i < num_pages; i++) {
475 unsigned long flags;
476 void *d;
477
478 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
479 if (d == NULL)
480 goto unwind;
481
482 local_irq_save(flags);
483 if (reloc_offset < dev_priv->gtt.mappable_end &&
484 src->has_global_gtt_mapping) {
485 void __iomem *s;
486
487 /* Simply ignore tiling or any overlapping fence.
488 * It's part of the error state, and this hopefully
489 * captures what the GPU read.
490 */
491
492 s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
493 reloc_offset);
494 memcpy_fromio(d, s, PAGE_SIZE);
495 io_mapping_unmap_atomic(s);
496 } else if (src->stolen) {
497 unsigned long offset;
498
499 offset = dev_priv->mm.stolen_base;
500 offset += src->stolen->start;
501 offset += i << PAGE_SHIFT;
502
503 memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
504 } else {
505 struct page *page;
506 void *s;
507
508 page = i915_gem_object_get_page(src, i);
509
510 drm_clflush_pages(&page, 1);
511
512 s = kmap_atomic(page);
513 memcpy(d, s, PAGE_SIZE);
514 kunmap_atomic(s);
515
516 drm_clflush_pages(&page, 1);
517 }
518 local_irq_restore(flags);
519
520 dst->pages[i] = d;
521
522 reloc_offset += PAGE_SIZE;
523 }
524 dst->page_count = num_pages;
525
526 return dst;
527
528unwind:
529 while (i--)
530 kfree(dst->pages[i]);
531 kfree(dst);
532 return NULL;
533}
534#define i915_error_object_create(dev_priv, src) \
535 i915_error_object_create_sized((dev_priv), (src), \
536 (src)->base.size>>PAGE_SHIFT)
537
538static void capture_bo(struct drm_i915_error_buffer *err,
539 struct drm_i915_gem_object *obj)
540{
541 err->size = obj->base.size;
542 err->name = obj->base.name;
543 err->rseqno = obj->last_read_seqno;
544 err->wseqno = obj->last_write_seqno;
545 err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
546 err->read_domains = obj->base.read_domains;
547 err->write_domain = obj->base.write_domain;
548 err->fence_reg = obj->fence_reg;
549 err->pinned = 0;
550 if (obj->pin_count > 0)
551 err->pinned = 1;
552 if (obj->user_pin_count > 0)
553 err->pinned = -1;
554 err->tiling = obj->tiling_mode;
555 err->dirty = obj->dirty;
556 err->purgeable = obj->madv != I915_MADV_WILLNEED;
557 err->ring = obj->ring ? obj->ring->id : -1;
558 err->cache_level = obj->cache_level;
559}
560
561static u32 capture_active_bo(struct drm_i915_error_buffer *err,
562 int count, struct list_head *head)
563{
564 struct i915_vma *vma;
565 int i = 0;
566
567 list_for_each_entry(vma, head, mm_list) {
568 capture_bo(err++, vma->obj);
569 if (++i == count)
570 break;
571 }
572
573 return i;
574}
575
576static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
577 int count, struct list_head *head)
578{
579 struct drm_i915_gem_object *obj;
580 int i = 0;
581
582 list_for_each_entry(obj, head, global_list) {
583 if (obj->pin_count == 0)
584 continue;
585
586 capture_bo(err++, obj);
587 if (++i == count)
588 break;
589 }
590
591 return i;
592}
593
594static void i915_gem_record_fences(struct drm_device *dev,
595 struct drm_i915_error_state *error)
596{
597 struct drm_i915_private *dev_priv = dev->dev_private;
598 int i;
599
600 /* Fences */
601 switch (INTEL_INFO(dev)->gen) {
602 case 7:
603 case 6:
604 for (i = 0; i < dev_priv->num_fence_regs; i++)
605 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
606 break;
607 case 5:
608 case 4:
609 for (i = 0; i < 16; i++)
610 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
611 break;
612 case 3:
613 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
614 for (i = 0; i < 8; i++)
615 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
616 case 2:
617 for (i = 0; i < 8; i++)
618 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
619 break;
620
621 default:
622 BUG();
623 }
624}
625
626static struct drm_i915_error_object *
627i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
628 struct intel_ring_buffer *ring)
629{
630 struct i915_address_space *vm;
631 struct i915_vma *vma;
632 struct drm_i915_gem_object *obj;
633 u32 seqno;
634
635 if (!ring->get_seqno)
636 return NULL;
637
638 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
639 u32 acthd = I915_READ(ACTHD);
640
641 if (WARN_ON(ring->id != RCS))
642 return NULL;
643
644 obj = ring->private;
645 if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
646 acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
647 return i915_error_object_create(dev_priv, obj);
648 }
649
650 seqno = ring->get_seqno(ring, false);
651 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
652 list_for_each_entry(vma, &vm->active_list, mm_list) {
653 obj = vma->obj;
654 if (obj->ring != ring)
655 continue;
656
657 if (i915_seqno_passed(seqno, obj->last_read_seqno))
658 continue;
659
660 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
661 continue;
662
663 /* We need to copy these to an anonymous buffer as the simplest
664 * method to avoid being overwritten by userspace.
665 */
666 return i915_error_object_create(dev_priv, obj);
667 }
668 }
669
670 return NULL;
671}
672
673static void i915_record_ring_state(struct drm_device *dev,
674 struct drm_i915_error_state *error,
675 struct intel_ring_buffer *ring)
676{
677 struct drm_i915_private *dev_priv = dev->dev_private;
678
679 if (INTEL_INFO(dev)->gen >= 6) {
680 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
681 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
682 error->semaphore_mboxes[ring->id][0]
683 = I915_READ(RING_SYNC_0(ring->mmio_base));
684 error->semaphore_mboxes[ring->id][1]
685 = I915_READ(RING_SYNC_1(ring->mmio_base));
686 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
687 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
688 }
689
690 if (HAS_VEBOX(dev)) {
691 error->semaphore_mboxes[ring->id][2] =
692 I915_READ(RING_SYNC_2(ring->mmio_base));
693 error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2];
694 }
695
696 if (INTEL_INFO(dev)->gen >= 4) {
697 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
698 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
699 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
700 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
701 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
702 if (ring->id == RCS)
703 error->bbaddr = I915_READ64(BB_ADDR);
704 } else {
705 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
706 error->ipeir[ring->id] = I915_READ(IPEIR);
707 error->ipehr[ring->id] = I915_READ(IPEHR);
708 error->instdone[ring->id] = I915_READ(INSTDONE);
709 }
710
711 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
712 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
713 error->seqno[ring->id] = ring->get_seqno(ring, false);
714 error->acthd[ring->id] = intel_ring_get_active_head(ring);
715 error->head[ring->id] = I915_READ_HEAD(ring);
716 error->tail[ring->id] = I915_READ_TAIL(ring);
717 error->ctl[ring->id] = I915_READ_CTL(ring);
718
719 error->cpu_ring_head[ring->id] = ring->head;
720 error->cpu_ring_tail[ring->id] = ring->tail;
721}
722
723
724static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
725 struct drm_i915_error_state *error,
726 struct drm_i915_error_ring *ering)
727{
728 struct drm_i915_private *dev_priv = ring->dev->dev_private;
729 struct drm_i915_gem_object *obj;
730
731 /* Currently render ring is the only HW context user */
732 if (ring->id != RCS || !error->ccid)
733 return;
734
735 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
736 if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
737 ering->ctx = i915_error_object_create_sized(dev_priv,
738 obj, 1);
739 break;
740 }
741 }
742}
743
744static void i915_gem_record_rings(struct drm_device *dev,
745 struct drm_i915_error_state *error)
746{
747 struct drm_i915_private *dev_priv = dev->dev_private;
748 struct intel_ring_buffer *ring;
749 struct drm_i915_gem_request *request;
750 int i, count;
751
752 for_each_ring(ring, dev_priv, i) {
753 i915_record_ring_state(dev, error, ring);
754
755 error->ring[i].batchbuffer =
756 i915_error_first_batchbuffer(dev_priv, ring);
757
758 error->ring[i].ringbuffer =
759 i915_error_object_create(dev_priv, ring->obj);
760
761
762 i915_gem_record_active_context(ring, error, &error->ring[i]);
763
764 count = 0;
765 list_for_each_entry(request, &ring->request_list, list)
766 count++;
767
768 error->ring[i].num_requests = count;
769 error->ring[i].requests =
770 kmalloc(count*sizeof(struct drm_i915_error_request),
771 GFP_ATOMIC);
772 if (error->ring[i].requests == NULL) {
773 error->ring[i].num_requests = 0;
774 continue;
775 }
776
777 count = 0;
778 list_for_each_entry(request, &ring->request_list, list) {
779 struct drm_i915_error_request *erq;
780
781 erq = &error->ring[i].requests[count++];
782 erq->seqno = request->seqno;
783 erq->jiffies = request->emitted_jiffies;
784 erq->tail = request->tail;
785 }
786 }
787}
788
789/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
790 * VM.
791 */
792static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
793 struct drm_i915_error_state *error,
794 struct i915_address_space *vm,
795 const int ndx)
796{
797 struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
798 struct drm_i915_gem_object *obj;
799 struct i915_vma *vma;
800 int i;
801
802 i = 0;
803 list_for_each_entry(vma, &vm->active_list, mm_list)
804 i++;
805 error->active_bo_count[ndx] = i;
806 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
807 if (obj->pin_count)
808 i++;
809 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
810
811 if (i) {
812 active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
813 if (active_bo)
814 pinned_bo = active_bo + error->active_bo_count[ndx];
815 }
816
817 if (active_bo)
818 error->active_bo_count[ndx] =
819 capture_active_bo(active_bo,
820 error->active_bo_count[ndx],
821 &vm->active_list);
822
823 if (pinned_bo)
824 error->pinned_bo_count[ndx] =
825 capture_pinned_bo(pinned_bo,
826 error->pinned_bo_count[ndx],
827 &dev_priv->mm.bound_list);
828 error->active_bo[ndx] = active_bo;
829 error->pinned_bo[ndx] = pinned_bo;
830}
831
832static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
833 struct drm_i915_error_state *error)
834{
835 struct i915_address_space *vm;
836 int cnt = 0, i = 0;
837
838 list_for_each_entry(vm, &dev_priv->vm_list, global_link)
839 cnt++;
840
841 if (WARN(cnt > 1, "Multiple VMs not yet supported\n"))
842 cnt = 1;
843
844 vm = &dev_priv->gtt.base;
845
846 error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
847 error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
848 error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
849 GFP_ATOMIC);
850 error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
851 GFP_ATOMIC);
852
853 list_for_each_entry(vm, &dev_priv->vm_list, global_link)
854 i915_gem_capture_vm(dev_priv, error, vm, i++);
855}
856
857/**
858 * i915_capture_error_state - capture an error record for later analysis
859 * @dev: drm device
860 *
861 * Should be called when an error is detected (either a hang or an error
862 * interrupt) to capture error state from the time of the error. Fills
863 * out a structure which becomes available in debugfs for user level tools
864 * to pick up.
865 */
866void i915_capture_error_state(struct drm_device *dev)
867{
868 struct drm_i915_private *dev_priv = dev->dev_private;
869 struct drm_i915_error_state *error;
870 unsigned long flags;
871 int pipe;
872
873 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
874 error = dev_priv->gpu_error.first_error;
875 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
876 if (error)
877 return;
878
879 /* Account for pipe specific data like PIPE*STAT */
880 error = kzalloc(sizeof(*error), GFP_ATOMIC);
881 if (!error) {
882 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
883 return;
884 }
885
886 DRM_INFO("capturing error event; look for more information in "
887 "/sys/class/drm/card%d/error\n", dev->primary->index);
888
889 kref_init(&error->ref);
890 error->eir = I915_READ(EIR);
891 error->pgtbl_er = I915_READ(PGTBL_ER);
892 if (HAS_HW_CONTEXTS(dev))
893 error->ccid = I915_READ(CCID);
894
895 if (HAS_PCH_SPLIT(dev))
896 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
897 else if (IS_VALLEYVIEW(dev))
898 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
899 else if (IS_GEN2(dev))
900 error->ier = I915_READ16(IER);
901 else
902 error->ier = I915_READ(IER);
903
904 if (INTEL_INFO(dev)->gen >= 6)
905 error->derrmr = I915_READ(DERRMR);
906
907 if (IS_VALLEYVIEW(dev))
908 error->forcewake = I915_READ(FORCEWAKE_VLV);
909 else if (INTEL_INFO(dev)->gen >= 7)
910 error->forcewake = I915_READ(FORCEWAKE_MT);
911 else if (INTEL_INFO(dev)->gen == 6)
912 error->forcewake = I915_READ(FORCEWAKE);
913
914 if (!HAS_PCH_SPLIT(dev))
915 for_each_pipe(pipe)
916 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
917
918 if (INTEL_INFO(dev)->gen >= 6) {
919 error->error = I915_READ(ERROR_GEN6);
920 error->done_reg = I915_READ(DONE_REG);
921 }
922
923 if (INTEL_INFO(dev)->gen == 7)
924 error->err_int = I915_READ(GEN7_ERR_INT);
925
926 i915_get_extra_instdone(dev, error->extra_instdone);
927
928 i915_gem_capture_buffers(dev_priv, error);
929 i915_gem_record_fences(dev, error);
930 i915_gem_record_rings(dev, error);
931
932 do_gettimeofday(&error->time);
933
934 error->overlay = intel_overlay_capture_error_state(dev);
935 error->display = intel_display_capture_error_state(dev);
936
937 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
938 if (dev_priv->gpu_error.first_error == NULL) {
939 dev_priv->gpu_error.first_error = error;
940 error = NULL;
941 }
942 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
943
944 if (error)
945 i915_error_state_free(&error->ref);
946}
947
948void i915_error_state_get(struct drm_device *dev,
949 struct i915_error_state_file_priv *error_priv)
950{
951 struct drm_i915_private *dev_priv = dev->dev_private;
952 unsigned long flags;
953
954 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
955 error_priv->error = dev_priv->gpu_error.first_error;
956 if (error_priv->error)
957 kref_get(&error_priv->error->ref);
958 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
959
960}
961
962void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
963{
964 if (error_priv->error)
965 kref_put(&error_priv->error->ref, i915_error_state_free);
966}
967
968void i915_destroy_error_state(struct drm_device *dev)
969{
970 struct drm_i915_private *dev_priv = dev->dev_private;
971 struct drm_i915_error_state *error;
972 unsigned long flags;
973
974 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
975 error = dev_priv->gpu_error.first_error;
976 dev_priv->gpu_error.first_error = NULL;
977 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
978
979 if (error)
980 kref_put(&error->ref, i915_error_state_free);
981}
982
983const char *i915_cache_level_str(int type)
984{
985 switch (type) {
986 case I915_CACHE_NONE: return " uncached";
987 case I915_CACHE_LLC: return " snooped or LLC";
988 case I915_CACHE_L3_LLC: return " L3+LLC";
989 default: return "";
990 }
991}
992
993/* NB: please notice the memset */
994void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
995{
996 struct drm_i915_private *dev_priv = dev->dev_private;
997 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
998
999 switch (INTEL_INFO(dev)->gen) {
1000 case 2:
1001 case 3:
1002 instdone[0] = I915_READ(INSTDONE);
1003 break;
1004 case 4:
1005 case 5:
1006 case 6:
1007 instdone[0] = I915_READ(INSTDONE_I965);
1008 instdone[1] = I915_READ(INSTDONE1);
1009 break;
1010 default:
1011 WARN_ONCE(1, "Unsupported platform\n");
1012 case 7:
1013 instdone[0] = I915_READ(GEN7_INSTDONE_1);
1014 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1015 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
1016 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
1017 break;
1018 }
1019}
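
The drm_i915_error_state_buf helpers at the top of this file implement one idea: format the whole error state every time, but keep only the window that starts at the debugfs read offset (e->start). Printfs that end before the window advance e->pos and are discarded; the first one that straddles the window start is shifted down with memmove(); everything after that is appended normally. A compact userspace sketch of the same windowing scheme (truncation and overflow handling elided):

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

struct ebuf {
	char buf[4096];
	size_t size;	/* capacity */
	size_t start;	/* file offset where this window begins */
	size_t pos;	/* file offset formatted so far */
	size_t bytes;	/* bytes kept in buf */
};

static void ebuf_printf(struct ebuf *e, const char *f, ...)
{
	va_list args;
	int len;

	va_start(args, f);
	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	va_end(args);
	if (len < 0)
		return;

	if (e->pos + len <= e->start) {	/* ends before the window: skip */
		e->pos += len;
		return;
	}
	if (e->pos < e->start) {	/* straddles the window start */
		size_t off = e->start - e->pos;

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start + e->bytes;
		return;
	}
	e->bytes += len;		/* fully inside the window: append */
	e->pos += len;
}

int main(void)
{
	struct ebuf e = { .size = sizeof(e.buf), .start = 10 };

	ebuf_printf(&e, "0123456789");	/* skipped: ends exactly at offset 10 */
	ebuf_printf(&e, "abcdef");	/* kept from the window start onwards */
	printf("%.*s\n", (int)e.bytes, e.buf);	/* prints "abcdef" */
	return 0;
}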
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3d92a7cef154..a03b445ceb5f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -85,6 +85,12 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
85{ 85{
86 assert_spin_locked(&dev_priv->irq_lock); 86 assert_spin_locked(&dev_priv->irq_lock);
87 87
88 if (dev_priv->pc8.irqs_disabled) {
89 WARN(1, "IRQs disabled\n");
90 dev_priv->pc8.regsave.deimr &= ~mask;
91 return;
92 }
93
88 if ((dev_priv->irq_mask & mask) != 0) { 94 if ((dev_priv->irq_mask & mask) != 0) {
89 dev_priv->irq_mask &= ~mask; 95 dev_priv->irq_mask &= ~mask;
90 I915_WRITE(DEIMR, dev_priv->irq_mask); 96 I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -97,6 +103,12 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
97{ 103{
98 assert_spin_locked(&dev_priv->irq_lock); 104 assert_spin_locked(&dev_priv->irq_lock);
99 105
106 if (dev_priv->pc8.irqs_disabled) {
107 WARN(1, "IRQs disabled\n");
108 dev_priv->pc8.regsave.deimr |= mask;
109 return;
110 }
111
100 if ((dev_priv->irq_mask & mask) != mask) { 112 if ((dev_priv->irq_mask & mask) != mask) {
101 dev_priv->irq_mask |= mask; 113 dev_priv->irq_mask |= mask;
102 I915_WRITE(DEIMR, dev_priv->irq_mask); 114 I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -104,6 +116,85 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
104 } 116 }
105} 117}
106 118
119/**
120 * ilk_update_gt_irq - update GTIMR
121 * @dev_priv: driver private
122 * @interrupt_mask: mask of interrupt bits to update
123 * @enabled_irq_mask: mask of interrupt bits to enable
124 */
125static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
126 uint32_t interrupt_mask,
127 uint32_t enabled_irq_mask)
128{
129 assert_spin_locked(&dev_priv->irq_lock);
130
131 if (dev_priv->pc8.irqs_disabled) {
132 WARN(1, "IRQs disabled\n");
133 dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
134 dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
135 interrupt_mask);
136 return;
137 }
138
139 dev_priv->gt_irq_mask &= ~interrupt_mask;
140 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
141 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
142 POSTING_READ(GTIMR);
143}
144
145void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
146{
147 ilk_update_gt_irq(dev_priv, mask, mask);
148}
149
150void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
151{
152 ilk_update_gt_irq(dev_priv, mask, 0);
153}
154
155/**
156 * snb_update_pm_irq - update GEN6_PMIMR
157 * @dev_priv: driver private
158 * @interrupt_mask: mask of interrupt bits to update
159 * @enabled_irq_mask: mask of interrupt bits to enable
160 */
161static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
162 uint32_t interrupt_mask,
163 uint32_t enabled_irq_mask)
164{
165 uint32_t new_val;
166
167 assert_spin_locked(&dev_priv->irq_lock);
168
169 if (dev_priv->pc8.irqs_disabled) {
170 WARN(1, "IRQs disabled\n");
171 dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
172 dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
173 interrupt_mask);
174 return;
175 }
176
177 new_val = dev_priv->pm_irq_mask;
178 new_val &= ~interrupt_mask;
179 new_val |= (~enabled_irq_mask & interrupt_mask);
180
181 if (new_val != dev_priv->pm_irq_mask) {
182 dev_priv->pm_irq_mask = new_val;
183 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
184 POSTING_READ(GEN6_PMIMR);
185 }
186}
187
188void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
189{
190 snb_update_pm_irq(dev_priv, mask, mask);
191}
192
193void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
194{
195 snb_update_pm_irq(dev_priv, mask, 0);
196}
197
107static bool ivb_can_enable_err_int(struct drm_device *dev) 198static bool ivb_can_enable_err_int(struct drm_device *dev)
108{ 199{
109 struct drm_i915_private *dev_priv = dev->dev_private; 200 struct drm_i915_private *dev_priv = dev->dev_private;
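
ilk_update_gt_irq() and snb_update_pm_irq() above share one calling convention: interrupt_mask selects which IMR bits to touch, enabled_irq_mask gives their new state, and because IMR is a mask register an enabled interrupt is a cleared bit. The enable/disable wrappers are then just update(mask, mask) and update(mask, 0). The bit arithmetic in isolation, as a runnable sketch with a plain variable standing in for the register:

#include <assert.h>
#include <stdint.h>

/* Update only the selected bits of an interrupt-mask register image;
 * a set bit masks (disables) the corresponding interrupt. */
static uint32_t update_imr(uint32_t imr, uint32_t interrupt_mask,
			   uint32_t enabled_irq_mask)
{
	imr &= ~interrupt_mask;			      /* clear selected bits */
	imr |= (~enabled_irq_mask & interrupt_mask);  /* re-set disabled ones */
	return imr;
}

int main(void)
{
	uint32_t imr = 0xffffffff;		/* everything masked */

	imr = update_imr(imr, 0x3, 0x3);	/* enable bits 0 and 1 */
	assert(imr == 0xfffffffc);

	imr = update_imr(imr, 0x2, 0x0);	/* disable bit 1, keep bit 0 */
	assert(imr == 0xfffffffe);
	return 0;
}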
@@ -128,6 +219,8 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
 	enum pipe pipe;
 	struct intel_crtc *crtc;
 
+	assert_spin_locked(&dev_priv->irq_lock);
+
 	for_each_pipe(pipe) {
 		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
@@ -152,38 +245,75 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
 }
 
 static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
-						  bool enable)
+						  enum pipe pipe, bool enable)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	if (enable) {
+		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+
 		if (!ivb_can_enable_err_int(dev))
 			return;
 
-		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
-					 ERR_INT_FIFO_UNDERRUN_B |
-					 ERR_INT_FIFO_UNDERRUN_C);
-
 		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
 	} else {
+		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);
+
+		/* Change the state _after_ we've read out the current one. */
 		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+
+		if (!was_enabled &&
+		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
+			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
+				      pipe_name(pipe));
+		}
 	}
 }
 
-static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
+/**
+ * ibx_display_interrupt_update - update SDEIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+					 uint32_t interrupt_mask,
+					 uint32_t enabled_irq_mask)
+{
+	uint32_t sdeimr = I915_READ(SDEIMR);
+	sdeimr &= ~interrupt_mask;
+	sdeimr |= (~enabled_irq_mask & interrupt_mask);
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	if (dev_priv->pc8.irqs_disabled &&
+	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
+		WARN(1, "IRQs disabled\n");
+		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
+		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
+						 interrupt_mask);
+		return;
+	}
+
+	I915_WRITE(SDEIMR, sdeimr);
+	POSTING_READ(SDEIMR);
+}
+#define ibx_enable_display_interrupt(dev_priv, bits) \
+	ibx_display_interrupt_update((dev_priv), (bits), (bits))
+#define ibx_disable_display_interrupt(dev_priv, bits) \
+	ibx_display_interrupt_update((dev_priv), (bits), 0)
+
+static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
+					    enum transcoder pch_transcoder,
 					    bool enable)
 {
-	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
-						SDE_TRANSB_FIFO_UNDER;
+	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
+		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
 
 	if (enable)
-		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
+		ibx_enable_display_interrupt(dev_priv, bit);
 	else
-		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);
-
-	POSTING_READ(SDEIMR);
+		ibx_disable_display_interrupt(dev_priv, bit);
 }
 
 static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -193,19 +323,26 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (enable) {
+		I915_WRITE(SERR_INT,
+			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+
 		if (!cpt_can_enable_serr_int(dev))
 			return;
 
-		I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
-				     SERR_INT_TRANS_B_FIFO_UNDERRUN |
-				     SERR_INT_TRANS_C_FIFO_UNDERRUN);
-
-		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
+		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
 	} else {
-		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
-	}
+		uint32_t tmp = I915_READ(SERR_INT);
+		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
 
-	POSTING_READ(SDEIMR);
+		/* Change the state _after_ we've read out the current one. */
+		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+
+		if (!was_enabled &&
+		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
+			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
+				      transcoder_name(pch_transcoder));
+		}
+	}
 }
 
 /**
@@ -243,7 +380,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
 	if (IS_GEN5(dev) || IS_GEN6(dev))
 		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
 	else if (IS_GEN7(dev))
-		ivybridge_set_fifo_underrun_reporting(dev, enable);
+		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
 
 done:
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -269,29 +406,19 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
 				       bool enable)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	enum pipe p;
-	struct drm_crtc *crtc;
-	struct intel_crtc *intel_crtc;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	unsigned long flags;
 	bool ret;
 
-	if (HAS_PCH_LPT(dev)) {
-		crtc = NULL;
-		for_each_pipe(p) {
-			struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
-			if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
-				crtc = c;
-				break;
-			}
-		}
-		if (!crtc) {
-			DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
-			return false;
-		}
-	} else {
-		crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
-	}
-	intel_crtc = to_intel_crtc(crtc);
+	/*
+	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
+	 * has only one pch transcoder A that all pipes can use. To avoid racy
+	 * pch transcoder -> pipe lookups from interrupt code simply store the
+	 * underrun statistics in crtc A. Since we never expose this anywhere
+	 * nor use it outside of the fifo underrun code here using the "wrong"
+	 * crtc on LPT won't cause issues.
+	 */
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 
@@ -303,7 +430,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
 	intel_crtc->pch_fifo_underrun_disabled = !enable;
 
 	if (HAS_PCH_IBX(dev))
-		ibx_set_fifo_underrun_reporting(intel_crtc, enable);
+		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
 	else
 		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
 
@@ -319,6 +446,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 	u32 reg = PIPESTAT(pipe);
 	u32 pipestat = I915_READ(reg) & 0x7fff0000;
 
+	assert_spin_locked(&dev_priv->irq_lock);
+
 	if ((pipestat & mask) == mask)
 		return;
 
@@ -334,6 +463,8 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 	u32 reg = PIPESTAT(pipe);
 	u32 pipestat = I915_READ(reg) & 0x7fff0000;
 
+	assert_spin_locked(&dev_priv->irq_lock);
+
 	if ((pipestat & mask) == 0)
 		return;
 
@@ -625,14 +756,13 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	drm_kms_helper_hotplug_event(dev);
 }
 
-static void ironlake_handle_rps_change(struct drm_device *dev)
+static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 busy_up, busy_down, max_avg, min_avg;
 	u8 new_delay;
-	unsigned long flags;
 
-	spin_lock_irqsave(&mchdev_lock, flags);
+	spin_lock(&mchdev_lock);
 
 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
 
@@ -660,7 +790,7 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
 	if (ironlake_set_drps(dev, new_delay))
 		dev_priv->ips.cur_delay = new_delay;
 
-	spin_unlock_irqrestore(&mchdev_lock, flags);
+	spin_unlock(&mchdev_lock);
 
 	return;
 }
@@ -668,34 +798,31 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
 static void notify_ring(struct drm_device *dev,
 			struct intel_ring_buffer *ring)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	if (ring->obj == NULL)
 		return;
 
 	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
 
 	wake_up_all(&ring->irq_queue);
-	if (i915_enable_hangcheck) {
-		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
-			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
-	}
+	i915_queue_hangcheck(dev);
 }
 
 static void gen6_pm_rps_work(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 						    rps.work);
-	u32 pm_iir, pm_imr;
+	u32 pm_iir;
 	u8 new_delay;
 
-	spin_lock_irq(&dev_priv->rps.lock);
+	spin_lock_irq(&dev_priv->irq_lock);
 	pm_iir = dev_priv->rps.pm_iir;
 	dev_priv->rps.pm_iir = 0;
-	pm_imr = I915_READ(GEN6_PMIMR);
 	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
-	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
-	spin_unlock_irq(&dev_priv->rps.lock);
+	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	/* Make sure we didn't queue anything we're not going to process. */
+	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);
 
 	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
 		return;
@@ -781,13 +908,12 @@ static void ivybridge_parity_work(struct work_struct *work)
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	mutex_unlock(&dev_priv->dev->struct_mutex);
 
-	parity_event[0] = "L3_PARITY_ERROR=1";
+	parity_event[0] = I915_L3_PARITY_UEVENT "=1";
 	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
 	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
 	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
@@ -804,22 +930,31 @@ static void ivybridge_parity_work(struct work_struct *work)
 	kfree(parity_event[1]);
 }
 
-static void ivybridge_handle_parity_error(struct drm_device *dev)
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long flags;
 
 	if (!HAS_L3_GPU_CACHE(dev))
 		return;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	spin_lock(&dev_priv->irq_lock);
+	ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+	spin_unlock(&dev_priv->irq_lock);
 
 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 }
 
+static void ilk_gt_irq_handler(struct drm_device *dev,
+			       struct drm_i915_private *dev_priv,
+			       u32 gt_iir)
+{
+	if (gt_iir &
+	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+		notify_ring(dev, &dev_priv->ring[RCS]);
+	if (gt_iir & ILK_BSD_USER_INTERRUPT)
+		notify_ring(dev, &dev_priv->ring[VCS]);
+}
+
 static void snb_gt_irq_handler(struct drm_device *dev,
 			       struct drm_i915_private *dev_priv,
 			       u32 gt_iir)
@@ -841,32 +976,7 @@ static void snb_gt_irq_handler(struct drm_device *dev,
 	}
 
 	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
-		ivybridge_handle_parity_error(dev);
-}
-
-/* Legacy way of handling PM interrupts */
-static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
-				u32 pm_iir)
-{
-	unsigned long flags;
-
-	/*
-	 * IIR bits should never already be set because IMR should
-	 * prevent an interrupt from being shown in IIR. The warning
-	 * displays a case where we've unsafely cleared
-	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
-	 * type is not a problem, it displays a problem in the logic.
-	 *
-	 * The mask bit in IMR is cleared by dev_priv->rps.work.
-	 */
-
-	spin_lock_irqsave(&dev_priv->rps.lock, flags);
-	dev_priv->rps.pm_iir |= pm_iir;
-	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
-	POSTING_READ(GEN6_PMIMR);
-	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
-
-	queue_work(dev_priv->wq, &dev_priv->rps.work);
+		ivybridge_parity_error_irq_handler(dev);
 }
 
 #define HPD_STORM_DETECT_PERIOD 1000
@@ -886,6 +996,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
 	spin_lock(&dev_priv->irq_lock);
 	for (i = 1; i < HPD_NUM_PINS; i++) {
 
+		WARN(((hpd[i] & hotplug_trigger) &&
+		      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
+		     "Received HPD interrupt although disabled\n");
+
 		if (!(hpd[i] & hotplug_trigger) ||
 		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
 			continue;
@@ -896,6 +1010,7 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
 				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
 			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
 			dev_priv->hpd_stats[i].hpd_cnt = 0;
+			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
 		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
 			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
 			dev_priv->hpd_event_bits &= ~(1 << i);
901 dev_priv->hpd_event_bits &= ~(1 << i); 1016 dev_priv->hpd_event_bits &= ~(1 << i);
@@ -903,6 +1018,8 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
903 storm_detected = true; 1018 storm_detected = true;
904 } else { 1019 } else {
905 dev_priv->hpd_stats[i].hpd_cnt++; 1020 dev_priv->hpd_stats[i].hpd_cnt++;
1021 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1022 dev_priv->hpd_stats[i].hpd_cnt);
906 } 1023 }
907 } 1024 }
908 1025
@@ -928,28 +1045,21 @@ static void dp_aux_irq_handler(struct drm_device *dev)
 	wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
-/* Unlike gen6_queue_rps_work() from which this function is originally derived,
- * we must be able to deal with other PM interrupts. This is complicated because
- * of the way in which we use the masks to defer the RPS work (which for
- * posterity is necessary because of forcewake).
- */
-static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
-			       u32 pm_iir)
+/* The RPS events need forcewake, so we add them to a work queue and mask their
+ * IMR bits until the work is done. Other interrupts can be processed without
+ * the work queue. */
+static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->rps.lock, flags);
-	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
-	if (dev_priv->rps.pm_iir) {
-		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
-		/* never want to mask useful interrupts. (also posting read) */
-		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
-		/* TODO: if queue_work is slow, move it out of the spinlock */
+	if (pm_iir & GEN6_PM_RPS_EVENTS) {
+		spin_lock(&dev_priv->irq_lock);
+		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
+		snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
+		spin_unlock(&dev_priv->irq_lock);
+
 		queue_work(dev_priv->wq, &dev_priv->rps.work);
 	}
-	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
 
-	if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
+	if (HAS_VEBOX(dev_priv->dev)) {
 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
 			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
 
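
gen6_rps_irq_handler() above is a textbook top-half/bottom-half split: in hardirq context the RPS bits are latched into rps.pm_iir and masked in PMIMR so they cannot re-fire, and the work item later consumes the latched bits, does the sleeping forcewake dance, and unmasks them. A hedged kernel-style sketch of that shape, with hypothetical names and the register writes reduced to comments:

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/* Hypothetical device illustrating the latch-mask-defer shape used above. */
struct mydev {
	spinlock_t lock;
	u32 pending;			/* latched event bits */
	struct work_struct work;
	struct workqueue_struct *wq;
};

static void mydev_irq_events(struct mydev *d, u32 events)
{
	/* Top half: hardirq context, interrupts already off, so a plain
	 * spin_lock suffices. */
	spin_lock(&d->lock);
	d->pending |= events;
	/* ...mask 'events' in the hardware IMR here so they stop firing... */
	spin_unlock(&d->lock);

	queue_work(d->wq, &d->work);
}

static void mydev_work(struct work_struct *work)
{
	struct mydev *d = container_of(work, struct mydev, work);
	u32 events;

	/* Bottom half: process context, may sleep (e.g. to grab forcewake). */
	spin_lock_irq(&d->lock);
	events = d->pending;
	d->pending = 0;
	/* ...unmask 'events' in the hardware IMR here... */
	spin_unlock_irq(&d->lock);

	/* ...handle 'events' outside the lock... */
	(void)events;	/* placeholder for the real processing */
}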
@@ -1028,8 +1138,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
 			gmbus_irq_handler(dev);
 
-		if (pm_iir & GEN6_PM_RPS_EVENTS)
-			gen6_queue_rps_work(dev_priv, pm_iir);
+		if (pm_iir)
+			gen6_rps_irq_handler(dev_priv, pm_iir);
 
 		I915_WRITE(GTIIR, gt_iir);
 		I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -1179,27 +1289,112 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1179 cpt_serr_int_handler(dev); 1289 cpt_serr_int_handler(dev);
1180} 1290}
1181 1291
1182static irqreturn_t ivybridge_irq_handler(int irq, void *arg) 1292static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1293{
1294 struct drm_i915_private *dev_priv = dev->dev_private;
1295
1296 if (de_iir & DE_AUX_CHANNEL_A)
1297 dp_aux_irq_handler(dev);
1298
1299 if (de_iir & DE_GSE)
1300 intel_opregion_asle_intr(dev);
1301
1302 if (de_iir & DE_PIPEA_VBLANK)
1303 drm_handle_vblank(dev, 0);
1304
1305 if (de_iir & DE_PIPEB_VBLANK)
1306 drm_handle_vblank(dev, 1);
1307
1308 if (de_iir & DE_POISON)
1309 DRM_ERROR("Poison interrupt\n");
1310
1311 if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
1312 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1313 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1314
1315 if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
1316 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1317 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1318
1319 if (de_iir & DE_PLANEA_FLIP_DONE) {
1320 intel_prepare_page_flip(dev, 0);
1321 intel_finish_page_flip_plane(dev, 0);
1322 }
1323
1324 if (de_iir & DE_PLANEB_FLIP_DONE) {
1325 intel_prepare_page_flip(dev, 1);
1326 intel_finish_page_flip_plane(dev, 1);
1327 }
1328
1329 /* check event from PCH */
1330 if (de_iir & DE_PCH_EVENT) {
1331 u32 pch_iir = I915_READ(SDEIIR);
1332
1333 if (HAS_PCH_CPT(dev))
1334 cpt_irq_handler(dev, pch_iir);
1335 else
1336 ibx_irq_handler(dev, pch_iir);
1337
1338 /* should clear PCH hotplug event before clearing CPU irq */
1339 I915_WRITE(SDEIIR, pch_iir);
1340 }
1341
1342 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1343 ironlake_rps_change_irq_handler(dev);
1344}
1345
1346static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1347{
1348 struct drm_i915_private *dev_priv = dev->dev_private;
1349 int i;
1350
1351 if (de_iir & DE_ERR_INT_IVB)
1352 ivb_err_int_handler(dev);
1353
1354 if (de_iir & DE_AUX_CHANNEL_A_IVB)
1355 dp_aux_irq_handler(dev);
1356
1357 if (de_iir & DE_GSE_IVB)
1358 intel_opregion_asle_intr(dev);
1359
1360 for (i = 0; i < 3; i++) {
1361 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
1362 drm_handle_vblank(dev, i);
1363 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
1364 intel_prepare_page_flip(dev, i);
1365 intel_finish_page_flip_plane(dev, i);
1366 }
1367 }
1368
1369 /* check event from PCH */
1370 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1371 u32 pch_iir = I915_READ(SDEIIR);
1372
1373 cpt_irq_handler(dev, pch_iir);
1374
1375 /* clear PCH hotplug event before clearing CPU irq */
1376 I915_WRITE(SDEIIR, pch_iir);
1377 }
1378}
1379
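Both display handlers above are straight bit-tests against the sampled IIR value; the IVB variant additionally relies on each pipe's bits repeating at a fixed 5-bit stride, which is why DE_PIPEA_VBLANK_IVB << (5 * i) selects pipe i. A small sketch of that stride trick, with made-up constants:

#include <stdint.h>
#include <stdio.h>

#define PIPEA_VBLANK    (1u << 0)  /* stand-in for DE_PIPEA_VBLANK_IVB */
#define PIPEA_FLIP_DONE (1u << 3)  /* stand-in for DE_PLANEA_FLIP_DONE_IVB */
#define PIPE_STRIDE     5          /* bits between consecutive pipes */

int main(void)
{
    /* pretend pipe C reported a vblank and pipe A finished a flip */
    uint32_t de_iir = (PIPEA_VBLANK << (PIPE_STRIDE * 2)) | PIPEA_FLIP_DONE;

    for (int pipe = 0; pipe < 3; pipe++) {
        if (de_iir & (PIPEA_VBLANK << (PIPE_STRIDE * pipe)))
            printf("vblank on pipe %d\n", pipe);
        if (de_iir & (PIPEA_FLIP_DONE << (PIPE_STRIDE * pipe)))
            printf("flip done on pipe %d\n", pipe);
    }
    return 0;
}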
1380static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1183{ 1381{
1184 struct drm_device *dev = (struct drm_device *) arg; 1382 struct drm_device *dev = (struct drm_device *) arg;
1185 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1383 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1186 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0; 1384 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1187 irqreturn_t ret = IRQ_NONE; 1385 irqreturn_t ret = IRQ_NONE;
1188 int i; 1386 bool err_int_reenable = false;
1189 1387
1190 atomic_inc(&dev_priv->irq_received); 1388 atomic_inc(&dev_priv->irq_received);
1191 1389
1192 /* We get interrupts on unclaimed registers, so check for this before we 1390 /* We get interrupts on unclaimed registers, so check for this before we
1193 * do any I915_{READ,WRITE}. */ 1391 * do any I915_{READ,WRITE}. */
1194 if (IS_HASWELL(dev) && 1392 intel_uncore_check_errors(dev);
1195 (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
1196 DRM_ERROR("Unclaimed register before interrupt\n");
1197 I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
1198 }
1199 1393
1200 /* disable master interrupt before clearing iir */ 1394 /* disable master interrupt before clearing iir */
1201 de_ier = I915_READ(DEIER); 1395 de_ier = I915_READ(DEIER);
1202 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 1396 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1397 POSTING_READ(DEIER);
1203 1398
1204 /* Disable south interrupts. We'll only write to SDEIIR once, so further 1399 /* Disable south interrupts. We'll only write to SDEIIR once, so further
1205 * interrupts will be stored on its back queue, and then we'll be 1400 * interrupts will be stored on its back queue, and then we'll be
@@ -1217,62 +1412,42 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
1217 * handler. */ 1412 * handler. */
1218 if (IS_HASWELL(dev)) { 1413 if (IS_HASWELL(dev)) {
1219 spin_lock(&dev_priv->irq_lock); 1414 spin_lock(&dev_priv->irq_lock);
1220 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 1415 err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
1416 if (err_int_reenable)
1417 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
1221 spin_unlock(&dev_priv->irq_lock); 1418 spin_unlock(&dev_priv->irq_lock);
1222 } 1419 }
1223 1420
1224 gt_iir = I915_READ(GTIIR); 1421 gt_iir = I915_READ(GTIIR);
1225 if (gt_iir) { 1422 if (gt_iir) {
1226 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1423 if (INTEL_INFO(dev)->gen >= 6)
1424 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1425 else
1426 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1227 I915_WRITE(GTIIR, gt_iir); 1427 I915_WRITE(GTIIR, gt_iir);
1228 ret = IRQ_HANDLED; 1428 ret = IRQ_HANDLED;
1229 } 1429 }
1230 1430
1231 de_iir = I915_READ(DEIIR); 1431 de_iir = I915_READ(DEIIR);
1232 if (de_iir) { 1432 if (de_iir) {
1233 if (de_iir & DE_ERR_INT_IVB) 1433 if (INTEL_INFO(dev)->gen >= 7)
1234 ivb_err_int_handler(dev); 1434 ivb_display_irq_handler(dev, de_iir);
1235 1435 else
1236 if (de_iir & DE_AUX_CHANNEL_A_IVB) 1436 ilk_display_irq_handler(dev, de_iir);
1237 dp_aux_irq_handler(dev);
1238
1239 if (de_iir & DE_GSE_IVB)
1240 intel_opregion_asle_intr(dev);
1241
1242 for (i = 0; i < 3; i++) {
1243 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
1244 drm_handle_vblank(dev, i);
1245 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
1246 intel_prepare_page_flip(dev, i);
1247 intel_finish_page_flip_plane(dev, i);
1248 }
1249 }
1250
1251 /* check event from PCH */
1252 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1253 u32 pch_iir = I915_READ(SDEIIR);
1254
1255 cpt_irq_handler(dev, pch_iir);
1256
1257 /* clear PCH hotplug event before clear CPU irq */
1258 I915_WRITE(SDEIIR, pch_iir);
1259 }
1260
1261 I915_WRITE(DEIIR, de_iir); 1437 I915_WRITE(DEIIR, de_iir);
1262 ret = IRQ_HANDLED; 1438 ret = IRQ_HANDLED;
1263 } 1439 }
1264 1440
1265 pm_iir = I915_READ(GEN6_PMIIR); 1441 if (INTEL_INFO(dev)->gen >= 6) {
1266 if (pm_iir) { 1442 u32 pm_iir = I915_READ(GEN6_PMIIR);
1267 if (IS_HASWELL(dev)) 1443 if (pm_iir) {
1268 hsw_pm_irq_handler(dev_priv, pm_iir); 1444 gen6_rps_irq_handler(dev_priv, pm_iir);
1269 else if (pm_iir & GEN6_PM_RPS_EVENTS) 1445 I915_WRITE(GEN6_PMIIR, pm_iir);
1270 gen6_queue_rps_work(dev_priv, pm_iir); 1446 ret = IRQ_HANDLED;
1271 I915_WRITE(GEN6_PMIIR, pm_iir); 1447 }
1272 ret = IRQ_HANDLED;
1273 } 1448 }
1274 1449
1275 if (IS_HASWELL(dev)) { 1450 if (err_int_reenable) {
1276 spin_lock(&dev_priv->irq_lock); 1451 spin_lock(&dev_priv->irq_lock);
1277 if (ivb_can_enable_err_int(dev)) 1452 if (ivb_can_enable_err_int(dev))
1278 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 1453 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
@@ -1289,119 +1464,6 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
1289 return ret; 1464 return ret;
1290} 1465}
1291 1466
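The merged ironlake_irq_handler brackets all IIR processing between disabling and restoring the master and south-display enables; the posting reads force each write to land, and zeroing SDEIER lets a PCH event latched during the handler re-raise the interrupt as soon as the enable is restored. A self-contained sketch of that bracket, where the regfile array and the two register names are stand-ins for the real MMIO space:

#include <stdint.h>
#include <stdio.h>

#define DE_MASTER_IRQ_CONTROL (1u << 31)

enum reg { DEIER, SDEIER, NREGS };
static uint32_t regfile[NREGS] = { 0x80000043u, 0x1u }; /* made-up enables */

static uint32_t mmio_read(enum reg r) { return regfile[r]; }
static void mmio_write(enum reg r, uint32_t v) { regfile[r] = v; }

int main(void)
{
    uint32_t de_ier = mmio_read(DEIER);
    uint32_t sde_ier = mmio_read(SDEIER);

    /* entry: stop further CPU and PCH interrupts; the extra reads model
     * the posting reads that flush each write before IIR is sampled */
    mmio_write(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
    (void)mmio_read(DEIER);
    mmio_write(SDEIER, 0);
    (void)mmio_read(SDEIER);

    /* ... the real handler reads and acks DEIIR/GTIIR/PMIIR here ... */

    /* exit: restore the saved enables; a PCH event latched in SDEIIR
     * meanwhile re-raises the interrupt once SDEIER is restored */
    mmio_write(DEIER, de_ier);
    (void)mmio_read(DEIER);
    mmio_write(SDEIER, sde_ier);
    (void)mmio_read(SDEIER);

    printf("DEIER=0x%08x SDEIER=0x%08x\n", (unsigned)mmio_read(DEIER),
           (unsigned)mmio_read(SDEIER));
    return 0;
}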
1292static void ilk_gt_irq_handler(struct drm_device *dev,
1293 struct drm_i915_private *dev_priv,
1294 u32 gt_iir)
1295{
1296 if (gt_iir &
1297 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1298 notify_ring(dev, &dev_priv->ring[RCS]);
1299 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1300 notify_ring(dev, &dev_priv->ring[VCS]);
1301}
1302
1303static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1304{
1305 struct drm_device *dev = (struct drm_device *) arg;
1306 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1307 int ret = IRQ_NONE;
1308 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
1309
1310 atomic_inc(&dev_priv->irq_received);
1311
1312 /* disable master interrupt before clearing iir */
1313 de_ier = I915_READ(DEIER);
1314 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1315 POSTING_READ(DEIER);
1316
1317 /* Disable south interrupts. We'll only write to SDEIIR once, so further
1318 * interrupts will be stored on its back queue, and then we'll be
1319 * able to process them after we restore SDEIER (as soon as we restore
1320 * it, we'll get an interrupt if SDEIIR still has something to process
1321 * due to its back queue). */
1322 sde_ier = I915_READ(SDEIER);
1323 I915_WRITE(SDEIER, 0);
1324 POSTING_READ(SDEIER);
1325
1326 de_iir = I915_READ(DEIIR);
1327 gt_iir = I915_READ(GTIIR);
1328 pm_iir = I915_READ(GEN6_PMIIR);
1329
1330 if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
1331 goto done;
1332
1333 ret = IRQ_HANDLED;
1334
1335 if (IS_GEN5(dev))
1336 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1337 else
1338 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1339
1340 if (de_iir & DE_AUX_CHANNEL_A)
1341 dp_aux_irq_handler(dev);
1342
1343 if (de_iir & DE_GSE)
1344 intel_opregion_asle_intr(dev);
1345
1346 if (de_iir & DE_PIPEA_VBLANK)
1347 drm_handle_vblank(dev, 0);
1348
1349 if (de_iir & DE_PIPEB_VBLANK)
1350 drm_handle_vblank(dev, 1);
1351
1352 if (de_iir & DE_POISON)
1353 DRM_ERROR("Poison interrupt\n");
1354
1355 if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
1356 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1357 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1358
1359 if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
1360 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1361 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1362
1363 if (de_iir & DE_PLANEA_FLIP_DONE) {
1364 intel_prepare_page_flip(dev, 0);
1365 intel_finish_page_flip_plane(dev, 0);
1366 }
1367
1368 if (de_iir & DE_PLANEB_FLIP_DONE) {
1369 intel_prepare_page_flip(dev, 1);
1370 intel_finish_page_flip_plane(dev, 1);
1371 }
1372
1373 /* check event from PCH */
1374 if (de_iir & DE_PCH_EVENT) {
1375 u32 pch_iir = I915_READ(SDEIIR);
1376
1377 if (HAS_PCH_CPT(dev))
1378 cpt_irq_handler(dev, pch_iir);
1379 else
1380 ibx_irq_handler(dev, pch_iir);
1381
1382 /* should clear PCH hotplug event before clearing CPU irq */
1383 I915_WRITE(SDEIIR, pch_iir);
1384 }
1385
1386 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1387 ironlake_handle_rps_change(dev);
1388
1389 if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
1390 gen6_queue_rps_work(dev_priv, pm_iir);
1391
1392 I915_WRITE(GTIIR, gt_iir);
1393 I915_WRITE(DEIIR, de_iir);
1394 I915_WRITE(GEN6_PMIIR, pm_iir);
1395
1396done:
1397 I915_WRITE(DEIER, de_ier);
1398 POSTING_READ(DEIER);
1399 I915_WRITE(SDEIER, sde_ier);
1400 POSTING_READ(SDEIER);
1401
1402 return ret;
1403}
1404
1405/** 1467/**
1406 * i915_error_work_func - do process context error handling work 1468 * i915_error_work_func - do process context error handling work
1407 * @work: work struct 1469 * @work: work struct
@@ -1417,9 +1479,9 @@ static void i915_error_work_func(struct work_struct *work)
1417 gpu_error); 1479 gpu_error);
1418 struct drm_device *dev = dev_priv->dev; 1480 struct drm_device *dev = dev_priv->dev;
1419 struct intel_ring_buffer *ring; 1481 struct intel_ring_buffer *ring;
1420 char *error_event[] = { "ERROR=1", NULL }; 1482 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1421 char *reset_event[] = { "RESET=1", NULL }; 1483 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1422 char *reset_done_event[] = { "ERROR=0", NULL }; 1484 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1423 int i, ret; 1485 int i, ret;
1424 1486
1425 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 1487 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
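The uevent strings now come from shared I915_*_UEVENT macros so the kernel and userspace agree on the key names. For reference, a uevent environment is just a NULL-terminated array of KEY=VALUE strings; a tiny sketch, where the macro expansion shown is an assumption for illustration:

#include <stdio.h>

#define I915_ERROR_UEVENT "ERROR"   /* assumed expansion, illustrative */

int main(void)
{
    /* a uevent env is a NULL-terminated list of KEY=VALUE strings */
    const char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };

    for (const char **p = error_event; *p; p++)
        printf("%s\n", *p);
    return 0;
}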
@@ -1470,535 +1532,6 @@ static void i915_error_work_func(struct work_struct *work)
1470 } 1532 }
1471} 1533}
1472 1534
1473/* NB: please notice the memset */
1474static void i915_get_extra_instdone(struct drm_device *dev,
1475 uint32_t *instdone)
1476{
1477 struct drm_i915_private *dev_priv = dev->dev_private;
1478 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
1479
1480 switch(INTEL_INFO(dev)->gen) {
1481 case 2:
1482 case 3:
1483 instdone[0] = I915_READ(INSTDONE);
1484 break;
1485 case 4:
1486 case 5:
1487 case 6:
1488 instdone[0] = I915_READ(INSTDONE_I965);
1489 instdone[1] = I915_READ(INSTDONE1);
1490 break;
1491 default:
1492 WARN_ONCE(1, "Unsupported platform\n");
1493 case 7:
1494 instdone[0] = I915_READ(GEN7_INSTDONE_1);
1495 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1496 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
1497 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
1498 break;
1499 }
1500}
1501
1502#ifdef CONFIG_DEBUG_FS
1503static struct drm_i915_error_object *
1504i915_error_object_create_sized(struct drm_i915_private *dev_priv,
1505 struct drm_i915_gem_object *src,
1506 const int num_pages)
1507{
1508 struct drm_i915_error_object *dst;
1509 int i;
1510 u32 reloc_offset;
1511
1512 if (src == NULL || src->pages == NULL)
1513 return NULL;
1514
1515 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
1516 if (dst == NULL)
1517 return NULL;
1518
1519 reloc_offset = src->gtt_offset;
1520 for (i = 0; i < num_pages; i++) {
1521 unsigned long flags;
1522 void *d;
1523
1524 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
1525 if (d == NULL)
1526 goto unwind;
1527
1528 local_irq_save(flags);
1529 if (reloc_offset < dev_priv->gtt.mappable_end &&
1530 src->has_global_gtt_mapping) {
1531 void __iomem *s;
1532
1533 /* Simply ignore tiling or any overlapping fence.
1534 * It's part of the error state, and this hopefully
1535 * captures what the GPU read.
1536 */
1537
1538 s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
1539 reloc_offset);
1540 memcpy_fromio(d, s, PAGE_SIZE);
1541 io_mapping_unmap_atomic(s);
1542 } else if (src->stolen) {
1543 unsigned long offset;
1544
1545 offset = dev_priv->mm.stolen_base;
1546 offset += src->stolen->start;
1547 offset += i << PAGE_SHIFT;
1548
1549 memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
1550 } else {
1551 struct page *page;
1552 void *s;
1553
1554 page = i915_gem_object_get_page(src, i);
1555
1556 drm_clflush_pages(&page, 1);
1557
1558 s = kmap_atomic(page);
1559 memcpy(d, s, PAGE_SIZE);
1560 kunmap_atomic(s);
1561
1562 drm_clflush_pages(&page, 1);
1563 }
1564 local_irq_restore(flags);
1565
1566 dst->pages[i] = d;
1567
1568 reloc_offset += PAGE_SIZE;
1569 }
1570 dst->page_count = num_pages;
1571 dst->gtt_offset = src->gtt_offset;
1572
1573 return dst;
1574
1575unwind:
1576 while (i--)
1577 kfree(dst->pages[i]);
1578 kfree(dst);
1579 return NULL;
1580}
1581#define i915_error_object_create(dev_priv, src) \
1582 i915_error_object_create_sized((dev_priv), (src), \
1583 (src)->base.size>>PAGE_SHIFT)
1584
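i915_error_object_create_sized uses the classic unwind-on-partial-failure idiom: if a page allocation fails mid-loop, while (i--) frees exactly the pages already captured before dropping the container. A runnable sketch of that shape:

#include <stdlib.h>

struct snapshot {
    int page_count;
    void *pages[];          /* flexible array, as in the error object */
};

struct snapshot *snapshot_create(int num_pages)
{
    struct snapshot *dst = malloc(sizeof(*dst) + num_pages * sizeof(void *));
    int i;

    if (!dst)
        return NULL;

    for (i = 0; i < num_pages; i++) {
        dst->pages[i] = malloc(4096);
        if (!dst->pages[i])
            goto unwind;
    }
    dst->page_count = num_pages;
    return dst;

unwind:
    while (i--)             /* frees pages[i-1] down to pages[0] */
        free(dst->pages[i]);
    free(dst);
    return NULL;
}

int main(void) { free(snapshot_create(0)); return 0; }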
1585static void
1586i915_error_object_free(struct drm_i915_error_object *obj)
1587{
1588 int page;
1589
1590 if (obj == NULL)
1591 return;
1592
1593 for (page = 0; page < obj->page_count; page++)
1594 kfree(obj->pages[page]);
1595
1596 kfree(obj);
1597}
1598
1599void
1600i915_error_state_free(struct kref *error_ref)
1601{
1602 struct drm_i915_error_state *error = container_of(error_ref,
1603 typeof(*error), ref);
1604 int i;
1605
1606 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
1607 i915_error_object_free(error->ring[i].batchbuffer);
1608 i915_error_object_free(error->ring[i].ringbuffer);
1609 i915_error_object_free(error->ring[i].ctx);
1610 kfree(error->ring[i].requests);
1611 }
1612
1613 kfree(error->active_bo);
1614 kfree(error->overlay);
1615 kfree(error->display);
1616 kfree(error);
1617}
1618static void capture_bo(struct drm_i915_error_buffer *err,
1619 struct drm_i915_gem_object *obj)
1620{
1621 err->size = obj->base.size;
1622 err->name = obj->base.name;
1623 err->rseqno = obj->last_read_seqno;
1624 err->wseqno = obj->last_write_seqno;
1625 err->gtt_offset = obj->gtt_offset;
1626 err->read_domains = obj->base.read_domains;
1627 err->write_domain = obj->base.write_domain;
1628 err->fence_reg = obj->fence_reg;
1629 err->pinned = 0;
1630 if (obj->pin_count > 0)
1631 err->pinned = 1;
1632 if (obj->user_pin_count > 0)
1633 err->pinned = -1;
1634 err->tiling = obj->tiling_mode;
1635 err->dirty = obj->dirty;
1636 err->purgeable = obj->madv != I915_MADV_WILLNEED;
1637 err->ring = obj->ring ? obj->ring->id : -1;
1638 err->cache_level = obj->cache_level;
1639}
1640
1641static u32 capture_active_bo(struct drm_i915_error_buffer *err,
1642 int count, struct list_head *head)
1643{
1644 struct drm_i915_gem_object *obj;
1645 int i = 0;
1646
1647 list_for_each_entry(obj, head, mm_list) {
1648 capture_bo(err++, obj);
1649 if (++i == count)
1650 break;
1651 }
1652
1653 return i;
1654}
1655
1656static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1657 int count, struct list_head *head)
1658{
1659 struct drm_i915_gem_object *obj;
1660 int i = 0;
1661
1662 list_for_each_entry(obj, head, global_list) {
1663 if (obj->pin_count == 0)
1664 continue;
1665
1666 capture_bo(err++, obj);
1667 if (++i == count)
1668 break;
1669 }
1670
1671 return i;
1672}
1673
1674static void i915_gem_record_fences(struct drm_device *dev,
1675 struct drm_i915_error_state *error)
1676{
1677 struct drm_i915_private *dev_priv = dev->dev_private;
1678 int i;
1679
1680 /* Fences */
1681 switch (INTEL_INFO(dev)->gen) {
1682 case 7:
1683 case 6:
1684 for (i = 0; i < dev_priv->num_fence_regs; i++)
1685 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1686 break;
1687 case 5:
1688 case 4:
1689 for (i = 0; i < 16; i++)
1690 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1691 break;
1692 case 3:
1693 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1694 for (i = 0; i < 8; i++)
1695 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
1696 case 2:
1697 for (i = 0; i < 8; i++)
1698 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1699 break;
1700
1701 default:
1702 BUG();
1703 }
1704}
1705
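Note the deliberate fall-through in the gen 3 case above: parts with the extra fence bank record fences 8..15 and then drop into the gen 2 arm for the shared first 8 registers. Sketched:

#include <stdio.h>

static void record_fences(int gen, int has_extra_fences)
{
    int i;

    switch (gen) {
    case 3:
        if (has_extra_fences)
            for (i = 0; i < 8; i++)
                printf("fence[%d] from extended bank\n", i + 8);
        /* fall through: gen3 shares the first 8 registers with gen2 */
    case 2:
        for (i = 0; i < 8; i++)
            printf("fence[%d] from base bank\n", i);
        break;
    }
}

int main(void) { record_fences(3, 1); return 0; }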
1706static struct drm_i915_error_object *
1707i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1708 struct intel_ring_buffer *ring)
1709{
1710 struct drm_i915_gem_object *obj;
1711 u32 seqno;
1712
1713 if (!ring->get_seqno)
1714 return NULL;
1715
1716 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1717 u32 acthd = I915_READ(ACTHD);
1718
1719 if (WARN_ON(ring->id != RCS))
1720 return NULL;
1721
1722 obj = ring->private;
1723 if (acthd >= obj->gtt_offset &&
1724 acthd < obj->gtt_offset + obj->base.size)
1725 return i915_error_object_create(dev_priv, obj);
1726 }
1727
1728 seqno = ring->get_seqno(ring, false);
1729 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1730 if (obj->ring != ring)
1731 continue;
1732
1733 if (i915_seqno_passed(seqno, obj->last_read_seqno))
1734 continue;
1735
1736 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1737 continue;
1738
1739 /* We need to copy these to an anonymous buffer as the simplest
1740 * method to avoid being overwritten by userspace.
1741 */
1742 return i915_error_object_create(dev_priv, obj);
1743 }
1744
1745 return NULL;
1746}
1747
1748static void i915_record_ring_state(struct drm_device *dev,
1749 struct drm_i915_error_state *error,
1750 struct intel_ring_buffer *ring)
1751{
1752 struct drm_i915_private *dev_priv = dev->dev_private;
1753
1754 if (INTEL_INFO(dev)->gen >= 6) {
1755 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
1756 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
1757 error->semaphore_mboxes[ring->id][0]
1758 = I915_READ(RING_SYNC_0(ring->mmio_base));
1759 error->semaphore_mboxes[ring->id][1]
1760 = I915_READ(RING_SYNC_1(ring->mmio_base));
1761 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1762 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
1763 }
1764
1765 if (INTEL_INFO(dev)->gen >= 4) {
1766 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
1767 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1768 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1769 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1770 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
1771 if (ring->id == RCS)
1772 error->bbaddr = I915_READ64(BB_ADDR);
1773 } else {
1774 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1775 error->ipeir[ring->id] = I915_READ(IPEIR);
1776 error->ipehr[ring->id] = I915_READ(IPEHR);
1777 error->instdone[ring->id] = I915_READ(INSTDONE);
1778 }
1779
1780 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
1781 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1782 error->seqno[ring->id] = ring->get_seqno(ring, false);
1783 error->acthd[ring->id] = intel_ring_get_active_head(ring);
1784 error->head[ring->id] = I915_READ_HEAD(ring);
1785 error->tail[ring->id] = I915_READ_TAIL(ring);
1786 error->ctl[ring->id] = I915_READ_CTL(ring);
1787
1788 error->cpu_ring_head[ring->id] = ring->head;
1789 error->cpu_ring_tail[ring->id] = ring->tail;
1790}
1791
1792
1793static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
1794 struct drm_i915_error_state *error,
1795 struct drm_i915_error_ring *ering)
1796{
1797 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1798 struct drm_i915_gem_object *obj;
1799
1800 /* Currently render ring is the only HW context user */
1801 if (ring->id != RCS || !error->ccid)
1802 return;
1803
1804 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1805 if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
1806 ering->ctx = i915_error_object_create_sized(dev_priv,
1807 obj, 1);
1808 }
1809 }
1810}
1811
1812static void i915_gem_record_rings(struct drm_device *dev,
1813 struct drm_i915_error_state *error)
1814{
1815 struct drm_i915_private *dev_priv = dev->dev_private;
1816 struct intel_ring_buffer *ring;
1817 struct drm_i915_gem_request *request;
1818 int i, count;
1819
1820 for_each_ring(ring, dev_priv, i) {
1821 i915_record_ring_state(dev, error, ring);
1822
1823 error->ring[i].batchbuffer =
1824 i915_error_first_batchbuffer(dev_priv, ring);
1825
1826 error->ring[i].ringbuffer =
1827 i915_error_object_create(dev_priv, ring->obj);
1828
1829
1830 i915_gem_record_active_context(ring, error, &error->ring[i]);
1831
1832 count = 0;
1833 list_for_each_entry(request, &ring->request_list, list)
1834 count++;
1835
1836 error->ring[i].num_requests = count;
1837 error->ring[i].requests =
1838 kmalloc(count*sizeof(struct drm_i915_error_request),
1839 GFP_ATOMIC);
1840 if (error->ring[i].requests == NULL) {
1841 error->ring[i].num_requests = 0;
1842 continue;
1843 }
1844
1845 count = 0;
1846 list_for_each_entry(request, &ring->request_list, list) {
1847 struct drm_i915_error_request *erq;
1848
1849 erq = &error->ring[i].requests[count++];
1850 erq->seqno = request->seqno;
1851 erq->jiffies = request->emitted_jiffies;
1852 erq->tail = request->tail;
1853 }
1854 }
1855}
1856
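Recording requests is a two-pass walk because the capture runs in atomic context: count first so a single GFP_ATOMIC allocation suffices, then fill, and degrade to zero recorded requests if the allocation fails rather than aborting the whole capture. A user-space sketch of the same shape:

#include <stdio.h>
#include <stdlib.h>

struct request { int seqno; struct request *next; };

int main(void)
{
    struct request r2 = { 2, NULL }, r1 = { 1, &r2 }, *req;
    int count = 0, i = 0;
    int *seqnos;

    for (req = &r1; req; req = req->next)   /* pass 1: count */
        count++;

    seqnos = malloc(count * sizeof(*seqnos));
    if (!seqnos)
        return 0;                           /* degrade, don't fail */

    for (req = &r1; req; req = req->next)   /* pass 2: fill */
        seqnos[i++] = req->seqno;

    for (i = 0; i < count; i++)
        printf("request %d\n", seqnos[i]);
    free(seqnos);
    return 0;
}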
1857/**
1858 * i915_capture_error_state - capture an error record for later analysis
1859 * @dev: drm device
1860 *
1861 * Should be called when an error is detected (either a hang or an error
1862 * interrupt) to capture error state from the time of the error. Fills
1863 * out a structure which becomes available in debugfs for user level tools
1864 * to pick up.
1865 */
1866static void i915_capture_error_state(struct drm_device *dev)
1867{
1868 struct drm_i915_private *dev_priv = dev->dev_private;
1869 struct drm_i915_gem_object *obj;
1870 struct drm_i915_error_state *error;
1871 unsigned long flags;
1872 int i, pipe;
1873
1874 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1875 error = dev_priv->gpu_error.first_error;
1876 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1877 if (error)
1878 return;
1879
1880 /* Account for pipe specific data like PIPE*STAT */
1881 error = kzalloc(sizeof(*error), GFP_ATOMIC);
1882 if (!error) {
1883 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1884 return;
1885 }
1886
1887 DRM_INFO("capturing error event; look for more information in "
1888 "/sys/kernel/debug/dri/%d/i915_error_state\n",
1889 dev->primary->index);
1890
1891 kref_init(&error->ref);
1892 error->eir = I915_READ(EIR);
1893 error->pgtbl_er = I915_READ(PGTBL_ER);
1894 if (HAS_HW_CONTEXTS(dev))
1895 error->ccid = I915_READ(CCID);
1896
1897 if (HAS_PCH_SPLIT(dev))
1898 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1899 else if (IS_VALLEYVIEW(dev))
1900 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1901 else if (IS_GEN2(dev))
1902 error->ier = I915_READ16(IER);
1903 else
1904 error->ier = I915_READ(IER);
1905
1906 if (INTEL_INFO(dev)->gen >= 6)
1907 error->derrmr = I915_READ(DERRMR);
1908
1909 if (IS_VALLEYVIEW(dev))
1910 error->forcewake = I915_READ(FORCEWAKE_VLV);
1911 else if (INTEL_INFO(dev)->gen >= 7)
1912 error->forcewake = I915_READ(FORCEWAKE_MT);
1913 else if (INTEL_INFO(dev)->gen == 6)
1914 error->forcewake = I915_READ(FORCEWAKE);
1915
1916 if (!HAS_PCH_SPLIT(dev))
1917 for_each_pipe(pipe)
1918 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1919
1920 if (INTEL_INFO(dev)->gen >= 6) {
1921 error->error = I915_READ(ERROR_GEN6);
1922 error->done_reg = I915_READ(DONE_REG);
1923 }
1924
1925 if (INTEL_INFO(dev)->gen == 7)
1926 error->err_int = I915_READ(GEN7_ERR_INT);
1927
1928 i915_get_extra_instdone(dev, error->extra_instdone);
1929
1930 i915_gem_record_fences(dev, error);
1931 i915_gem_record_rings(dev, error);
1932
1933 /* Record buffers on the active and pinned lists. */
1934 error->active_bo = NULL;
1935 error->pinned_bo = NULL;
1936
1937 i = 0;
1938 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1939 i++;
1940 error->active_bo_count = i;
1941 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1942 if (obj->pin_count)
1943 i++;
1944 error->pinned_bo_count = i - error->active_bo_count;
1945
1946 error->active_bo = NULL;
1947 error->pinned_bo = NULL;
1948 if (i) {
1949 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
1950 GFP_ATOMIC);
1951 if (error->active_bo)
1952 error->pinned_bo =
1953 error->active_bo + error->active_bo_count;
1954 }
1955
1956 if (error->active_bo)
1957 error->active_bo_count =
1958 capture_active_bo(error->active_bo,
1959 error->active_bo_count,
1960 &dev_priv->mm.active_list);
1961
1962 if (error->pinned_bo)
1963 error->pinned_bo_count =
1964 capture_pinned_bo(error->pinned_bo,
1965 error->pinned_bo_count,
1966 &dev_priv->mm.bound_list);
1967
1968 do_gettimeofday(&error->time);
1969
1970 error->overlay = intel_overlay_capture_error_state(dev);
1971 error->display = intel_display_capture_error_state(dev);
1972
1973 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1974 if (dev_priv->gpu_error.first_error == NULL) {
1975 dev_priv->gpu_error.first_error = error;
1976 error = NULL;
1977 }
1978 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1979
1980 if (error)
1981 i915_error_state_free(&error->ref);
1982}
1983
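i915_capture_error_state also shows a publish-once pattern: the expensive capture happens without the lock held, and the result is installed only if first_error is still NULL; a racing capture that loses simply frees its copy. Sketch, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t error_lock = PTHREAD_MUTEX_INITIALIZER;
static void *first_error;           /* only the first capture is kept */

void capture_error(void)
{
    void *err;

    pthread_mutex_lock(&error_lock);
    err = first_error;
    pthread_mutex_unlock(&error_lock);
    if (err)
        return;                     /* someone already captured one */

    err = malloc(128);              /* expensive capture, done unlocked */
    if (!err)
        return;

    pthread_mutex_lock(&error_lock);
    if (!first_error) {
        first_error = err;          /* we won the race: publish */
        err = NULL;
    }
    pthread_mutex_unlock(&error_lock);

    free(err);                      /* lost the race: drop our copy */
}

int main(void) { capture_error(); return 0; }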
1984void i915_destroy_error_state(struct drm_device *dev)
1985{
1986 struct drm_i915_private *dev_priv = dev->dev_private;
1987 struct drm_i915_error_state *error;
1988 unsigned long flags;
1989
1990 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1991 error = dev_priv->gpu_error.first_error;
1992 dev_priv->gpu_error.first_error = NULL;
1993 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1994
1995 if (error)
1996 kref_put(&error->ref, i915_error_state_free);
1997}
1998#else
1999#define i915_capture_error_state(x)
2000#endif
2001
2002static void i915_report_and_clear_eir(struct drm_device *dev) 1535static void i915_report_and_clear_eir(struct drm_device *dev)
2003{ 1536{
2004 struct drm_i915_private *dev_priv = dev->dev_private; 1537 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2155,10 +1688,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
2155 if (INTEL_INFO(dev)->gen >= 4) { 1688 if (INTEL_INFO(dev)->gen >= 4) {
2156 int dspsurf = DSPSURF(intel_crtc->plane); 1689 int dspsurf = DSPSURF(intel_crtc->plane);
2157 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 1690 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
2158 obj->gtt_offset; 1691 i915_gem_obj_ggtt_offset(obj);
2159 } else { 1692 } else {
2160 int dspaddr = DSPADDR(intel_crtc->plane); 1693 int dspaddr = DSPADDR(intel_crtc->plane);
2161 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + 1694 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
2162 crtc->y * crtc->fb->pitches[0] + 1695 crtc->y * crtc->fb->pitches[0] +
2163 crtc->x * crtc->fb->bits_per_pixel/8); 1696 crtc->x * crtc->fb->bits_per_pixel/8);
2164 } 1697 }
@@ -2202,29 +1735,14 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2202{ 1735{
2203 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1736 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2204 unsigned long irqflags; 1737 unsigned long irqflags;
1738 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
1739 DE_PIPE_VBLANK_ILK(pipe);
2205 1740
2206 if (!i915_pipe_enabled(dev, pipe)) 1741 if (!i915_pipe_enabled(dev, pipe))
2207 return -EINVAL; 1742 return -EINVAL;
2208 1743
2209 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1744 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2210 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 1745 ironlake_enable_display_irq(dev_priv, bit);
2211 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
2212 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2213
2214 return 0;
2215}
2216
2217static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
2218{
2219 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2220 unsigned long irqflags;
2221
2222 if (!i915_pipe_enabled(dev, pipe))
2223 return -EINVAL;
2224
2225 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2226 ironlake_enable_display_irq(dev_priv,
2227 DE_PIPEA_VBLANK_IVB << (5 * pipe));
2228 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1746 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2229 1747
2230 return 0; 1748 return 0;
@@ -2275,21 +1793,11 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2275{ 1793{
2276 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1794 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2277 unsigned long irqflags; 1795 unsigned long irqflags;
1796 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
1797 DE_PIPE_VBLANK_ILK(pipe);
2278 1798
2279 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1799 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2280 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1800 ironlake_disable_display_irq(dev_priv, bit);
2281 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
2282 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2283}
2284
2285static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
2286{
2287 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2288 unsigned long irqflags;
2289
2290 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2291 ironlake_disable_display_irq(dev_priv,
2292 DE_PIPEA_VBLANK_IVB << (pipe * 5));
2293 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1801 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2294} 1802}
2295 1803
@@ -2392,10 +1900,10 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
2392 u32 tmp; 1900 u32 tmp;
2393 1901
2394 if (ring->hangcheck.acthd != acthd) 1902 if (ring->hangcheck.acthd != acthd)
2395 return active; 1903 return HANGCHECK_ACTIVE;
2396 1904
2397 if (IS_GEN2(dev)) 1905 if (IS_GEN2(dev))
2398 return hung; 1906 return HANGCHECK_HUNG;
2399 1907
2400 /* Is the chip hanging on a WAIT_FOR_EVENT? 1908 /* Is the chip hanging on a WAIT_FOR_EVENT?
2401 * If so we can simply poke the RB_WAIT bit 1909 * If so we can simply poke the RB_WAIT bit
@@ -2407,24 +1915,24 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
2407 DRM_ERROR("Kicking stuck wait on %s\n", 1915 DRM_ERROR("Kicking stuck wait on %s\n",
2408 ring->name); 1916 ring->name);
2409 I915_WRITE_CTL(ring, tmp); 1917 I915_WRITE_CTL(ring, tmp);
2410 return kick; 1918 return HANGCHECK_KICK;
2411 } 1919 }
2412 1920
2413 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 1921 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2414 switch (semaphore_passed(ring)) { 1922 switch (semaphore_passed(ring)) {
2415 default: 1923 default:
2416 return hung; 1924 return HANGCHECK_HUNG;
2417 case 1: 1925 case 1:
2418 DRM_ERROR("Kicking stuck semaphore on %s\n", 1926 DRM_ERROR("Kicking stuck semaphore on %s\n",
2419 ring->name); 1927 ring->name);
2420 I915_WRITE_CTL(ring, tmp); 1928 I915_WRITE_CTL(ring, tmp);
2421 return kick; 1929 return HANGCHECK_KICK;
2422 case 0: 1930 case 0:
2423 return wait; 1931 return HANGCHECK_WAIT;
2424 } 1932 }
2425 } 1933 }
2426 1934
2427 return hung; 1935 return HANGCHECK_HUNG;
2428} 1936}
2429 1937
2430/** 1938/**
@@ -2435,7 +1943,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
2435 * we kick the ring. If we see no progress on three subsequent calls 1943 * we kick the ring. If we see no progress on three subsequent calls
2436 * we assume chip is wedged and try to fix it by resetting the chip. 1944 * we assume chip is wedged and try to fix it by resetting the chip.
2437 */ 1945 */
2438void i915_hangcheck_elapsed(unsigned long data) 1946static void i915_hangcheck_elapsed(unsigned long data)
2439{ 1947{
2440 struct drm_device *dev = (struct drm_device *)data; 1948 struct drm_device *dev = (struct drm_device *)data;
2441 drm_i915_private_t *dev_priv = dev->dev_private; 1949 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2471,8 +1979,6 @@ void i915_hangcheck_elapsed(unsigned long data)
2471 } else 1979 } else
2472 busy = false; 1980 busy = false;
2473 } else { 1981 } else {
2474 int score;
2475
2476 /* We always increment the hangcheck score 1982 /* We always increment the hangcheck score
2477 * if the ring is busy and still processing 1983 * if the ring is busy and still processing
2478 * the same request, so that no single request 1984 * the same request, so that no single request
@@ -2492,21 +1998,19 @@ void i915_hangcheck_elapsed(unsigned long data)
2492 acthd); 1998 acthd);
2493 1999
2494 switch (ring->hangcheck.action) { 2000 switch (ring->hangcheck.action) {
2495 case wait: 2001 case HANGCHECK_WAIT:
2496 score = 0;
2497 break; 2002 break;
2498 case active: 2003 case HANGCHECK_ACTIVE:
2499 score = BUSY; 2004 ring->hangcheck.score += BUSY;
2500 break; 2005 break;
2501 case kick: 2006 case HANGCHECK_KICK:
2502 score = KICK; 2007 ring->hangcheck.score += KICK;
2503 break; 2008 break;
2504 case hung: 2009 case HANGCHECK_HUNG:
2505 score = HUNG; 2010 ring->hangcheck.score += HUNG;
2506 stuck[i] = true; 2011 stuck[i] = true;
2507 break; 2012 break;
2508 } 2013 }
2509 ring->hangcheck.score += score;
2510 } 2014 }
2511 } else { 2015 } else {
2512 /* Gradually reduce the count so that we catch DoS 2016 /* Gradually reduce the count so that we catch DoS
@@ -2536,9 +2040,17 @@ void i915_hangcheck_elapsed(unsigned long data)
2536 if (busy_count) 2040 if (busy_count)
2537 /* Reset timer in case chip hangs without another request 2041 /* Reset timer in case chip hangs without another request
2538 * being added */ 2042 * being added */
2539 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 2043 i915_queue_hangcheck(dev);
2540 round_jiffies_up(jiffies + 2044}
2541 DRM_I915_HANGCHECK_JIFFIES)); 2045
2046void i915_queue_hangcheck(struct drm_device *dev)
2047{
2048 struct drm_i915_private *dev_priv = dev->dev_private;
2049 if (!i915_enable_hangcheck)
2050 return;
2051
2052 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2053 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2542} 2054}
2543 2055
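The hangcheck timer is now re-armed through one helper gated on i915_enable_hangcheck, so every caller gets the same round_jiffies_up batching. A user-space analogue of the self-rearming shape, with the timer replaced by a direct call and all names illustrative:

#include <stdbool.h>
#include <stdio.h>

static bool hangcheck_enabled = true;

static void queue_hangcheck(void);

static int busy_rings(void) { static int n = 3; return n--; }

static void hangcheck_elapsed(void)
{
    int busy_count = busy_rings();

    printf("hangcheck: %d busy rings\n", busy_count);
    if (busy_count)
        queue_hangcheck();   /* re-arm: catch a hang with no new requests */
}

static void queue_hangcheck(void)
{
    if (!hangcheck_enabled)  /* single gate, like i915_enable_hangcheck */
        return;
    /* in the driver this is mod_timer(..., round_jiffies_up(jiffies +
     * DRM_I915_HANGCHECK_JIFFIES)); here we just run the check inline */
    hangcheck_elapsed();
}

int main(void) { queue_hangcheck(); return 0; }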
2544static void ibx_irq_preinstall(struct drm_device *dev) 2056static void ibx_irq_preinstall(struct drm_device *dev)
@@ -2560,31 +2072,26 @@ static void ibx_irq_preinstall(struct drm_device *dev)
2560 POSTING_READ(SDEIER); 2072 POSTING_READ(SDEIER);
2561} 2073}
2562 2074
2563/* drm_dma.h hooks 2075static void gen5_gt_irq_preinstall(struct drm_device *dev)
2564*/
2565static void ironlake_irq_preinstall(struct drm_device *dev)
2566{ 2076{
2567 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2077 struct drm_i915_private *dev_priv = dev->dev_private;
2568
2569 atomic_set(&dev_priv->irq_received, 0);
2570
2571 I915_WRITE(HWSTAM, 0xeffe);
2572
2573 /* XXX hotplug from PCH */
2574
2575 I915_WRITE(DEIMR, 0xffffffff);
2576 I915_WRITE(DEIER, 0x0);
2577 POSTING_READ(DEIER);
2578 2078
2579 /* and GT */ 2079 /* and GT */
2580 I915_WRITE(GTIMR, 0xffffffff); 2080 I915_WRITE(GTIMR, 0xffffffff);
2581 I915_WRITE(GTIER, 0x0); 2081 I915_WRITE(GTIER, 0x0);
2582 POSTING_READ(GTIER); 2082 POSTING_READ(GTIER);
2583 2083
2584 ibx_irq_preinstall(dev); 2084 if (INTEL_INFO(dev)->gen >= 6) {
2085 /* and PM */
2086 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2087 I915_WRITE(GEN6_PMIER, 0x0);
2088 POSTING_READ(GEN6_PMIER);
2089 }
2585} 2090}
2586 2091
2587static void ivybridge_irq_preinstall(struct drm_device *dev) 2092/* drm_dma.h hooks
2093*/
2094static void ironlake_irq_preinstall(struct drm_device *dev)
2588{ 2095{
2589 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2096 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2590 2097
@@ -2592,21 +2099,11 @@ static void ivybridge_irq_preinstall(struct drm_device *dev)
2592 2099
2593 I915_WRITE(HWSTAM, 0xeffe); 2100 I915_WRITE(HWSTAM, 0xeffe);
2594 2101
2595 /* XXX hotplug from PCH */
2596
2597 I915_WRITE(DEIMR, 0xffffffff); 2102 I915_WRITE(DEIMR, 0xffffffff);
2598 I915_WRITE(DEIER, 0x0); 2103 I915_WRITE(DEIER, 0x0);
2599 POSTING_READ(DEIER); 2104 POSTING_READ(DEIER);
2600 2105
2601 /* and GT */ 2106 gen5_gt_irq_preinstall(dev);
2602 I915_WRITE(GTIMR, 0xffffffff);
2603 I915_WRITE(GTIER, 0x0);
2604 POSTING_READ(GTIER);
2605
2606 /* Power management */
2607 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2608 I915_WRITE(GEN6_PMIER, 0x0);
2609 POSTING_READ(GEN6_PMIER);
2610 2107
2611 ibx_irq_preinstall(dev); 2108 ibx_irq_preinstall(dev);
2612} 2109}
@@ -2627,9 +2124,8 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
2627 /* and GT */ 2124 /* and GT */
2628 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2125 I915_WRITE(GTIIR, I915_READ(GTIIR));
2629 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2126 I915_WRITE(GTIIR, I915_READ(GTIIR));
2630 I915_WRITE(GTIMR, 0xffffffff); 2127
2631 I915_WRITE(GTIER, 0x0); 2128 gen5_gt_irq_preinstall(dev);
2632 POSTING_READ(GTIER);
2633 2129
2634 I915_WRITE(DPINVGTT, 0xff); 2130 I915_WRITE(DPINVGTT, 0xff);
2635 2131
@@ -2648,22 +2144,21 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
2648 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2144 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2649 struct drm_mode_config *mode_config = &dev->mode_config; 2145 struct drm_mode_config *mode_config = &dev->mode_config;
2650 struct intel_encoder *intel_encoder; 2146 struct intel_encoder *intel_encoder;
2651 u32 mask = ~I915_READ(SDEIMR); 2147 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
2652 u32 hotplug;
2653 2148
2654 if (HAS_PCH_IBX(dev)) { 2149 if (HAS_PCH_IBX(dev)) {
2655 mask &= ~SDE_HOTPLUG_MASK; 2150 hotplug_irqs = SDE_HOTPLUG_MASK;
2656 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2151 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2657 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2152 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2658 mask |= hpd_ibx[intel_encoder->hpd_pin]; 2153 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
2659 } else { 2154 } else {
2660 mask &= ~SDE_HOTPLUG_MASK_CPT; 2155 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
2661 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2156 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2662 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2157 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2663 mask |= hpd_cpt[intel_encoder->hpd_pin]; 2158 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
2664 } 2159 }
2665 2160
2666 I915_WRITE(SDEIMR, ~mask); 2161 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
2667 2162
2668 /* 2163 /*
2669 * Enable digital hotplug on the PCH, and configure the DP short pulse 2164 * Enable digital hotplug on the PCH, and configure the DP short pulse
@@ -2700,123 +2195,103 @@ static void ibx_irq_postinstall(struct drm_device *dev)
2700 I915_WRITE(SDEIMR, ~mask); 2195 I915_WRITE(SDEIMR, ~mask);
2701} 2196}
2702 2197
2703static int ironlake_irq_postinstall(struct drm_device *dev) 2198static void gen5_gt_irq_postinstall(struct drm_device *dev)
2704{ 2199{
2705 unsigned long irqflags; 2200 struct drm_i915_private *dev_priv = dev->dev_private;
2706 2201 u32 pm_irqs, gt_irqs;
2707 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2708 /* enable kind of interrupts always enabled */
2709 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2710 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2711 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
2712 DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
2713 u32 gt_irqs;
2714
2715 dev_priv->irq_mask = ~display_mask;
2716 2202
2717 /* should always be able to generate an irq */ 2203 pm_irqs = gt_irqs = 0;
2718 I915_WRITE(DEIIR, I915_READ(DEIIR));
2719 I915_WRITE(DEIMR, dev_priv->irq_mask);
2720 I915_WRITE(DEIER, display_mask |
2721 DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
2722 POSTING_READ(DEIER);
2723 2204
2724 dev_priv->gt_irq_mask = ~0; 2205 dev_priv->gt_irq_mask = ~0;
2206 if (HAS_L3_GPU_CACHE(dev)) {
2207 /* L3 parity interrupt is always unmasked. */
2208 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2209 gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2210 }
2725 2211
2726 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2212 gt_irqs |= GT_RENDER_USER_INTERRUPT;
2727 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2213 if (IS_GEN5(dev)) {
2728
2729 gt_irqs = GT_RENDER_USER_INTERRUPT;
2730
2731 if (IS_GEN6(dev))
2732 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2733 else
2734 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 2214 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2735 ILK_BSD_USER_INTERRUPT; 2215 ILK_BSD_USER_INTERRUPT;
2216 } else {
2217 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2218 }
2736 2219
2220 I915_WRITE(GTIIR, I915_READ(GTIIR));
2221 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2737 I915_WRITE(GTIER, gt_irqs); 2222 I915_WRITE(GTIER, gt_irqs);
2738 POSTING_READ(GTIER); 2223 POSTING_READ(GTIER);
2739 2224
2740 ibx_irq_postinstall(dev); 2225 if (INTEL_INFO(dev)->gen >= 6) {
2226 pm_irqs |= GEN6_PM_RPS_EVENTS;
2741 2227
2742 if (IS_IRONLAKE_M(dev)) { 2228 if (HAS_VEBOX(dev))
2743 /* Enable PCU event interrupts 2229 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
2744 *
2745 * spinlocking not required here for correctness since interrupt
2746 * setup is guaranteed to run in single-threaded context. But we
2747 * need it to make the assert_spin_locked happy. */
2748 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2749 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2750 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2751 }
2752 2230
2753 return 0; 2231 dev_priv->pm_irq_mask = 0xffffffff;
2232 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2233 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
2234 I915_WRITE(GEN6_PMIER, pm_irqs);
2235 POSTING_READ(GEN6_PMIER);
2236 }
2754} 2237}
2755 2238
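gen5_gt_irq_postinstall follows the usual bring-up order for an IIR/IMR/IER triple: ack any stale identity bits by writing IIR back to itself, program the mask, then enable and flush with a posting read. A sketch with the registers modelled as a small array, giving IIR write-one-to-clear semantics:

#include <stdint.h>
#include <stdio.h>

enum { IIR, IMR, IER, NREGS };
static uint32_t regs[NREGS] = { 0xdeadbeefu, 0, 0 }; /* stale IIR bits */

static uint32_t rd(int r) { return regs[r]; }
static void wr(int r, uint32_t v)
{
    if (r == IIR)
        regs[r] &= ~v;   /* identity register: write 1 to clear */
    else
        regs[r] = v;
}

int main(void)
{
    uint32_t irq_mask = ~0x11u;  /* everything masked but two sources */

    wr(IIR, rd(IIR));  /* 1: write IIR back to itself to ack stale bits */
    wr(IMR, irq_mask); /* 2: program the mask before enabling */
    wr(IER, 0x11);     /* 3: enable the sources we care about */
    (void)rd(IER);     /* 4: posting read flushes the enable */

    printf("IIR=%08x IMR=%08x IER=%08x\n",
           (unsigned)rd(IIR), (unsigned)rd(IMR), (unsigned)rd(IER));
    return 0;
}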
2756static int ivybridge_irq_postinstall(struct drm_device *dev) 2239static int ironlake_irq_postinstall(struct drm_device *dev)
2757{ 2240{
2241 unsigned long irqflags;
2758 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2242 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2759 /* enable kind of interrupts always enabled */ 2243 u32 display_mask, extra_mask;
2760 u32 display_mask = 2244
2761 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | 2245 if (INTEL_INFO(dev)->gen >= 7) {
2762 DE_PLANEC_FLIP_DONE_IVB | 2246 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
2763 DE_PLANEB_FLIP_DONE_IVB | 2247 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
2764 DE_PLANEA_FLIP_DONE_IVB | 2248 DE_PLANEB_FLIP_DONE_IVB |
2765 DE_AUX_CHANNEL_A_IVB | 2249 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
2766 DE_ERR_INT_IVB; 2250 DE_ERR_INT_IVB);
2767 u32 pm_irqs = GEN6_PM_RPS_EVENTS; 2251 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
2768 u32 gt_irqs; 2252 DE_PIPEA_VBLANK_IVB);
2253
2254 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2255 } else {
2256 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2257 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2258 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
2259 DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
2260 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
2261 }
2769 2262
2770 dev_priv->irq_mask = ~display_mask; 2263 dev_priv->irq_mask = ~display_mask;
2771 2264
2772 /* should always can generate irq */ 2265 /* should always can generate irq */
2773 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2774 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2266 I915_WRITE(DEIIR, I915_READ(DEIIR));
2775 I915_WRITE(DEIMR, dev_priv->irq_mask); 2267 I915_WRITE(DEIMR, dev_priv->irq_mask);
2776 I915_WRITE(DEIER, 2268 I915_WRITE(DEIER, display_mask | extra_mask);
2777 display_mask |
2778 DE_PIPEC_VBLANK_IVB |
2779 DE_PIPEB_VBLANK_IVB |
2780 DE_PIPEA_VBLANK_IVB);
2781 POSTING_READ(DEIER); 2269 POSTING_READ(DEIER);
2782 2270
2783 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2271 gen5_gt_irq_postinstall(dev);
2784
2785 I915_WRITE(GTIIR, I915_READ(GTIIR));
2786 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2787
2788 gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2789 GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2790 I915_WRITE(GTIER, gt_irqs);
2791 POSTING_READ(GTIER);
2792
2793 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2794 if (HAS_VEBOX(dev))
2795 pm_irqs |= PM_VEBOX_USER_INTERRUPT |
2796 PM_VEBOX_CS_ERROR_INTERRUPT;
2797
2798 /* Our enable/disable rps functions may touch these registers so
2799 * make sure to set a known state for only the non-RPS bits.
2800 * The RMW is extra paranoia since this should be called after being set
2801 * to a known state in preinstall.
2802 * */
2803 I915_WRITE(GEN6_PMIMR,
2804 (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
2805 I915_WRITE(GEN6_PMIER,
2806 (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
2807 POSTING_READ(GEN6_PMIER);
2808 2272
2809 ibx_irq_postinstall(dev); 2273 ibx_irq_postinstall(dev);
2810 2274
2275 if (IS_IRONLAKE_M(dev)) {
2276 /* Enable PCU event interrupts
2277 *
2278 * spinlocking not required here for correctness since interrupt
2279 * setup is guaranteed to run in single-threaded context. But we
2280 * need it to make the assert_spin_locked happy. */
2281 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2282 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2283 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2284 }
2285
2811 return 0; 2286 return 0;
2812} 2287}
2813 2288
2814static int valleyview_irq_postinstall(struct drm_device *dev) 2289static int valleyview_irq_postinstall(struct drm_device *dev)
2815{ 2290{
2816 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2291 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2817 u32 gt_irqs;
2818 u32 enable_mask; 2292 u32 enable_mask;
2819 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2293 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2294 unsigned long irqflags;
2820 2295
2821 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 2296 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2822 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2297 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -2842,20 +2317,18 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2842 I915_WRITE(PIPESTAT(1), 0xffff); 2317 I915_WRITE(PIPESTAT(1), 0xffff);
2843 POSTING_READ(VLV_IER); 2318 POSTING_READ(VLV_IER);
2844 2319
2320 /* Interrupt setup is already guaranteed to be single-threaded; this is
2321 * just to make the assert_spin_locked check happy. */
2322 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2845 i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2323 i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2846 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2324 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2847 i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2325 i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2326 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2848 2327
2849 I915_WRITE(VLV_IIR, 0xffffffff); 2328 I915_WRITE(VLV_IIR, 0xffffffff);
2850 I915_WRITE(VLV_IIR, 0xffffffff); 2329 I915_WRITE(VLV_IIR, 0xffffffff);
2851 2330
2852 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2331 gen5_gt_irq_postinstall(dev);
2853 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2854
2855 gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2856 GT_BLT_USER_INTERRUPT;
2857 I915_WRITE(GTIER, gt_irqs);
2858 POSTING_READ(GTIER);
2859 2332
2860 /* ack & enable invalid PTE error interrupts */ 2333 /* ack & enable invalid PTE error interrupts */
2861#if 0 /* FIXME: add support to irq handler for checking these bits */ 2334#if 0 /* FIXME: add support to irq handler for checking these bits */
@@ -3001,7 +2474,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3001 u16 iir, new_iir; 2474 u16 iir, new_iir;
3002 u32 pipe_stats[2]; 2475 u32 pipe_stats[2];
3003 unsigned long irqflags; 2476 unsigned long irqflags;
3004 int irq_received;
3005 int pipe; 2477 int pipe;
3006 u16 flip_mask = 2478 u16 flip_mask =
3007 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2479 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -3035,7 +2507,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3035 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2507 DRM_DEBUG_DRIVER("pipe %c underrun\n",
3036 pipe_name(pipe)); 2508 pipe_name(pipe));
3037 I915_WRITE(reg, pipe_stats[pipe]); 2509 I915_WRITE(reg, pipe_stats[pipe]);
3038 irq_received = 1;
3039 } 2510 }
3040 } 2511 }
3041 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2512 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -3323,6 +2794,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
3323 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2794 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3324 u32 enable_mask; 2795 u32 enable_mask;
3325 u32 error_mask; 2796 u32 error_mask;
2797 unsigned long irqflags;
3326 2798
3327 /* Unmask the interrupts that we always want on. */ 2799 /* Unmask the interrupts that we always want on. */
3328 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 2800 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
@@ -3341,7 +2813,11 @@ static int i965_irq_postinstall(struct drm_device *dev)
3341 if (IS_G4X(dev)) 2813 if (IS_G4X(dev))
3342 enable_mask |= I915_BSD_USER_INTERRUPT; 2814 enable_mask |= I915_BSD_USER_INTERRUPT;
3343 2815
2816 /* Interrupt setup is already guaranteed to be single-threaded; this is
2817 * just to make the assert_spin_locked check happy. */
2818 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3344 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2819 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2820 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3345 2821
3346 /* 2822 /*
3347 * Enable some error detection, note the instruction error mask 2823 * Enable some error detection, note the instruction error mask
@@ -3616,15 +3092,6 @@ void intel_irq_init(struct drm_device *dev)
3616 dev->driver->enable_vblank = valleyview_enable_vblank; 3092 dev->driver->enable_vblank = valleyview_enable_vblank;
3617 dev->driver->disable_vblank = valleyview_disable_vblank; 3093 dev->driver->disable_vblank = valleyview_disable_vblank;
3618 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3094 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3619 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
3620 /* Share uninstall handlers with ILK/SNB */
3621 dev->driver->irq_handler = ivybridge_irq_handler;
3622 dev->driver->irq_preinstall = ivybridge_irq_preinstall;
3623 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
3624 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3625 dev->driver->enable_vblank = ivybridge_enable_vblank;
3626 dev->driver->disable_vblank = ivybridge_disable_vblank;
3627 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3628 } else if (HAS_PCH_SPLIT(dev)) { 3095 } else if (HAS_PCH_SPLIT(dev)) {
3629 dev->driver->irq_handler = ironlake_irq_handler; 3096 dev->driver->irq_handler = ironlake_irq_handler;
3630 dev->driver->irq_preinstall = ironlake_irq_preinstall; 3097 dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -3683,3 +3150,67 @@ void intel_hpd_init(struct drm_device *dev)
3683 dev_priv->display.hpd_irq_setup(dev); 3150 dev_priv->display.hpd_irq_setup(dev);
3684 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3151 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3685} 3152}
3153
3154/* Disable interrupts so we can allow Package C8+. */
3155void hsw_pc8_disable_interrupts(struct drm_device *dev)
3156{
3157 struct drm_i915_private *dev_priv = dev->dev_private;
3158 unsigned long irqflags;
3159
3160 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3161
3162 dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
3163 dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
3164 dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
3165 dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
3166 dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
3167
3168 ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
3169 ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
3170 ilk_disable_gt_irq(dev_priv, 0xffffffff);
3171 snb_disable_pm_irq(dev_priv, 0xffffffff);
3172
3173 dev_priv->pc8.irqs_disabled = true;
3174
3175 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3176}
3177
3178/* Restore interrupts so we can recover from Package C8+. */
3179void hsw_pc8_restore_interrupts(struct drm_device *dev)
3180{
3181 struct drm_i915_private *dev_priv = dev->dev_private;
3182 unsigned long irqflags;
3183 uint32_t val, expected;
3184
3185 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3186
3187 val = I915_READ(DEIMR);
3188 expected = ~DE_PCH_EVENT_IVB;
3189 WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
3190
3191 val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
3192 expected = ~SDE_HOTPLUG_MASK_CPT;
3193 WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
3194 val, expected);
3195
3196 val = I915_READ(GTIMR);
3197 expected = 0xffffffff;
3198 WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
3199
3200 val = I915_READ(GEN6_PMIMR);
3201 expected = 0xffffffff;
3202 WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
3203 expected);
3204
3205 dev_priv->pc8.irqs_disabled = false;
3206
3207 ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
3208 ibx_enable_display_interrupt(dev_priv,
3209 ~dev_priv->pc8.regsave.sdeimr &
3210 ~SDE_HOTPLUG_MASK_CPT);
3211 ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
3212 snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
3213 I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
3214
3215 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3216}
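The PC8 pair is save/verify/restore: disable records each mask register before masking everything, and restore warns loudly if anything touched those registers while interrupts were supposed to be off, then puts the saved masks back. Reduced to one register for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t imr = 0x00ff00ffu;  /* live mask register */
static uint32_t saved_imr;

static void pc8_disable(void)
{
    saved_imr = imr;
    imr = 0xffffffffu;              /* mask everything for package C8+ */
}

static void pc8_restore(void)
{
    if (imr != 0xffffffffu)         /* stand-in for the driver's WARN() */
        fprintf(stderr, "IMR is 0x%08x, not 0xffffffff\n", (unsigned)imr);
    imr = saved_imr;                /* re-enable what was enabled before */
}

int main(void)
{
    pc8_disable();
    pc8_restore();
    assert(imr == 0x00ff00ffu);
    printf("restored IMR 0x%08x\n", (unsigned)imr);
    return 0;
}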
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f2326fc60ac9..b6a58f720f9a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -61,6 +61,12 @@
61#define GC_LOW_FREQUENCY_ENABLE (1 << 7) 61#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
62#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) 62#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
63#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4) 63#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
64#define GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4)
65#define GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4)
66#define GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4)
67#define GC_DISPLAY_CLOCK_200_MHZ_PNV (5 << 4)
68#define GC_DISPLAY_CLOCK_133_MHZ_PNV (6 << 4)
69#define GC_DISPLAY_CLOCK_167_MHZ_PNV (7 << 4)
64#define GC_DISPLAY_CLOCK_MASK (7 << 4) 70#define GC_DISPLAY_CLOCK_MASK (7 << 4)
65#define GM45_GC_RENDER_CLOCK_MASK (0xf << 0) 71#define GM45_GC_RENDER_CLOCK_MASK (0xf << 0)
66#define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0) 72#define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0)
@@ -363,6 +369,7 @@
363#define PUNIT_REG_GPU_LFM 0xd3 369#define PUNIT_REG_GPU_LFM 0xd3
364#define PUNIT_REG_GPU_FREQ_REQ 0xd4 370#define PUNIT_REG_GPU_FREQ_REQ 0xd4
365#define PUNIT_REG_GPU_FREQ_STS 0xd8 371#define PUNIT_REG_GPU_FREQ_STS 0xd8
372#define GENFREQSTATUS (1<<0)
366#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc 373#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
367 374
368#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */ 375#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
@@ -680,6 +687,7 @@
680#define ERR_INT_FIFO_UNDERRUN_C (1<<6) 687#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
681#define ERR_INT_FIFO_UNDERRUN_B (1<<3) 688#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
682#define ERR_INT_FIFO_UNDERRUN_A (1<<0) 689#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
690#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
683 691
684#define FPGA_DBG 0x42300 692#define FPGA_DBG 0x42300
685#define FPGA_DBG_RM_NOCLAIM (1<<31) 693#define FPGA_DBG_RM_NOCLAIM (1<<31)
@@ -752,6 +760,8 @@
 					will not assert AGPBUSY# and will only
 					be delivered when out of C3. */
 #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE (1<<9)
+#define INSTPM_SYNC_FLUSH (1<<5)
 #define ACTHD 0x020c8
 #define FW_BLC 0x020d8
 #define FW_BLC2 0x020dc
@@ -1125,7 +1135,8 @@
 #define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
 #define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
 #define DPLL_VCO_ENABLE (1 << 31)
-#define DPLL_DVO_HIGH_SPEED (1 << 30)
+#define DPLL_SDVO_HIGH_SPEED (1 << 30)
+#define DPLL_DVO_2X_MODE (1 << 30)
 #define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
 #define DPLL_SYNCLOCK_ENABLE (1 << 29)
 #define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
@@ -1438,6 +1449,8 @@
 #define MCH_SSKPD_WM0_MASK 0x3f
 #define MCH_SSKPD_WM0_VAL 0xc
 
+#define MCH_SECP_NRG_STTS (MCHBAR_MIRROR_BASE_SNB + 0x592c)
+
 /* Clocking configuration register */
 #define CLKCFG 0x10c00
 #define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
@@ -1694,15 +1707,26 @@
  */
 #define CCID 0x2180
 #define CCID_EN (1<<0)
+/*
+ * Notes on SNB/IVB/VLV context size:
+ * - Power context is saved elsewhere (LLC or stolen)
+ * - Ring/execlist context is saved on SNB, not on IVB
+ * - Extended context size already includes render context size
+ * - We always need to follow the extended context size.
+ *   SNB BSpec has comments indicating that we should use the
+ *   render context size instead if execlists are disabled, but
+ *   based on empirical testing that's just nonsense.
+ * - Pipelined/VF state is saved on SNB/IVB respectively
+ * - GT1 size just indicates how much of render context
+ *   doesn't need saving on GT1
+ */
 #define CXT_SIZE 0x21a0
 #define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f)
 #define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f)
 #define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f)
 #define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f)
 #define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f)
-#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_POWER_SIZE(cxt_reg) + \
-					GEN6_CXT_RING_SIZE(cxt_reg) + \
-					GEN6_CXT_RENDER_SIZE(cxt_reg) + \
+#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \
 					GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
 					GEN6_CXT_PIPELINE_SIZE(cxt_reg))
 #define GEN7_CXT_SIZE 0x21a8
@@ -1712,11 +1736,7 @@
 #define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f)
 #define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7)
 #define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f)
-#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_POWER_SIZE(ctx_reg) + \
-					GEN7_CXT_RING_SIZE(ctx_reg) + \
-					GEN7_CXT_RENDER_SIZE(ctx_reg) + \
-					GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
-					GEN7_CXT_GT1_SIZE(ctx_reg) + \
+#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
 					GEN7_CXT_VFSTATE_SIZE(ctx_reg))
 /* Haswell does have the CXT_SIZE register however it does not appear to be
  * valid. Now, docs explain in dwords what is in the context object. The full
@@ -1776,6 +1796,71 @@
 #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
 #define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
 
+/* HSW eDP PSR registers */
+#define EDP_PSR_CTL 0x64800
+#define EDP_PSR_ENABLE (1<<31)
+#define EDP_PSR_LINK_DISABLE (0<<27)
+#define EDP_PSR_LINK_STANDBY (1<<27)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3<<25)
+#define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20
+#define EDP_PSR_SKIP_AUX_EXIT (1<<12)
+#define EDP_PSR_TP1_TP2_SEL (0<<11)
+#define EDP_PSR_TP1_TP3_SEL (1<<11)
+#define EDP_PSR_TP2_TP3_TIME_500us (0<<8)
+#define EDP_PSR_TP2_TP3_TIME_100us (1<<8)
+#define EDP_PSR_TP2_TP3_TIME_2500us (2<<8)
+#define EDP_PSR_TP2_TP3_TIME_0us (3<<8)
+#define EDP_PSR_TP1_TIME_500us (0<<4)
+#define EDP_PSR_TP1_TIME_100us (1<<4)
+#define EDP_PSR_TP1_TIME_2500us (2<<4)
+#define EDP_PSR_TP1_TIME_0us (3<<4)
+#define EDP_PSR_IDLE_FRAME_SHIFT 0
+
+#define EDP_PSR_AUX_CTL 0x64810
+#define EDP_PSR_AUX_DATA1 0x64814
+#define EDP_PSR_DPCD_COMMAND 0x80060000
+#define EDP_PSR_AUX_DATA2 0x64818
+#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
+#define EDP_PSR_AUX_DATA3 0x6481c
+#define EDP_PSR_AUX_DATA4 0x64820
+#define EDP_PSR_AUX_DATA5 0x64824
+
+#define EDP_PSR_STATUS_CTL 0x64840
+#define EDP_PSR_STATUS_STATE_MASK (7<<29)
+#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
+#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
+#define EDP_PSR_STATUS_STATE_SRDENT (2<<29)
+#define EDP_PSR_STATUS_STATE_BUFOFF (3<<29)
+#define EDP_PSR_STATUS_STATE_BUFON (4<<29)
+#define EDP_PSR_STATUS_STATE_AUXACK (5<<29)
+#define EDP_PSR_STATUS_STATE_SRDOFFACK (6<<29)
+#define EDP_PSR_STATUS_LINK_MASK (3<<26)
+#define EDP_PSR_STATUS_LINK_FULL_OFF (0<<26)
+#define EDP_PSR_STATUS_LINK_FULL_ON (1<<26)
+#define EDP_PSR_STATUS_LINK_STANDBY (2<<26)
+#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20
+#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f
+#define EDP_PSR_STATUS_COUNT_SHIFT 16
+#define EDP_PSR_STATUS_COUNT_MASK 0xf
+#define EDP_PSR_STATUS_AUX_ERROR (1<<15)
+#define EDP_PSR_STATUS_AUX_SENDING (1<<12)
+#define EDP_PSR_STATUS_SENDING_IDLE (1<<9)
+#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1<<8)
+#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
+#define EDP_PSR_STATUS_IDLE_MASK 0xf
+
+#define EDP_PSR_PERF_CNT 0x64844
+#define EDP_PSR_PERF_CNT_MASK 0xffffff
+
+#define EDP_PSR_DEBUG_CTL 0x64860
+#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
+#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
+#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
+
 /* VGA port control */
 #define ADPA 0x61100
 #define PCH_ADPA 0xe1100
@@ -1856,10 +1941,16 @@
 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
 
 #define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114)
-/* HDMI/DP bits are gen4+ */
-#define PORTB_HOTPLUG_LIVE_STATUS (1 << 29)
+/*
+ * HDMI/DP bits are gen4+
+ *
+ * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
+ * Please check the detailed lore in the commit message for experimental
+ * evidence.
+ */
+#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29)
 #define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
-#define PORTD_HOTPLUG_LIVE_STATUS (1 << 27)
+#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27)
 #define PORTD_HOTPLUG_INT_STATUS (3 << 21)
 #define PORTC_HOTPLUG_INT_STATUS (3 << 19)
 #define PORTB_HOTPLUG_INT_STATUS (3 << 17)
@@ -2045,6 +2136,7 @@
  * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
  * of the infoframe structure specified by CEA-861. */
 #define VIDEO_DIP_DATA_SIZE 32
+#define VIDEO_DIP_VSC_DATA_SIZE 36
 #define VIDEO_DIP_CTL 0x61170
 /* Pre HSW: */
 #define VIDEO_DIP_ENABLE (1 << 31)
@@ -2192,6 +2284,8 @@
 #define BLC_PWM_CPU_CTL2 0x48250
 #define BLC_PWM_CPU_CTL 0x48254
 
+#define HSW_BLC_PWM2_CTL 0x48350
+
 /* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
  * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
 #define BLC_PWM_PCH_CTL1 0xc8250
@@ -2200,6 +2294,12 @@
 #define BLM_PCH_POLARITY (1 << 29)
 #define BLC_PWM_PCH_CTL2 0xc8254
 
+#define UTIL_PIN_CTL 0x48400
+#define UTIL_PIN_ENABLE (1 << 31)
+
+#define PCH_GTC_CTL 0xe7000
+#define PCH_GTC_ENABLE (1 << 31)
+
 /* TV port control */
 #define TV_CTL 0x68000
 /** Enables the TV encoder */
@@ -3113,9 +3213,6 @@
 #define MLTR_WM2_SHIFT 8
 /* the unit of memory self-refresh latency time is 0.5us */
 #define ILK_SRLT_MASK 0x3f
-#define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK)
-#define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT)
-#define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT)
 
 /* define the fifo size on Ironlake */
 #define ILK_DISPLAY_FIFO 128
@@ -3162,12 +3259,6 @@
 #define SSKPD_WM2_SHIFT 16
 #define SSKPD_WM3_SHIFT 24
 
-#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK)
-#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT)
-#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT)
-#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT)
-#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT)
-
 /*
  * The two pipe frame counter registers are not synchronized, so
  * reading a stable value is somewhat tricky. The following code
@@ -3718,6 +3809,9 @@
 #define DE_PLANEA_FLIP_DONE_IVB (1<<3)
 #define DE_PIPEA_VBLANK_IVB (1<<0)
 
+#define DE_PIPE_VBLANK_ILK(pipe) (1 << ((pipe * 8) + 7))
+#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5))
+
 #define VLV_MASTER_IER 0x4400c /* Gunit master IER */
 #define MASTER_INTERRUPT_ENABLE (1<<31)
 
@@ -3880,6 +3974,7 @@
 #define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
 #define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
 #define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0)
+#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
 
 /* digital port hotplug */
 #define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
@@ -4073,6 +4168,8 @@
 	_TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
 #define HSW_TVIDEO_DIP_AVI_DATA(trans) \
 	_TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
+#define HSW_TVIDEO_DIP_VS_DATA(trans) \
+	_TRANSCODER(trans, HSW_VIDEO_DIP_VS_DATA_A, HSW_VIDEO_DIP_VS_DATA_B)
 #define HSW_TVIDEO_DIP_SPD_DATA(trans) \
 	_TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
 #define HSW_TVIDEO_DIP_GCP(trans) \
@@ -4080,6 +4177,13 @@
 #define HSW_TVIDEO_DIP_VSC_DATA(trans) \
 	_TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
 
+#define HSW_STEREO_3D_CTL_A 0x70020
+#define S3D_ENABLE (1<<31)
+#define HSW_STEREO_3D_CTL_B 0x71020
+
+#define HSW_STEREO_3D_CTL(trans) \
+	_TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_A)
+
 #define _PCH_TRANS_HTOTAL_B 0xe1000
 #define _PCH_TRANS_HBLANK_B 0xe1004
 #define _PCH_TRANS_HSYNC_B 0xe1008
@@ -4432,7 +4536,7 @@
 #define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
 #define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
 #define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22)
 
 /* legacy values */
 #define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
@@ -4468,6 +4572,10 @@
 #define GT_FIFO_FREE_ENTRIES 0x120008
 #define GT_FIFO_NUM_RESERVED_ENTRIES 20
 
+#define HSW_IDICR 0x9008
+#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
+#define HSW_EDRAM_PRESENT 0x120010
+
 #define GEN6_UCGCTL1 0x9400
 # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
 # define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
@@ -4736,8 +4844,8 @@
 #define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */
 #define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */
 #define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */
-#define HSW_PWR_WELL_ENABLE (1<<31)
-#define HSW_PWR_WELL_STATE (1<<30)
+#define HSW_PWR_WELL_ENABLE_REQUEST (1<<31)
+#define HSW_PWR_WELL_STATE_ENABLED (1<<30)
 #define HSW_PWR_WELL_CTL5 0x45410
 #define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
 #define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
@@ -4858,7 +4966,8 @@
 #define SBI_SSCAUXDIV6 0x0610
 #define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
 #define SBI_DBUFF0 0x2a00
-#define SBI_DBUFF0_ENABLE (1<<0)
+#define SBI_GEN0 0x1f00
+#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0)
 
 /* LPT PIXCLK_GATE */
 #define PIXCLK_GATE 0xC6020
@@ -4924,7 +5033,14 @@
 #define LCPLL_CLK_FREQ_450 (0<<26)
 #define LCPLL_CD_CLOCK_DISABLE (1<<25)
 #define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
+#define LCPLL_POWER_DOWN_ALLOW (1<<22)
 #define LCPLL_CD_SOURCE_FCLK (1<<21)
+#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
+
+#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
+#define D_COMP_COMP_FORCE (1<<8)
+#define D_COMP_COMP_DISABLE (1<<0)
 
 /* Pipe WM_LINETIME - watermark line time */
 #define PIPE_WM_LINETIME_A 0x45270
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 6875b5654c63..a777e7f3b0df 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -409,6 +409,71 @@ static const struct attribute *gen6_attrs[] = {
 	NULL,
 };
 
+static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
+				struct bin_attribute *attr, char *buf,
+				loff_t off, size_t count)
+{
+
+	struct device *kdev = container_of(kobj, struct device, kobj);
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct i915_error_state_file_priv error_priv;
+	struct drm_i915_error_state_buf error_str;
+	ssize_t ret_count = 0;
+	int ret;
+
+	memset(&error_priv, 0, sizeof(error_priv));
+
+	ret = i915_error_state_buf_init(&error_str, count, off);
+	if (ret)
+		return ret;
+
+	error_priv.dev = dev;
+	i915_error_state_get(dev, &error_priv);
+
+	ret = i915_error_state_to_str(&error_str, &error_priv);
+	if (ret)
+		goto out;
+
+	ret_count = count < error_str.bytes ? count : error_str.bytes;
+
+	memcpy(buf, error_str.buf, ret_count);
+out:
+	i915_error_state_put(&error_priv);
+	i915_error_state_buf_release(&error_str);
+
+	return ret ?: ret_count;
+}
+
+static ssize_t error_state_write(struct file *file, struct kobject *kobj,
+				 struct bin_attribute *attr, char *buf,
+				 loff_t off, size_t count)
+{
+	struct device *kdev = container_of(kobj, struct device, kobj);
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	int ret;
+
+	DRM_DEBUG_DRIVER("Resetting error state\n");
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	i915_destroy_error_state(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	return count;
+}
+
+static struct bin_attribute error_state_attr = {
+	.attr.name = "error",
+	.attr.mode = S_IRUSR | S_IWUSR,
+	.size = 0,
+	.read = error_state_read,
+	.write = error_state_write,
+};
+
 void i915_setup_sysfs(struct drm_device *dev)
 {
 	int ret;
@@ -432,10 +497,16 @@ void i915_setup_sysfs(struct drm_device *dev)
 		if (ret)
 			DRM_ERROR("gen6 sysfs setup failed\n");
 	}
+
+	ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
+				    &error_state_attr);
+	if (ret)
+		DRM_ERROR("error_state sysfs setup failed\n");
 }
 
 void i915_teardown_sysfs(struct drm_device *dev)
 {
+	sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
 	sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
 	device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
 #ifdef CONFIG_PM
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 3db4a6817713..e2c5ee6f6194 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -33,47 +33,52 @@ TRACE_EVENT(i915_gem_object_create,
 	    TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
 );
 
-TRACE_EVENT(i915_gem_object_bind,
-	    TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
-	    TP_ARGS(obj, mappable),
+TRACE_EVENT(i915_vma_bind,
+	    TP_PROTO(struct i915_vma *vma, bool mappable),
+	    TP_ARGS(vma, mappable),
 
 	    TP_STRUCT__entry(
 			     __field(struct drm_i915_gem_object *, obj)
+			     __field(struct i915_address_space *, vm)
 			     __field(u32, offset)
 			     __field(u32, size)
 			     __field(bool, mappable)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->obj = obj;
-			   __entry->offset = obj->gtt_space->start;
-			   __entry->size = obj->gtt_space->size;
+			   __entry->obj = vma->obj;
+			   __entry->vm = vma->vm;
+			   __entry->offset = vma->node.start;
+			   __entry->size = vma->node.size;
 			   __entry->mappable = mappable;
 			   ),
 
-	    TP_printk("obj=%p, offset=%08x size=%x%s",
+	    TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
 		      __entry->obj, __entry->offset, __entry->size,
-		      __entry->mappable ? ", mappable" : "")
+		      __entry->mappable ? ", mappable" : "",
+		      __entry->vm)
 );
 
-TRACE_EVENT(i915_gem_object_unbind,
-	    TP_PROTO(struct drm_i915_gem_object *obj),
-	    TP_ARGS(obj),
+TRACE_EVENT(i915_vma_unbind,
+	    TP_PROTO(struct i915_vma *vma),
+	    TP_ARGS(vma),
 
 	    TP_STRUCT__entry(
 			     __field(struct drm_i915_gem_object *, obj)
+			     __field(struct i915_address_space *, vm)
 			     __field(u32, offset)
 			     __field(u32, size)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->obj = obj;
-			   __entry->offset = obj->gtt_space->start;
-			   __entry->size = obj->gtt_space->size;
+			   __entry->obj = vma->obj;
+			   __entry->vm = vma->vm;
+			   __entry->offset = vma->node.start;
+			   __entry->size = vma->node.size;
 			   ),
 
-	    TP_printk("obj=%p, offset=%08x size=%x",
-		      __entry->obj, __entry->offset, __entry->size)
+	    TP_printk("obj=%p, offset=%08x size=%x vm=%p",
+		      __entry->obj, __entry->offset, __entry->size, __entry->vm)
 );
 
 TRACE_EVENT(i915_gem_object_change_domain,
@@ -406,10 +411,12 @@ TRACE_EVENT(i915_flip_complete,
 	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
 );
 
-TRACE_EVENT(i915_reg_rw,
-	    TP_PROTO(bool write, u32 reg, u64 val, int len),
+TRACE_EVENT_CONDITION(i915_reg_rw,
+	    TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace),
+
+	    TP_ARGS(write, reg, val, len, trace),
 
-	    TP_ARGS(write, reg, val, len),
+	    TP_CONDITION(trace),
 
 	    TP_STRUCT__entry(
 		__field(u64, val)
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index bcbbaea2a78e..57fe1ae32a0d 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -28,7 +28,7 @@ static const u8 intel_dsm_guid[] = {
 	0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
 };
 
-static int intel_dsm(acpi_handle handle, int func, int arg)
+static int intel_dsm(acpi_handle handle, int func)
 {
 	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
 	struct acpi_object_list input;
@@ -46,8 +46,9 @@ static int intel_dsm(acpi_handle handle, int func, int arg)
 	params[1].integer.value = INTEL_DSM_REVISION_ID;
 	params[2].type = ACPI_TYPE_INTEGER;
 	params[2].integer.value = func;
-	params[3].type = ACPI_TYPE_INTEGER;
-	params[3].integer.value = arg;
+	params[3].type = ACPI_TYPE_PACKAGE;
+	params[3].package.count = 0;
+	params[3].package.elements = NULL;
 
 	ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
 	if (ret) {
@@ -151,8 +152,9 @@ static void intel_dsm_platform_mux_info(void)
 	params[1].integer.value = INTEL_DSM_REVISION_ID;
 	params[2].type = ACPI_TYPE_INTEGER;
 	params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
-	params[3].type = ACPI_TYPE_INTEGER;
-	params[3].integer.value = 0;
+	params[3].type = ACPI_TYPE_PACKAGE;
+	params[3].package.count = 0;
+	params[3].package.elements = NULL;
 
 	ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
 				   &output);
@@ -205,7 +207,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
 		return false;
 	}
 
-	ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
+	ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS);
 	if (ret < 0) {
 		DRM_DEBUG_KMS("failed to get supported _DSM functions\n");
 		return false;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 3acec8c48166..b5a3875f22c7 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -52,15 +52,14 @@ struct intel_crt {
 	u32 adpa_reg;
 };
 
-static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
 {
-	return container_of(intel_attached_encoder(connector),
-			    struct intel_crt, base);
+	return container_of(encoder, struct intel_crt, base);
 }
 
-static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
+static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
 {
-	return container_of(encoder, struct intel_crt, base);
+	return intel_encoder_to_crt(intel_attached_encoder(connector));
 }
 
 static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
@@ -238,17 +237,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
 	return true;
 }
 
-static void intel_crt_mode_set(struct drm_encoder *encoder,
-			       struct drm_display_mode *mode,
-			       struct drm_display_mode *adjusted_mode)
+static void intel_crt_mode_set(struct intel_encoder *encoder)
 {
 
-	struct drm_device *dev = encoder->dev;
-	struct drm_crtc *crtc = encoder->crtc;
-	struct intel_crt *crt =
-		intel_encoder_to_crt(to_intel_encoder(encoder));
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_crt *crt = intel_encoder_to_crt(encoder);
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
 	u32 adpa;
 
 	if (HAS_PCH_SPLIT(dev))
@@ -265,14 +261,14 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 	if (HAS_PCH_LPT(dev))
 		; /* Those bits don't exist here */
 	else if (HAS_PCH_CPT(dev))
-		adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
-	else if (intel_crtc->pipe == 0)
+		adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
+	else if (crtc->pipe == 0)
 		adpa |= ADPA_PIPE_A_SELECT;
 	else
 		adpa |= ADPA_PIPE_B_SELECT;
 
 	if (!HAS_PCH_SPLIT(dev))
-		I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
+		I915_WRITE(BCLRPAT(crtc->pipe), 0);
 
 	I915_WRITE(crt->adpa_reg, adpa);
 }
@@ -613,6 +609,10 @@ intel_crt_detect(struct drm_connector *connector, bool force)
 	enum drm_connector_status status;
 	struct intel_load_detect_pipe tmp;
 
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
+		      connector->base.id, drm_get_connector_name(connector),
+		      force);
+
 	if (I915_HAS_HOTPLUG(dev)) {
 		/* We can not rely on the HPD pin always being correctly wired
 		 * up, for example many KVM do not pass it through, and so
@@ -707,10 +707,6 @@ static void intel_crt_reset(struct drm_connector *connector)
  * Routines for controlling stuff on the analog port
  */
 
-static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
-	.mode_set = intel_crt_mode_set,
-};
-
 static const struct drm_connector_funcs intel_crt_connector_funcs = {
 	.reset = intel_crt_reset,
 	.dpms = intel_crt_dpms,
@@ -800,6 +796,7 @@ void intel_crt_init(struct drm_device *dev)
 	crt->adpa_reg = ADPA;
 
 	crt->base.compute_config = intel_crt_compute_config;
+	crt->base.mode_set = intel_crt_mode_set;
 	crt->base.disable = intel_disable_crt;
 	crt->base.enable = intel_enable_crt;
 	crt->base.get_config = intel_crt_get_config;
@@ -811,7 +808,6 @@ void intel_crt_init(struct drm_device *dev)
 	crt->base.get_hw_state = intel_crt_get_hw_state;
 	intel_connector->get_hw_state = intel_connector_get_hw_state;
 
-	drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
 	drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 324211ac9c55..63aca49d11a8 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -84,25 +84,17 @@ static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
  * in either FDI or DP modes only, as HDMI connections will work with both
  * of those
  */
-static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
-				      bool use_fdi_mode)
+static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg;
 	int i;
-	const u32 *ddi_translations = ((use_fdi_mode) ?
+	const u32 *ddi_translations = (port == PORT_E) ?
 				      hsw_ddi_translations_fdi :
-				      hsw_ddi_translations_dp);
+				      hsw_ddi_translations_dp;
 
-	DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
-			 port_name(port),
-			 use_fdi_mode ? "FDI" : "DP");
-
-	WARN((use_fdi_mode && (port != PORT_E)),
-	     "Programming port %c in FDI mode, this probably will not work.\n",
-	     port_name(port));
-
-	for (i=0, reg=DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+	for (i = 0, reg = DDI_BUF_TRANS(port);
+	     i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
 		I915_WRITE(reg, ddi_translations[i]);
 		reg += 4;
 	}
@@ -118,14 +110,8 @@ void intel_prepare_ddi(struct drm_device *dev)
 	if (!HAS_DDI(dev))
 		return;
 
-	for (port = PORT_A; port < PORT_E; port++)
-		intel_prepare_ddi_buffers(dev, port, false);
-
-	/* DDI E is the suggested one to work in FDI mode, so program is as such
-	 * by default. It will have to be re-programmed in case a digital DP
-	 * output will be detected on it
-	 */
-	intel_prepare_ddi_buffers(dev, PORT_E, true);
+	for (port = PORT_A; port <= PORT_E; port++)
+		intel_prepare_ddi_buffers(dev, port);
 }
 
 static const long hsw_ddi_buf_ctl_values[] = {
@@ -281,43 +267,40 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
 		DRM_ERROR("FDI link training failed!\n");
 }
 
-static void intel_ddi_mode_set(struct drm_encoder *encoder,
-			       struct drm_display_mode *mode,
-			       struct drm_display_mode *adjusted_mode)
+static void intel_ddi_mode_set(struct intel_encoder *encoder)
 {
-	struct drm_crtc *crtc = encoder->crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-	int port = intel_ddi_get_encoder_port(intel_encoder);
-	int pipe = intel_crtc->pipe;
-	int type = intel_encoder->type;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	int port = intel_ddi_get_encoder_port(encoder);
+	int pipe = crtc->pipe;
+	int type = encoder->type;
+	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
 
 	DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n",
 		      port_name(port), pipe_name(pipe));
 
-	intel_crtc->eld_vld = false;
+	crtc->eld_vld = false;
 	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 		struct intel_digital_port *intel_dig_port =
-			enc_to_dig_port(encoder);
+			enc_to_dig_port(&encoder->base);
 
-		intel_dp->DP = intel_dig_port->port_reversal |
+		intel_dp->DP = intel_dig_port->saved_port_bits |
 			       DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
 		intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
 
 		if (intel_dp->has_audio) {
 			DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
-					 pipe_name(intel_crtc->pipe));
+					 pipe_name(crtc->pipe));
 
 			/* write eld */
 			DRM_DEBUG_DRIVER("DP audio: write eld information\n");
-			intel_write_eld(encoder, adjusted_mode);
+			intel_write_eld(&encoder->base, adjusted_mode);
 		}
 
 		intel_dp_init_link_config(intel_dp);
 
 	} else if (type == INTEL_OUTPUT_HDMI) {
-		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 
 		if (intel_hdmi->has_audio) {
 			/* Proper support for digital audio needs a new logic
@@ -325,14 +308,14 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
 			 * patch bombing.
 			 */
 			DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
-					 pipe_name(intel_crtc->pipe));
+					 pipe_name(crtc->pipe));
 
 			/* write eld */
 			DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
-			intel_write_eld(encoder, adjusted_mode);
+			intel_write_eld(&encoder->base, adjusted_mode);
 		}
 
-		intel_hdmi->set_infoframes(encoder, adjusted_mode);
+		intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
 	}
 }
 
@@ -1109,7 +1092,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
 		 * enabling the port.
 		 */
 		I915_WRITE(DDI_BUF_CTL(port),
-			   intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
+			   intel_dig_port->saved_port_bits |
+			   DDI_BUF_CTL_ENABLE);
 	} else if (type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
@@ -1117,6 +1101,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
 		intel_dp_stop_link_train(intel_dp);
 
 		ironlake_edp_backlight_on(intel_dp);
+		intel_edp_psr_enable(intel_dp);
 	}
 
 	if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
@@ -1147,16 +1132,20 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
 	if (type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+		intel_edp_psr_disable(intel_dp);
 		ironlake_edp_backlight_off(intel_dp);
 	}
 }
 
 int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
 {
-	if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
+	uint32_t lcpll = I915_READ(LCPLL_CTL);
+
+	if (lcpll & LCPLL_CD_SOURCE_FCLK)
+		return 800000;
+	else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
 		return 450000;
-	else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
-		 LCPLL_CLK_FREQ_450)
+	else if ((lcpll & LCPLL_CLK_FREQ_MASK) == LCPLL_CLK_FREQ_450)
 		return 450000;
 	else if (IS_ULT(dev_priv->dev))
 		return 337500;
@@ -1308,10 +1297,6 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
 	.destroy = intel_ddi_destroy,
 };
 
-static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
-	.mode_set = intel_ddi_mode_set,
-};
-
 void intel_ddi_init(struct drm_device *dev, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1336,9 +1321,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 
 	drm_encoder_init(dev, encoder, &intel_ddi_funcs,
 			 DRM_MODE_ENCODER_TMDS);
-	drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
 
 	intel_encoder->compute_config = intel_ddi_compute_config;
+	intel_encoder->mode_set = intel_ddi_mode_set;
 	intel_encoder->enable = intel_enable_ddi;
 	intel_encoder->pre_enable = intel_ddi_pre_enable;
 	intel_encoder->disable = intel_disable_ddi;
@@ -1347,8 +1332,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	intel_encoder->get_config = intel_ddi_get_config;
 
 	intel_dig_port->port = port;
-	intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
-					DDI_BUF_PORT_REVERSAL;
+	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+					  (DDI_BUF_PORT_REVERSAL |
+					   DDI_A_4_LANES);
 	intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
 
 	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 85f3eb74d2b7..38452d82ac7d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -45,6 +45,15 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
 static void intel_increase_pllclock(struct drm_crtc *crtc);
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+				struct intel_crtc_config *pipe_config);
+static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
+				    struct intel_crtc_config *pipe_config);
+
+static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+			  int x, int y, struct drm_framebuffer *old_fb);
+
+
 typedef struct {
 	int min, max;
 } intel_range_t;
@@ -54,7 +63,6 @@ typedef struct {
 	int p2_slow, p2_fast;
 } intel_p2_t;
 
-#define INTEL_P2_NUM 2
 typedef struct intel_limit intel_limit_t;
 struct intel_limit {
 	intel_range_t dot, vco, n, m, m1, m2, p, p1;
@@ -84,7 +92,7 @@ intel_fdi_link_freq(struct drm_device *dev)
 		return 27;
 }
 
-static const intel_limit_t intel_limits_i8xx_dvo = {
+static const intel_limit_t intel_limits_i8xx_dac = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 930000, .max = 1400000 },
 	.n = { .min = 3, .max = 16 },
@@ -97,6 +105,19 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
 		.p2_slow = 4, .p2_fast = 2 },
 };
 
+static const intel_limit_t intel_limits_i8xx_dvo = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 930000, .max = 1400000 },
+	.n = { .min = 3, .max = 16 },
+	.m = { .min = 96, .max = 140 },
+	.m1 = { .min = 18, .max = 26 },
+	.m2 = { .min = 6, .max = 16 },
+	.p = { .min = 4, .max = 128 },
+	.p1 = { .min = 2, .max = 33 },
+	.p2 = { .dot_limit = 165000,
+		.p2_slow = 4, .p2_fast = 4 },
+};
+
 static const intel_limit_t intel_limits_i8xx_lvds = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 930000, .max = 1400000 },
@@ -405,8 +426,10 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
 	} else {
 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 			limit = &intel_limits_i8xx_lvds;
-		else
+		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
 			limit = &intel_limits_i8xx_dvo;
+		else
+			limit = &intel_limits_i8xx_dac;
 	}
 	return limit;
 }
@@ -667,7 +690,7 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
 {
 	u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
 	u32 m, n, fastclk;
-	u32 updrate, minupdate, fracbits, p;
+	u32 updrate, minupdate, p;
 	unsigned long bestppm, ppm, absppm;
 	int dotclk, flag;
 
@@ -678,7 +701,6 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
 	fastclk = dotclk / (2*100);
 	updrate = 0;
 	minupdate = 19200;
-	fracbits = 1;
 	n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
 	bestm1 = bestm2 = bestp1 = bestp2 = 0;
 
@@ -892,8 +914,8 @@ static const char *state_string(bool enabled)
 }
 
 /* Only for pre-ILK configs */
-static void assert_pll(struct drm_i915_private *dev_priv,
+void assert_pll(struct drm_i915_private *dev_priv,
 		       enum pipe pipe, bool state)
 {
 	int reg;
 	u32 val;
@@ -906,10 +928,8 @@ static void assert_pll(struct drm_i915_private *dev_priv,
906 "PLL state assertion failure (expected %s, current %s)\n", 928 "PLL state assertion failure (expected %s, current %s)\n",
907 state_string(state), state_string(cur_state)); 929 state_string(state), state_string(cur_state));
908} 930}
909#define assert_pll_enabled(d, p) assert_pll(d, p, true)
910#define assert_pll_disabled(d, p) assert_pll(d, p, false)
911 931
912static struct intel_shared_dpll * 932struct intel_shared_dpll *
913intel_crtc_to_shared_dpll(struct intel_crtc *crtc) 933intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
914{ 934{
915 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 935 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
@@ -921,9 +941,9 @@ intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
 }
 
 /* For ILK+ */
-static void assert_shared_dpll(struct drm_i915_private *dev_priv,
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
 			       struct intel_shared_dpll *pll,
 			       bool state)
 {
 	bool cur_state;
 	struct intel_dpll_hw_state hw_state;
@@ -942,8 +962,6 @@ static void assert_shared_dpll(struct drm_i915_private *dev_priv,
942 "%s assertion failure (expected %s, current %s)\n", 962 "%s assertion failure (expected %s, current %s)\n",
943 pll->name, state_string(state), state_string(cur_state)); 963 pll->name, state_string(state), state_string(cur_state));
944} 964}
945#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
946#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
947 965
948static void assert_fdi_tx(struct drm_i915_private *dev_priv, 966static void assert_fdi_tx(struct drm_i915_private *dev_priv,
949 enum pipe pipe, bool state) 967 enum pipe pipe, bool state)
@@ -1007,15 +1025,19 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
 	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
 }
 
-static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
-				      enum pipe pipe)
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+		       enum pipe pipe, bool state)
 {
 	int reg;
 	u32 val;
+	bool cur_state;
 
 	reg = FDI_RX_CTL(pipe);
 	val = I915_READ(reg);
-	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
+	cur_state = !!(val & FDI_RX_PLL_ENABLE);
+	WARN(cur_state != state,
+	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
 }
 
 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
@@ -1111,7 +1133,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
 	}
 
 	/* Need to check both planes against the pipe */
-	for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+	for_each_pipe(i) {
 		reg = DSPCNTR(i);
 		val = I915_READ(reg);
 		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
@@ -1301,51 +1323,92 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
 	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
 }
 
-/**
- * intel_enable_pll - enable a PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to enable
- *
- * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
- * make sure the PLL reg is writable first though, since the panel write
- * protect mechanism may be enabled.
- *
- * Note! This is for pre-ILK only.
- *
- * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
- */
-static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+static void vlv_enable_pll(struct intel_crtc *crtc)
 {
-	int reg;
-	u32 val;
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int reg = DPLL(crtc->pipe);
+	u32 dpll = crtc->config.dpll_hw_state.dpll;
 
-	assert_pipe_disabled(dev_priv, pipe);
+	assert_pipe_disabled(dev_priv, crtc->pipe);
 
 	/* No really, not for ILK+ */
-	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
+	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
 
 	/* PLL is protected by panel, make sure we can write it */
 	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
-		assert_panel_unlocked(dev_priv, pipe);
+		assert_panel_unlocked(dev_priv, crtc->pipe);
 
-	reg = DPLL(pipe);
-	val = I915_READ(reg);
-	val |= DPLL_VCO_ENABLE;
+	I915_WRITE(reg, dpll);
+	POSTING_READ(reg);
+	udelay(150);
+
+	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
+
+	I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
+	POSTING_READ(DPLL_MD(crtc->pipe));
 
 	/* We do this three times for luck */
-	I915_WRITE(reg, val);
+	I915_WRITE(reg, dpll);
 	POSTING_READ(reg);
 	udelay(150); /* wait for warmup */
-	I915_WRITE(reg, val);
+	I915_WRITE(reg, dpll);
 	POSTING_READ(reg);
 	udelay(150); /* wait for warmup */
-	I915_WRITE(reg, val);
+	I915_WRITE(reg, dpll);
+	POSTING_READ(reg);
+	udelay(150); /* wait for warmup */
+}
+
+static void i9xx_enable_pll(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int reg = DPLL(crtc->pipe);
+	u32 dpll = crtc->config.dpll_hw_state.dpll;
+
+	assert_pipe_disabled(dev_priv, crtc->pipe);
+
+	/* No really, not for ILK+ */
+	BUG_ON(dev_priv->info->gen >= 5);
+
+	/* PLL is protected by panel, make sure we can write it */
+	if (IS_MOBILE(dev) && !IS_I830(dev))
+		assert_panel_unlocked(dev_priv, crtc->pipe);
+
+	I915_WRITE(reg, dpll);
+
+	/* Wait for the clocks to stabilize. */
+	POSTING_READ(reg);
+	udelay(150);
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		I915_WRITE(DPLL_MD(crtc->pipe),
+			   crtc->config.dpll_hw_state.dpll_md);
+	} else {
+		/* The pixel multiplier can only be updated once the
+		 * DPLL is enabled and the clocks are stable.
+		 *
+		 * So write it again.
+		 */
+		I915_WRITE(reg, dpll);
+	}
+
+	/* We do this three times for luck */
+	I915_WRITE(reg, dpll);
+	POSTING_READ(reg);
+	udelay(150); /* wait for warmup */
+	I915_WRITE(reg, dpll);
+	POSTING_READ(reg);
+	udelay(150); /* wait for warmup */
+	I915_WRITE(reg, dpll);
 	POSTING_READ(reg);
 	udelay(150); /* wait for warmup */
 }
 
 /**
- * intel_disable_pll - disable a PLL
+ * i9xx_disable_pll - disable a PLL
  * @dev_priv: i915 private structure
  * @pipe: pipe PLL to disable
  *
@@ -1353,11 +1416,8 @@ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
  *
  * Note! This is for pre-ILK only.
  */
-static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
-	int reg;
-	u32 val;
-
 	/* Don't disable pipe A or pipe A PLLs if needed */
 	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
 		return;
@@ -1365,11 +1425,8 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 	/* Make sure the pipe isn't still relying on us */
 	assert_pipe_disabled(dev_priv, pipe);
 
-	reg = DPLL(pipe);
-	val = I915_READ(reg);
-	val &= ~DPLL_VCO_ENABLE;
-	I915_WRITE(reg, val);
-	POSTING_READ(reg);
+	I915_WRITE(DPLL(pipe), 0);
+	POSTING_READ(DPLL(pipe));
 }
 
 void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
@@ -1819,7 +1876,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 	return 0;
 
 err_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_unpin_from_display_plane(obj);
 err_interruptible:
 	dev_priv->mm.interruptible = true;
 	return ret;
@@ -1828,7 +1885,7 @@ err_interruptible:
1828void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) 1885void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
1829{ 1886{
1830 i915_gem_object_unpin_fence(obj); 1887 i915_gem_object_unpin_fence(obj);
1831 i915_gem_object_unpin(obj); 1888 i915_gem_object_unpin_from_display_plane(obj);
1832} 1889}
1833 1890
1834/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel 1891/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
@@ -1942,16 +1999,17 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1942 intel_crtc->dspaddr_offset = linear_offset; 1999 intel_crtc->dspaddr_offset = linear_offset;
1943 } 2000 }
1944 2001
1945 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", 2002 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
1946 obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); 2003 i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2004 fb->pitches[0]);
1947 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 2005 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
1948 if (INTEL_INFO(dev)->gen >= 4) { 2006 if (INTEL_INFO(dev)->gen >= 4) {
1949 I915_MODIFY_DISPBASE(DSPSURF(plane), 2007 I915_MODIFY_DISPBASE(DSPSURF(plane),
1950 obj->gtt_offset + intel_crtc->dspaddr_offset); 2008 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
1951 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 2009 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
1952 I915_WRITE(DSPLINOFF(plane), linear_offset); 2010 I915_WRITE(DSPLINOFF(plane), linear_offset);
1953 } else 2011 } else
1954 I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset); 2012 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
1955 POSTING_READ(reg); 2013 POSTING_READ(reg);
1956 2014
1957 return 0; 2015 return 0;
@@ -2031,11 +2089,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2031 fb->pitches[0]); 2089 fb->pitches[0]);
2032 linear_offset -= intel_crtc->dspaddr_offset; 2090 linear_offset -= intel_crtc->dspaddr_offset;
2033 2091
2034 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", 2092 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2035 obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); 2093 i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2094 fb->pitches[0]);
2036 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 2095 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2037 I915_MODIFY_DISPBASE(DSPSURF(plane), 2096 I915_MODIFY_DISPBASE(DSPSURF(plane),
2038 obj->gtt_offset + intel_crtc->dspaddr_offset); 2097 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2039 if (IS_HASWELL(dev)) { 2098 if (IS_HASWELL(dev)) {
2040 I915_WRITE(DSPOFFSET(plane), (y << 16) | x); 2099 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2041 } else { 2100 } else {
@@ -2183,6 +2242,20 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2183 return ret; 2242 return ret;
2184 } 2243 }
2185 2244
2245 /* Update pipe size and adjust fitter if needed */
2246 if (i915_fastboot) {
2247 I915_WRITE(PIPESRC(intel_crtc->pipe),
2248 ((crtc->mode.hdisplay - 1) << 16) |
2249 (crtc->mode.vdisplay - 1));
2250 if (!intel_crtc->config.pch_pfit.size &&
2251 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2252 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2253 I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
2254 I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2255 I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2256 }
2257 }
2258
2186 ret = dev_priv->display.update_plane(crtc, fb, x, y); 2259 ret = dev_priv->display.update_plane(crtc, fb, x, y);
2187 if (ret) { 2260 if (ret) {
2188 intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj); 2261 intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
@@ -2203,6 +2276,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2203 } 2276 }
2204 2277
2205 intel_update_fbc(dev); 2278 intel_update_fbc(dev);
2279 intel_edp_psr_update(dev);
2206 mutex_unlock(&dev->struct_mutex); 2280 mutex_unlock(&dev->struct_mutex);
2207 2281
2208 intel_crtc_update_sarea_pos(crtc, x, y); 2282 intel_crtc_update_sarea_pos(crtc, x, y);
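A note on the fastboot hunk above: PIPESRC stores both visible dimensions minus one, with the width in the high word. A hedged helper (the name is ours) showing the encoding:

	/* ((hdisplay - 1) << 16) | (vdisplay - 1): a 1920x1080 mode is
	 * therefore programmed as 0x077F0437. */
	static u32 sketch_pipesrc_value(int hdisplay, int vdisplay)
	{
		return ((u32)(hdisplay - 1) << 16) | (u32)(vdisplay - 1);
	}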
@@ -2523,7 +2597,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2523 struct drm_i915_private *dev_priv = dev->dev_private; 2597 struct drm_i915_private *dev_priv = dev->dev_private;
2524 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2598 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2525 int pipe = intel_crtc->pipe; 2599 int pipe = intel_crtc->pipe;
2526 u32 reg, temp, i; 2600 u32 reg, temp, i, j;
2527 2601
2528 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit 2602 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2529 for train result */ 2603 for train result */
@@ -2539,97 +2613,99 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2539 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", 2613 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
2540 I915_READ(FDI_RX_IIR(pipe))); 2614 I915_READ(FDI_RX_IIR(pipe)));
2541 2615
2542 /* enable CPU FDI TX and PCH FDI RX */ 2616 /* Try each vswing and preemphasis setting twice before moving on */
2543 reg = FDI_TX_CTL(pipe); 2617 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
2544 temp = I915_READ(reg); 2618 /* disable first in case we need to retry */
2545 temp &= ~FDI_DP_PORT_WIDTH_MASK; 2619 reg = FDI_TX_CTL(pipe);
2546 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes); 2620 temp = I915_READ(reg);
2547 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 2621 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2548 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 2622 temp &= ~FDI_TX_ENABLE;
2549 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2623 I915_WRITE(reg, temp);
2550 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2551 temp |= FDI_COMPOSITE_SYNC;
2552 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2553
2554 I915_WRITE(FDI_RX_MISC(pipe),
2555 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2556
2557 reg = FDI_RX_CTL(pipe);
2558 temp = I915_READ(reg);
2559 temp &= ~FDI_LINK_TRAIN_AUTO;
2560 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2561 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2562 temp |= FDI_COMPOSITE_SYNC;
2563 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2564 2624
2565 POSTING_READ(reg); 2625 reg = FDI_RX_CTL(pipe);
2566 udelay(150); 2626 temp = I915_READ(reg);
2627 temp &= ~FDI_LINK_TRAIN_AUTO;
2628 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2629 temp &= ~FDI_RX_ENABLE;
2630 I915_WRITE(reg, temp);
2567 2631
2568 for (i = 0; i < 4; i++) { 2632 /* enable CPU FDI TX and PCH FDI RX */
2569 reg = FDI_TX_CTL(pipe); 2633 reg = FDI_TX_CTL(pipe);
2570 temp = I915_READ(reg); 2634 temp = I915_READ(reg);
2635 temp &= ~FDI_DP_PORT_WIDTH_MASK;
2636 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2637 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2571 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2638 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2572 temp |= snb_b_fdi_train_param[i]; 2639 temp |= snb_b_fdi_train_param[j/2];
2573 I915_WRITE(reg, temp); 2640 temp |= FDI_COMPOSITE_SYNC;
2641 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2574 2642
2575 POSTING_READ(reg); 2643 I915_WRITE(FDI_RX_MISC(pipe),
2576 udelay(500); 2644 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2577 2645
2578 reg = FDI_RX_IIR(pipe); 2646 reg = FDI_RX_CTL(pipe);
2579 temp = I915_READ(reg); 2647 temp = I915_READ(reg);
2580 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2648 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2581 2649 temp |= FDI_COMPOSITE_SYNC;
2582 if (temp & FDI_RX_BIT_LOCK || 2650 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2583 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2584 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2585 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
2586 break;
2587 }
2588 }
2589 if (i == 4)
2590 DRM_ERROR("FDI train 1 fail!\n");
2591 2651
2592 /* Train 2 */ 2652 POSTING_READ(reg);
2593 reg = FDI_TX_CTL(pipe); 2653 udelay(1); /* should be 0.5us */
2594 temp = I915_READ(reg);
2595 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2596 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2597 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2598 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2599 I915_WRITE(reg, temp);
2600 2654
2601 reg = FDI_RX_CTL(pipe); 2655 for (i = 0; i < 4; i++) {
2602 temp = I915_READ(reg); 2656 reg = FDI_RX_IIR(pipe);
2603 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2657 temp = I915_READ(reg);
2604 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 2658 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2605 I915_WRITE(reg, temp);
2606 2659
2607 POSTING_READ(reg); 2660 if (temp & FDI_RX_BIT_LOCK ||
2608 udelay(150); 2661 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2662 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2663 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
2664 i);
2665 break;
2666 }
2667 udelay(1); /* should be 0.5us */
2668 }
2669 if (i == 4) {
2670 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
2671 continue;
2672 }
2609 2673
2610 for (i = 0; i < 4; i++) { 2674 /* Train 2 */
2611 reg = FDI_TX_CTL(pipe); 2675 reg = FDI_TX_CTL(pipe);
2612 temp = I915_READ(reg); 2676 temp = I915_READ(reg);
2613 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2677 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2614 temp |= snb_b_fdi_train_param[i]; 2678 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2679 I915_WRITE(reg, temp);
2680
2681 reg = FDI_RX_CTL(pipe);
2682 temp = I915_READ(reg);
2683 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2684 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2615 I915_WRITE(reg, temp); 2685 I915_WRITE(reg, temp);
2616 2686
2617 POSTING_READ(reg); 2687 POSTING_READ(reg);
2618 udelay(500); 2688 udelay(2); /* should be 1.5us */
2619 2689
2620 reg = FDI_RX_IIR(pipe); 2690 for (i = 0; i < 4; i++) {
2621 temp = I915_READ(reg); 2691 reg = FDI_RX_IIR(pipe);
2622 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2692 temp = I915_READ(reg);
2693 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2623 2694
2624 if (temp & FDI_RX_SYMBOL_LOCK) { 2695 if (temp & FDI_RX_SYMBOL_LOCK ||
2625 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2696 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
2626 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i); 2697 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2627 break; 2698 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
2699 i);
2700 goto train_done;
2701 }
2702 udelay(2); /* should be 1.5us */
2628 } 2703 }
2704 if (i == 4)
2705 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
2629 } 2706 }
2630 if (i == 4)
2631 DRM_ERROR("FDI train 2 fail!\n");
2632 2707
2708train_done:
2633 DRM_DEBUG_KMS("FDI train done.\n"); 2709 DRM_DEBUG_KMS("FDI train done.\n");
2634} 2710}
2635 2711
@@ -2927,15 +3003,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2927 /* For PCH output, training FDI link */ 3003 /* For PCH output, training FDI link */
2928 dev_priv->display.fdi_link_train(crtc); 3004 dev_priv->display.fdi_link_train(crtc);
2929 3005
2930 /* XXX: pch pll's can be enabled any time before we enable the PCH 3006 /* We need to program the right clock selection before writing the pixel
2931 * transcoder, and we actually should do this to not upset any PCH 3007 * multiplier into the DPLL. */
2932 * transcoder that already use the clock when we share it.
2933 *
2934 * Note that enable_shared_dpll tries to do the right thing, but
2935 * get_shared_dpll unconditionally resets the pll - we need that to have
2936 * the right LVDS enable sequence. */
2937 ironlake_enable_shared_dpll(intel_crtc);
2938
2939 if (HAS_PCH_CPT(dev)) { 3008 if (HAS_PCH_CPT(dev)) {
2940 u32 sel; 3009 u32 sel;
2941 3010
@@ -2949,6 +3018,15 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2949 I915_WRITE(PCH_DPLL_SEL, temp); 3018 I915_WRITE(PCH_DPLL_SEL, temp);
2950 } 3019 }
2951 3020
3021 /* XXX: pch pll's can be enabled any time before we enable the PCH
3022 * transcoder, and we actually should do this to not upset any PCH
3023 * transcoder that already use the clock when we share it.
3024 *
3025 * Note that enable_shared_dpll tries to do the right thing, but
3026 * get_shared_dpll unconditionally resets the pll - we need that to have
3027 * the right LVDS enable sequence. */
3028 ironlake_enable_shared_dpll(intel_crtc);
3029
2952 /* set transcoder timing, panel must allow it */ 3030 /* set transcoder timing, panel must allow it */
2953 assert_panel_unlocked(dev_priv, pipe); 3031 assert_panel_unlocked(dev_priv, pipe);
2954 ironlake_pch_transcoder_set_timings(intel_crtc, pipe); 3032 ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
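Net effect of the reordering above, condensed (not the full function): clock routing is programmed before the shared DPLL is enabled, so the pixel multiplier lands in a PLL that is already selected for the transcoder.

	dev_priv->display.fdi_link_train(crtc);		/* 1. train FDI */
	if (HAS_PCH_CPT(dev))
		I915_WRITE(PCH_DPLL_SEL, temp);		/* 2. route pipe -> PLL */
	ironlake_enable_shared_dpll(intel_crtc);	/* 3. only now enable it */
	assert_panel_unlocked(dev_priv, pipe);		/* 4. transcoder timing */
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);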
@@ -3031,7 +3109,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc)
3031 crtc->config.shared_dpll = DPLL_ID_PRIVATE; 3109 crtc->config.shared_dpll = DPLL_ID_PRIVATE;
3032} 3110}
3033 3111
3034static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, u32 dpll, u32 fp) 3112static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3035{ 3113{
3036 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 3114 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3037 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 3115 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3045,7 +3123,7 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
3045 3123
3046 if (HAS_PCH_IBX(dev_priv->dev)) { 3124 if (HAS_PCH_IBX(dev_priv->dev)) {
3047 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ 3125 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3048 i = crtc->pipe; 3126 i = (enum intel_dpll_id) crtc->pipe;
3049 pll = &dev_priv->shared_dplls[i]; 3127 pll = &dev_priv->shared_dplls[i];
3050 3128
3051 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 3129 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
@@ -3061,8 +3139,8 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
3061 if (pll->refcount == 0) 3139 if (pll->refcount == 0)
3062 continue; 3140 continue;
3063 3141
3064 if (dpll == (I915_READ(PCH_DPLL(pll->id)) & 0x7fffffff) && 3142 if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
3065 fp == I915_READ(PCH_FP0(pll->id))) { 3143 sizeof(pll->hw_state)) == 0) {
3066 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, active %d)\n", 3144 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, active %d)\n",
3067 crtc->base.base.id, 3145 crtc->base.base.id,
3068 pll->name, pll->refcount, pll->active); 3146 pll->name, pll->refcount, pll->active);
@@ -3096,13 +3174,7 @@ found:
3096 WARN_ON(pll->on); 3174 WARN_ON(pll->on);
3097 assert_shared_dpll_disabled(dev_priv, pll); 3175 assert_shared_dpll_disabled(dev_priv, pll);
3098 3176
3099 /* Wait for the clocks to stabilize before rewriting the regs */ 3177 pll->mode_set(dev_priv, pll);
3100 I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
3101 POSTING_READ(PCH_DPLL(pll->id));
3102 udelay(150);
3103
3104 I915_WRITE(PCH_FP0(pll->id), fp);
3105 I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
3106 } 3178 }
3107 pll->refcount++; 3179 pll->refcount++;
3108 3180
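With the register values folded into dpll_hw_state, the sharing test above becomes a plain structure compare. A sketch of the matching step, assuming both sides use the same hw_state layout:

	/* A CRTC may reuse a referenced PLL only if its precomputed state
	 * matches what that PLL is already programmed with, field for field. */
	if (pll->refcount > 0 &&
	    memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
		   sizeof(pll->hw_state)) == 0)
		goto found;	/* share it; the caller bumps pll->refcount */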
@@ -3174,7 +3246,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3174 struct intel_encoder *encoder; 3246 struct intel_encoder *encoder;
3175 int pipe = intel_crtc->pipe; 3247 int pipe = intel_crtc->pipe;
3176 int plane = intel_crtc->plane; 3248 int plane = intel_crtc->plane;
3177 u32 temp;
3178 3249
3179 WARN_ON(!crtc->enabled); 3250 WARN_ON(!crtc->enabled);
3180 3251
@@ -3188,12 +3259,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3188 3259
3189 intel_update_watermarks(dev); 3260 intel_update_watermarks(dev);
3190 3261
3191 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 3262 for_each_encoder_on_crtc(dev, crtc, encoder)
3192 temp = I915_READ(PCH_LVDS); 3263 if (encoder->pre_enable)
3193 if ((temp & LVDS_PORT_EN) == 0) 3264 encoder->pre_enable(encoder);
3194 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3195 }
3196
3197 3265
3198 if (intel_crtc->config.has_pch_encoder) { 3266 if (intel_crtc->config.has_pch_encoder) {
3199 /* Note: FDI PLL enabling _must_ be done before we enable the 3267 /* Note: FDI PLL enabling _must_ be done before we enable the
@@ -3205,10 +3273,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3205 assert_fdi_rx_disabled(dev_priv, pipe); 3273 assert_fdi_rx_disabled(dev_priv, pipe);
3206 } 3274 }
3207 3275
3208 for_each_encoder_on_crtc(dev, crtc, encoder)
3209 if (encoder->pre_enable)
3210 encoder->pre_enable(encoder);
3211
3212 ironlake_pfit_enable(intel_crtc); 3276 ironlake_pfit_enable(intel_crtc);
3213 3277
3214 /* 3278 /*
@@ -3389,7 +3453,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3389 intel_crtc_wait_for_pending_flips(crtc); 3453 intel_crtc_wait_for_pending_flips(crtc);
3390 drm_vblank_off(dev, pipe); 3454 drm_vblank_off(dev, pipe);
3391 3455
3392 if (dev_priv->cfb_plane == plane) 3456 if (dev_priv->fbc.plane == plane)
3393 intel_disable_fbc(dev); 3457 intel_disable_fbc(dev);
3394 3458
3395 intel_crtc_update_cursor(crtc, false); 3459 intel_crtc_update_cursor(crtc, false);
@@ -3462,7 +3526,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
3462 drm_vblank_off(dev, pipe); 3526 drm_vblank_off(dev, pipe);
3463 3527
3464 /* FBC must be disabled before disabling the plane on HSW. */ 3528 /* FBC must be disabled before disabling the plane on HSW. */
3465 if (dev_priv->cfb_plane == plane) 3529 if (dev_priv->fbc.plane == plane)
3466 intel_disable_fbc(dev); 3530 intel_disable_fbc(dev);
3467 3531
3468 hsw_disable_ips(intel_crtc); 3532 hsw_disable_ips(intel_crtc);
@@ -3593,22 +3657,16 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
3593 intel_crtc->active = true; 3657 intel_crtc->active = true;
3594 intel_update_watermarks(dev); 3658 intel_update_watermarks(dev);
3595 3659
3596 mutex_lock(&dev_priv->dpio_lock);
3597
3598 for_each_encoder_on_crtc(dev, crtc, encoder) 3660 for_each_encoder_on_crtc(dev, crtc, encoder)
3599 if (encoder->pre_pll_enable) 3661 if (encoder->pre_pll_enable)
3600 encoder->pre_pll_enable(encoder); 3662 encoder->pre_pll_enable(encoder);
3601 3663
3602 intel_enable_pll(dev_priv, pipe); 3664 vlv_enable_pll(intel_crtc);
3603 3665
3604 for_each_encoder_on_crtc(dev, crtc, encoder) 3666 for_each_encoder_on_crtc(dev, crtc, encoder)
3605 if (encoder->pre_enable) 3667 if (encoder->pre_enable)
3606 encoder->pre_enable(encoder); 3668 encoder->pre_enable(encoder);
3607 3669
3608 /* VLV wants encoder enabling _before_ the pipe is up. */
3609 for_each_encoder_on_crtc(dev, crtc, encoder)
3610 encoder->enable(encoder);
3611
3612 i9xx_pfit_enable(intel_crtc); 3670 i9xx_pfit_enable(intel_crtc);
3613 3671
3614 intel_crtc_load_lut(crtc); 3672 intel_crtc_load_lut(crtc);
@@ -3620,7 +3678,8 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
3620 3678
3621 intel_update_fbc(dev); 3679 intel_update_fbc(dev);
3622 3680
3623 mutex_unlock(&dev_priv->dpio_lock); 3681 for_each_encoder_on_crtc(dev, crtc, encoder)
3682 encoder->enable(encoder);
3624} 3683}
3625 3684
3626static void i9xx_crtc_enable(struct drm_crtc *crtc) 3685static void i9xx_crtc_enable(struct drm_crtc *crtc)
@@ -3640,12 +3699,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
3640 intel_crtc->active = true; 3699 intel_crtc->active = true;
3641 intel_update_watermarks(dev); 3700 intel_update_watermarks(dev);
3642 3701
3643 intel_enable_pll(dev_priv, pipe);
3644
3645 for_each_encoder_on_crtc(dev, crtc, encoder) 3702 for_each_encoder_on_crtc(dev, crtc, encoder)
3646 if (encoder->pre_enable) 3703 if (encoder->pre_enable)
3647 encoder->pre_enable(encoder); 3704 encoder->pre_enable(encoder);
3648 3705
3706 i9xx_enable_pll(intel_crtc);
3707
3649 i9xx_pfit_enable(intel_crtc); 3708 i9xx_pfit_enable(intel_crtc);
3650 3709
3651 intel_crtc_load_lut(crtc); 3710 intel_crtc_load_lut(crtc);
@@ -3701,7 +3760,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3701 intel_crtc_wait_for_pending_flips(crtc); 3760 intel_crtc_wait_for_pending_flips(crtc);
3702 drm_vblank_off(dev, pipe); 3761 drm_vblank_off(dev, pipe);
3703 3762
3704 if (dev_priv->cfb_plane == plane) 3763 if (dev_priv->fbc.plane == plane)
3705 intel_disable_fbc(dev); 3764 intel_disable_fbc(dev);
3706 3765
3707 intel_crtc_dpms_overlay(intel_crtc, false); 3766 intel_crtc_dpms_overlay(intel_crtc, false);
@@ -3717,7 +3776,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3717 if (encoder->post_disable) 3776 if (encoder->post_disable)
3718 encoder->post_disable(encoder); 3777 encoder->post_disable(encoder);
3719 3778
3720 intel_disable_pll(dev_priv, pipe); 3779 i9xx_disable_pll(dev_priv, pipe);
3721 3780
3722 intel_crtc->active = false; 3781 intel_crtc->active = false;
3723 intel_update_fbc(dev); 3782 intel_update_fbc(dev);
@@ -3817,16 +3876,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
3817 } 3876 }
3818} 3877}
3819 3878
3820void intel_modeset_disable(struct drm_device *dev)
3821{
3822 struct drm_crtc *crtc;
3823
3824 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3825 if (crtc->enabled)
3826 intel_crtc_disable(crtc);
3827 }
3828}
3829
3830void intel_encoder_destroy(struct drm_encoder *encoder) 3879void intel_encoder_destroy(struct drm_encoder *encoder)
3831{ 3880{
3832 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 3881 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
@@ -3835,10 +3884,10 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
3835 kfree(intel_encoder); 3884 kfree(intel_encoder);
3836} 3885}
3837 3886
3838/* Simple dpms helper for encodres with just one connector, no cloning and only 3887/* Simple dpms helper for encoders with just one connector, no cloning and only
3839 * one kind of off state. It clamps all !ON modes to fully OFF and changes the 3888 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
3840 * state of the entire output pipe. */ 3889 * state of the entire output pipe. */
3841void intel_encoder_dpms(struct intel_encoder *encoder, int mode) 3890static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
3842{ 3891{
3843 if (mode == DRM_MODE_DPMS_ON) { 3892 if (mode == DRM_MODE_DPMS_ON) {
3844 encoder->connectors_active = true; 3893 encoder->connectors_active = true;
@@ -4032,7 +4081,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
4032{ 4081{
4033 pipe_config->ips_enabled = i915_enable_ips && 4082 pipe_config->ips_enabled = i915_enable_ips &&
4034 hsw_crtc_supports_ips(crtc) && 4083 hsw_crtc_supports_ips(crtc) &&
4035 pipe_config->pipe_bpp == 24; 4084 pipe_config->pipe_bpp <= 24;
4036} 4085}
4037 4086
4038static int intel_crtc_compute_config(struct intel_crtc *crtc, 4087static int intel_crtc_compute_config(struct intel_crtc *crtc,
@@ -4048,12 +4097,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
4048 return -EINVAL; 4097 return -EINVAL;
4049 } 4098 }
4050 4099
4051 /* All interlaced capable intel hw wants timings in frames. Note though
4052 * that intel_lvds_mode_fixup does some funny tricks with the crtc
4053 * timings, so we need to be careful not to clobber these.*/
4054 if (!pipe_config->timings_set)
4055 drm_mode_set_crtcinfo(adjusted_mode, 0);
4056
4057 /* Cantiga+ cannot handle modes with a hsync front porch of 0. 4100 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
4058 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 4101 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
4059 */ 4102 */
@@ -4103,6 +4146,30 @@ static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
4103 return 200000; 4146 return 200000;
4104} 4147}
4105 4148
4149static int pnv_get_display_clock_speed(struct drm_device *dev)
4150{
4151 u16 gcfgc = 0;
4152
4153 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
4154
4155 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
4156 case GC_DISPLAY_CLOCK_267_MHZ_PNV:
4157 return 267000;
4158 case GC_DISPLAY_CLOCK_333_MHZ_PNV:
4159 return 333000;
4160 case GC_DISPLAY_CLOCK_444_MHZ_PNV:
4161 return 444000;
4162 case GC_DISPLAY_CLOCK_200_MHZ_PNV:
4163 return 200000;
4164 default:
4165 DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
4166 case GC_DISPLAY_CLOCK_133_MHZ_PNV:
4167 return 133000;
4168 case GC_DISPLAY_CLOCK_167_MHZ_PNV:
4169 return 167000;
4170 }
4171}
4172
4106static int i915gm_get_display_clock_speed(struct drm_device *dev) 4173static int i915gm_get_display_clock_speed(struct drm_device *dev)
4107{ 4174{
4108 u16 gcfgc = 0; 4175 u16 gcfgc = 0;
@@ -4266,14 +4333,17 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
4266 } 4333 }
4267 4334
4268 I915_WRITE(FP0(pipe), fp); 4335 I915_WRITE(FP0(pipe), fp);
4336 crtc->config.dpll_hw_state.fp0 = fp;
4269 4337
4270 crtc->lowfreq_avail = false; 4338 crtc->lowfreq_avail = false;
4271 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 4339 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4272 reduced_clock && i915_powersave) { 4340 reduced_clock && i915_powersave) {
4273 I915_WRITE(FP1(pipe), fp2); 4341 I915_WRITE(FP1(pipe), fp2);
4342 crtc->config.dpll_hw_state.fp1 = fp2;
4274 crtc->lowfreq_avail = true; 4343 crtc->lowfreq_avail = true;
4275 } else { 4344 } else {
4276 I915_WRITE(FP1(pipe), fp); 4345 I915_WRITE(FP1(pipe), fp);
4346 crtc->config.dpll_hw_state.fp1 = fp;
4277 } 4347 }
4278} 4348}
4279 4349
@@ -4351,17 +4421,13 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4351{ 4421{
4352 struct drm_device *dev = crtc->base.dev; 4422 struct drm_device *dev = crtc->base.dev;
4353 struct drm_i915_private *dev_priv = dev->dev_private; 4423 struct drm_i915_private *dev_priv = dev->dev_private;
4354 struct intel_encoder *encoder;
4355 int pipe = crtc->pipe; 4424 int pipe = crtc->pipe;
4356 u32 dpll, mdiv; 4425 u32 dpll, mdiv;
4357 u32 bestn, bestm1, bestm2, bestp1, bestp2; 4426 u32 bestn, bestm1, bestm2, bestp1, bestp2;
4358 bool is_hdmi;
4359 u32 coreclk, reg_val, dpll_md; 4427 u32 coreclk, reg_val, dpll_md;
4360 4428
4361 mutex_lock(&dev_priv->dpio_lock); 4429 mutex_lock(&dev_priv->dpio_lock);
4362 4430
4363 is_hdmi = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
4364
4365 bestn = crtc->config.dpll.n; 4431 bestn = crtc->config.dpll.n;
4366 bestm1 = crtc->config.dpll.m1; 4432 bestm1 = crtc->config.dpll.m1;
4367 bestm2 = crtc->config.dpll.m2; 4433 bestm2 = crtc->config.dpll.m2;
@@ -4407,7 +4473,7 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4407 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || 4473 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
4408 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) 4474 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
4409 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), 4475 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
4410 0x005f0021); 4476 0x009f0003);
4411 else 4477 else
4412 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), 4478 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
4413 0x00d0000f); 4479 0x00d0000f);
@@ -4440,10 +4506,6 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4440 4506
4441 vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000); 4507 vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
4442 4508
4443 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
4444 if (encoder->pre_pll_enable)
4445 encoder->pre_pll_enable(encoder);
4446
4447 /* Enable DPIO clock input */ 4509 /* Enable DPIO clock input */
4448 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | 4510 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
4449 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; 4511 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
@@ -4451,17 +4513,11 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4451 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 4513 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
4452 4514
4453 dpll |= DPLL_VCO_ENABLE; 4515 dpll |= DPLL_VCO_ENABLE;
4454 I915_WRITE(DPLL(pipe), dpll); 4516 crtc->config.dpll_hw_state.dpll = dpll;
4455 POSTING_READ(DPLL(pipe));
4456 udelay(150);
4457
4458 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
4459 DRM_ERROR("DPLL %d failed to lock\n", pipe);
4460 4517
4461 dpll_md = (crtc->config.pixel_multiplier - 1) 4518 dpll_md = (crtc->config.pixel_multiplier - 1)
4462 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 4519 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4463 I915_WRITE(DPLL_MD(pipe), dpll_md); 4520 crtc->config.dpll_hw_state.dpll_md = dpll_md;
4464 POSTING_READ(DPLL_MD(pipe));
4465 4521
4466 if (crtc->config.has_dp_encoder) 4522 if (crtc->config.has_dp_encoder)
4467 intel_dp_set_m_n(crtc); 4523 intel_dp_set_m_n(crtc);
@@ -4475,8 +4531,6 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
4475{ 4531{
4476 struct drm_device *dev = crtc->base.dev; 4532 struct drm_device *dev = crtc->base.dev;
4477 struct drm_i915_private *dev_priv = dev->dev_private; 4533 struct drm_i915_private *dev_priv = dev->dev_private;
4478 struct intel_encoder *encoder;
4479 int pipe = crtc->pipe;
4480 u32 dpll; 4534 u32 dpll;
4481 bool is_sdvo; 4535 bool is_sdvo;
4482 struct dpll *clock = &crtc->config.dpll; 4536 struct dpll *clock = &crtc->config.dpll;
@@ -4499,10 +4553,10 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
4499 } 4553 }
4500 4554
4501 if (is_sdvo) 4555 if (is_sdvo)
4502 dpll |= DPLL_DVO_HIGH_SPEED; 4556 dpll |= DPLL_SDVO_HIGH_SPEED;
4503 4557
4504 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) 4558 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
4505 dpll |= DPLL_DVO_HIGH_SPEED; 4559 dpll |= DPLL_SDVO_HIGH_SPEED;
4506 4560
4507 /* compute bitmask from p1 value */ 4561 /* compute bitmask from p1 value */
4508 if (IS_PINEVIEW(dev)) 4562 if (IS_PINEVIEW(dev))
@@ -4538,35 +4592,16 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
4538 dpll |= PLL_REF_INPUT_DREFCLK; 4592 dpll |= PLL_REF_INPUT_DREFCLK;
4539 4593
4540 dpll |= DPLL_VCO_ENABLE; 4594 dpll |= DPLL_VCO_ENABLE;
4541 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); 4595 crtc->config.dpll_hw_state.dpll = dpll;
4542 POSTING_READ(DPLL(pipe));
4543 udelay(150);
4544
4545 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
4546 if (encoder->pre_pll_enable)
4547 encoder->pre_pll_enable(encoder);
4548
4549 if (crtc->config.has_dp_encoder)
4550 intel_dp_set_m_n(crtc);
4551
4552 I915_WRITE(DPLL(pipe), dpll);
4553
4554 /* Wait for the clocks to stabilize. */
4555 POSTING_READ(DPLL(pipe));
4556 udelay(150);
4557 4596
4558 if (INTEL_INFO(dev)->gen >= 4) { 4597 if (INTEL_INFO(dev)->gen >= 4) {
4559 u32 dpll_md = (crtc->config.pixel_multiplier - 1) 4598 u32 dpll_md = (crtc->config.pixel_multiplier - 1)
4560 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 4599 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4561 I915_WRITE(DPLL_MD(pipe), dpll_md); 4600 crtc->config.dpll_hw_state.dpll_md = dpll_md;
4562 } else {
4563 /* The pixel multiplier can only be updated once the
4564 * DPLL is enabled and the clocks are stable.
4565 *
4566 * So write it again.
4567 */
4568 I915_WRITE(DPLL(pipe), dpll);
4569 } 4601 }
4602
4603 if (crtc->config.has_dp_encoder)
4604 intel_dp_set_m_n(crtc);
4570} 4605}
4571 4606
4572static void i8xx_update_pll(struct intel_crtc *crtc, 4607static void i8xx_update_pll(struct intel_crtc *crtc,
@@ -4575,8 +4610,6 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
4575{ 4610{
4576 struct drm_device *dev = crtc->base.dev; 4611 struct drm_device *dev = crtc->base.dev;
4577 struct drm_i915_private *dev_priv = dev->dev_private; 4612 struct drm_i915_private *dev_priv = dev->dev_private;
4578 struct intel_encoder *encoder;
4579 int pipe = crtc->pipe;
4580 u32 dpll; 4613 u32 dpll;
4581 struct dpll *clock = &crtc->config.dpll; 4614 struct dpll *clock = &crtc->config.dpll;
4582 4615
@@ -4595,6 +4628,9 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
4595 dpll |= PLL_P2_DIVIDE_BY_4; 4628 dpll |= PLL_P2_DIVIDE_BY_4;
4596 } 4629 }
4597 4630
4631 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
4632 dpll |= DPLL_DVO_2X_MODE;
4633
4598 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 4634 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4599 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 4635 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4600 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 4636 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
@@ -4602,26 +4638,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
4602 dpll |= PLL_REF_INPUT_DREFCLK; 4638 dpll |= PLL_REF_INPUT_DREFCLK;
4603 4639
4604 dpll |= DPLL_VCO_ENABLE; 4640 dpll |= DPLL_VCO_ENABLE;
4605 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); 4641 crtc->config.dpll_hw_state.dpll = dpll;
4606 POSTING_READ(DPLL(pipe));
4607 udelay(150);
4608
4609 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
4610 if (encoder->pre_pll_enable)
4611 encoder->pre_pll_enable(encoder);
4612
4613 I915_WRITE(DPLL(pipe), dpll);
4614
4615 /* Wait for the clocks to stabilize. */
4616 POSTING_READ(DPLL(pipe));
4617 udelay(150);
4618
4619 /* The pixel multiplier can only be updated once the
4620 * DPLL is enabled and the clocks are stable.
4621 *
4622 * So write it again.
4623 */
4624 I915_WRITE(DPLL(pipe), dpll);
4625} 4642}
4626 4643
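The update_pll helpers in this patch (vlv, i9xx, and i8xx above) no longer touch hardware during mode set; they only cache the computed values, and the MMIO happens later from the crtc enable hooks. A sketch of the split; both halves appear verbatim elsewhere in this diff:

	/* mode_set time: compute only */
	crtc->config.dpll_hw_state.dpll    = dpll;
	crtc->config.dpll_hw_state.dpll_md = dpll_md;	/* gen4+ and VLV */

	/* crtc enable time: commit, see i9xx_enable_pll()/vlv_enable_pll() */
	I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);	/* wait for the clocks to stabilize */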
4627static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) 4644static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
@@ -4727,6 +4744,27 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
4727 pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1; 4744 pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
4728} 4745}
4729 4746
4747static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
4748 struct intel_crtc_config *pipe_config)
4749{
4750 struct drm_crtc *crtc = &intel_crtc->base;
4751
4752 crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
4753 crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
4754 crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
4755 crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
4756
4757 crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
4758 crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
4759 crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
4760 crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
4761
4762 crtc->mode.flags = pipe_config->adjusted_mode.flags;
4763
4764 crtc->mode.clock = pipe_config->adjusted_mode.clock;
4765 crtc->mode.flags |= pipe_config->adjusted_mode.flags;
4766}
4767
4730static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) 4768static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
4731{ 4769{
4732 struct drm_device *dev = intel_crtc->base.dev; 4770 struct drm_device *dev = intel_crtc->base.dev;
@@ -4913,22 +4951,19 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
4913 uint32_t tmp; 4951 uint32_t tmp;
4914 4952
4915 tmp = I915_READ(PFIT_CONTROL); 4953 tmp = I915_READ(PFIT_CONTROL);
4954 if (!(tmp & PFIT_ENABLE))
4955 return;
4916 4956
4957 /* Check whether the pfit is attached to our pipe. */
4917 if (INTEL_INFO(dev)->gen < 4) { 4958 if (INTEL_INFO(dev)->gen < 4) {
4918 if (crtc->pipe != PIPE_B) 4959 if (crtc->pipe != PIPE_B)
4919 return; 4960 return;
4920
4921 /* gen2/3 store dither state in pfit control, needs to match */
4922 pipe_config->gmch_pfit.control = tmp & PANEL_8TO6_DITHER_ENABLE;
4923 } else { 4961 } else {
4924 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) 4962 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
4925 return; 4963 return;
4926 } 4964 }
4927 4965
4928 if (!(tmp & PFIT_ENABLE)) 4966 pipe_config->gmch_pfit.control = tmp;
4929 return;
4930
4931 pipe_config->gmch_pfit.control = I915_READ(PFIT_CONTROL);
4932 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); 4967 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
4933 if (INTEL_INFO(dev)->gen < 5) 4968 if (INTEL_INFO(dev)->gen < 5)
4934 pipe_config->gmch_pfit.lvds_border_bits = 4969 pipe_config->gmch_pfit.lvds_border_bits =
@@ -4942,7 +4977,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
4942 struct drm_i915_private *dev_priv = dev->dev_private; 4977 struct drm_i915_private *dev_priv = dev->dev_private;
4943 uint32_t tmp; 4978 uint32_t tmp;
4944 4979
4945 pipe_config->cpu_transcoder = crtc->pipe; 4980 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
4946 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 4981 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
4947 4982
4948 tmp = I915_READ(PIPECONF(crtc->pipe)); 4983 tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -4958,6 +4993,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
4958 pipe_config->pixel_multiplier = 4993 pipe_config->pixel_multiplier =
4959 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 4994 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
4960 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 4995 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
4996 pipe_config->dpll_hw_state.dpll_md = tmp;
4961 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 4997 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
4962 tmp = I915_READ(DPLL(crtc->pipe)); 4998 tmp = I915_READ(DPLL(crtc->pipe));
4963 pipe_config->pixel_multiplier = 4999 pipe_config->pixel_multiplier =
@@ -4969,6 +5005,16 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
4969 * function. */ 5005 * function. */
4970 pipe_config->pixel_multiplier = 1; 5006 pipe_config->pixel_multiplier = 1;
4971 } 5007 }
5008 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
5009 if (!IS_VALLEYVIEW(dev)) {
5010 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
5011 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
5012 } else {
5013 /* Mask out read-only status bits. */
5014 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
5015 DPLL_PORTC_READY_MASK |
5016 DPLL_PORTB_READY_MASK);
5017 }
4972 5018
4973 return true; 5019 return true;
4974} 5020}
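One subtlety in the VLV readout above: the DPLL register carries status bits the hardware sets on its own, so they are masked before the value is saved, keeping the recorded state comparable with what software wrote. In short:

	/* strip bits the hardware toggles on its own (lock/port-ready) */
	pipe_config->dpll_hw_state.dpll =
		I915_READ(DPLL(crtc->pipe)) & ~(DPLL_LOCK_VLV |
						DPLL_PORTC_READY_MASK |
						DPLL_PORTB_READY_MASK);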
@@ -5122,74 +5168,37 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
5122 BUG_ON(val != final); 5168 BUG_ON(val != final);
5123} 5169}
5124 5170
5125/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */ 5171static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
5126static void lpt_init_pch_refclk(struct drm_device *dev)
5127{ 5172{
5128 struct drm_i915_private *dev_priv = dev->dev_private; 5173 uint32_t tmp;
5129 struct drm_mode_config *mode_config = &dev->mode_config;
5130 struct intel_encoder *encoder;
5131 bool has_vga = false;
5132 bool is_sdv = false;
5133 u32 tmp;
5134
5135 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5136 switch (encoder->type) {
5137 case INTEL_OUTPUT_ANALOG:
5138 has_vga = true;
5139 break;
5140 }
5141 }
5142
5143 if (!has_vga)
5144 return;
5145
5146 mutex_lock(&dev_priv->dpio_lock);
5147
5148 /* XXX: Rip out SDV support once Haswell ships for real. */
5149 if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
5150 is_sdv = true;
5151
5152 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5153 tmp &= ~SBI_SSCCTL_DISABLE;
5154 tmp |= SBI_SSCCTL_PATHALT;
5155 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5156 5174
5157 udelay(24); 5175 tmp = I915_READ(SOUTH_CHICKEN2);
5176 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
5177 I915_WRITE(SOUTH_CHICKEN2, tmp);
5158 5178
5159 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 5179 if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
5160 tmp &= ~SBI_SSCCTL_PATHALT; 5180 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
5161 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 5181 DRM_ERROR("FDI mPHY reset assert timeout\n");
5162 5182
5163 if (!is_sdv) { 5183 tmp = I915_READ(SOUTH_CHICKEN2);
5164 tmp = I915_READ(SOUTH_CHICKEN2); 5184 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5165 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 5185 I915_WRITE(SOUTH_CHICKEN2, tmp);
5166 I915_WRITE(SOUTH_CHICKEN2, tmp);
5167 5186
5168 if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) & 5187 if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
5169 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 5188 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
5170 DRM_ERROR("FDI mPHY reset assert timeout\n"); 5189 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
5171 5190}
5172 tmp = I915_READ(SOUTH_CHICKEN2);
5173 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5174 I915_WRITE(SOUTH_CHICKEN2, tmp);
5175 5191
5176 if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) & 5192/* WaMPhyProgramming:hsw */
5177 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 5193static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
5178 100)) 5194{
5179 DRM_ERROR("FDI mPHY reset de-assert timeout\n"); 5195 uint32_t tmp;
5180 }
5181 5196
5182 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); 5197 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
5183 tmp &= ~(0xFF << 24); 5198 tmp &= ~(0xFF << 24);
5184 tmp |= (0x12 << 24); 5199 tmp |= (0x12 << 24);
5185 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 5200 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
5186 5201
5187 if (is_sdv) {
5188 tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
5189 tmp |= 0x7FFF;
5190 intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
5191 }
5192
5193 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); 5202 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
5194 tmp |= (1 << 11); 5203 tmp |= (1 << 11);
5195 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); 5204 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
@@ -5198,24 +5207,6 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
5198 tmp |= (1 << 11); 5207 tmp |= (1 << 11);
5199 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); 5208 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
5200 5209
5201 if (is_sdv) {
5202 tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
5203 tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
5204 intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
5205
5206 tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
5207 tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
5208 intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
5209
5210 tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
5211 tmp |= (0x3F << 8);
5212 intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
5213
5214 tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
5215 tmp |= (0x3F << 8);
5216 intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
5217 }
5218
5219 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); 5210 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
5220 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 5211 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5221 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 5212 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
@@ -5224,17 +5215,15 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
5224 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 5215 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5225 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 5216 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
5226 5217
5227 if (!is_sdv) { 5218 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
5228 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 5219 tmp &= ~(7 << 13);
5229 tmp &= ~(7 << 13); 5220 tmp |= (5 << 13);
5230 tmp |= (5 << 13); 5221 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5231 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5232 5222
5233 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 5223 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
5234 tmp &= ~(7 << 13); 5224 tmp &= ~(7 << 13);
5235 tmp |= (5 << 13); 5225 tmp |= (5 << 13);
5236 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 5226 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
5237 }
5238 5227
5239 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 5228 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
5240 tmp &= ~0xFF; 5229 tmp &= ~0xFF;
@@ -5256,34 +5245,120 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
5256 tmp |= (0x1C << 16); 5245 tmp |= (0x1C << 16);
5257 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 5246 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
5258 5247
5259 if (!is_sdv) { 5248 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
5260 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 5249 tmp |= (1 << 27);
5261 tmp |= (1 << 27); 5250 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5262 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 5251
5252 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5253 tmp |= (1 << 27);
5254 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5255
5256 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5257 tmp &= ~(0xF << 28);
5258 tmp |= (4 << 28);
5259 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
5260
5261 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5262 tmp &= ~(0xF << 28);
5263 tmp |= (4 << 28);
5264 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
5265}
5266
5267/* Implements 3 different sequences from BSpec chapter "Display iCLK
5268 * Programming" based on the parameters passed:
5269 * - Sequence to enable CLKOUT_DP
5270 * - Sequence to enable CLKOUT_DP without spread
5271 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
5272 */
5273static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
5274 bool with_fdi)
5275{
5276 struct drm_i915_private *dev_priv = dev->dev_private;
5277 uint32_t reg, tmp;
5263 5278
5264 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 5279 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
5265 tmp |= (1 << 27); 5280 with_spread = true;
5266 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); 5281 if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
5282 with_fdi, "LP PCH doesn't have FDI\n"))
5283 with_fdi = false;
5267 5284
5268 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 5285 mutex_lock(&dev_priv->dpio_lock);
5269 tmp &= ~(0xF << 28); 5286
5270 tmp |= (4 << 28); 5287 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5271 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 5288 tmp &= ~SBI_SSCCTL_DISABLE;
5289 tmp |= SBI_SSCCTL_PATHALT;
5290 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5272 5291
5273 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 5292 udelay(24);
5274 tmp &= ~(0xF << 28); 5293
5275 tmp |= (4 << 28); 5294 if (with_spread) {
5276 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 5295 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5296 tmp &= ~SBI_SSCCTL_PATHALT;
5297 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5298
5299 if (with_fdi) {
5300 lpt_reset_fdi_mphy(dev_priv);
5301 lpt_program_fdi_mphy(dev_priv);
5302 }
5277 } 5303 }
5278 5304
5279 /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */ 5305 reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
5280 tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK); 5306 SBI_GEN0 : SBI_DBUFF0;
5281 tmp |= SBI_DBUFF0_ENABLE; 5307 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5282 intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK); 5308 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5309 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5283 5310
5284 mutex_unlock(&dev_priv->dpio_lock); 5311 mutex_unlock(&dev_priv->dpio_lock);
5285} 5312}
5286 5313
5314/* Sequence to disable CLKOUT_DP */
5315static void lpt_disable_clkout_dp(struct drm_device *dev)
5316{
5317 struct drm_i915_private *dev_priv = dev->dev_private;
5318 uint32_t reg, tmp;
5319
5320 mutex_lock(&dev_priv->dpio_lock);
5321
5322 reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
5323 SBI_GEN0 : SBI_DBUFF0;
5324 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5325 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5326 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5327
5328 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5329 if (!(tmp & SBI_SSCCTL_DISABLE)) {
5330 if (!(tmp & SBI_SSCCTL_PATHALT)) {
5331 tmp |= SBI_SSCCTL_PATHALT;
5332 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5333 udelay(32);
5334 }
5335 tmp |= SBI_SSCCTL_DISABLE;
5336 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5337 }
5338
5339 mutex_unlock(&dev_priv->dpio_lock);
5340}
5341
5342static void lpt_init_pch_refclk(struct drm_device *dev)
5343{
5344 struct drm_mode_config *mode_config = &dev->mode_config;
5345 struct intel_encoder *encoder;
5346 bool has_vga = false;
5347
5348 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5349 switch (encoder->type) {
5350 case INTEL_OUTPUT_ANALOG:
5351 has_vga = true;
5352 break;
5353 }
5354 }
5355
5356 if (has_vga)
5357 lpt_enable_clkout_dp(dev, true, true);
5358 else
5359 lpt_disable_clkout_dp(dev);
5360}
5361
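The (with_spread, with_fdi) pair selects among the three BSpec sequences named in the comment above. Only the FDI variant and the disable path are exercised in this hunk; the middle two calls below are illustrative, implied by the WARN guards:

	lpt_enable_clkout_dp(dev, true, true);	 /* CLKOUT_DP for FDI (VGA) */
	lpt_enable_clkout_dp(dev, true, false);	 /* CLKOUT_DP with downspread */
	lpt_enable_clkout_dp(dev, false, false); /* CLKOUT_DP without spread */
	lpt_disable_clkout_dp(dev);		 /* matching teardown */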
5287/* 5362/*
5288 * Initialize reference clocks when the driver loads 5363 * Initialize reference clocks when the driver loads
5289 */ 5364 */
@@ -5613,9 +5688,9 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5613 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 5688 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5614 5689
5615 if (is_sdvo) 5690 if (is_sdvo)
5616 dpll |= DPLL_DVO_HIGH_SPEED; 5691 dpll |= DPLL_SDVO_HIGH_SPEED;
5617 if (intel_crtc->config.has_dp_encoder) 5692 if (intel_crtc->config.has_dp_encoder)
5618 dpll |= DPLL_DVO_HIGH_SPEED; 5693 dpll |= DPLL_SDVO_HIGH_SPEED;
5619 5694
5620 /* compute bitmask from p1 value */ 5695 /* compute bitmask from p1 value */
5621 dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 5696 dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
@@ -5711,7 +5786,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5711 else 5786 else
5712 intel_crtc->config.dpll_hw_state.fp1 = fp; 5787 intel_crtc->config.dpll_hw_state.fp1 = fp;
5713 5788
5714 pll = intel_get_shared_dpll(intel_crtc, dpll, fp); 5789 pll = intel_get_shared_dpll(intel_crtc);
5715 if (pll == NULL) { 5790 if (pll == NULL) {
5716 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", 5791 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
5717 pipe_name(pipe)); 5792 pipe_name(pipe));
@@ -5723,10 +5798,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5723 if (intel_crtc->config.has_dp_encoder) 5798 if (intel_crtc->config.has_dp_encoder)
5724 intel_dp_set_m_n(intel_crtc); 5799 intel_dp_set_m_n(intel_crtc);
5725 5800
5726 for_each_encoder_on_crtc(dev, crtc, encoder)
5727 if (encoder->pre_pll_enable)
5728 encoder->pre_pll_enable(encoder);
5729
5730 if (is_lvds && has_reduced_clock && i915_powersave) 5801 if (is_lvds && has_reduced_clock && i915_powersave)
5731 intel_crtc->lowfreq_avail = true; 5802 intel_crtc->lowfreq_avail = true;
5732 else 5803 else
@@ -5735,23 +5806,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5735 if (intel_crtc->config.has_pch_encoder) { 5806 if (intel_crtc->config.has_pch_encoder) {
5736 pll = intel_crtc_to_shared_dpll(intel_crtc); 5807 pll = intel_crtc_to_shared_dpll(intel_crtc);
5737 5808
5738 I915_WRITE(PCH_DPLL(pll->id), dpll);
5739
5740 /* Wait for the clocks to stabilize. */
5741 POSTING_READ(PCH_DPLL(pll->id));
5742 udelay(150);
5743
5744 /* The pixel multiplier can only be updated once the
5745 * DPLL is enabled and the clocks are stable.
5746 *
5747 * So write it again.
5748 */
5749 I915_WRITE(PCH_DPLL(pll->id), dpll);
5750
5751 if (has_reduced_clock)
5752 I915_WRITE(PCH_FP1(pll->id), fp2);
5753 else
5754 I915_WRITE(PCH_FP1(pll->id), fp);
5755 } 5809 }
5756 5810
5757 intel_set_pipe_timings(intel_crtc); 5811 intel_set_pipe_timings(intel_crtc);
@@ -5823,7 +5877,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5823 struct drm_i915_private *dev_priv = dev->dev_private; 5877 struct drm_i915_private *dev_priv = dev->dev_private;
5824 uint32_t tmp; 5878 uint32_t tmp;
5825 5879
5826 pipe_config->cpu_transcoder = crtc->pipe; 5880 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5827 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 5881 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
5828 5882
5829 tmp = I915_READ(PIPECONF(crtc->pipe)); 5883 tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -5841,12 +5895,9 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5841 5895
5842 ironlake_get_fdi_m_n_config(crtc, pipe_config); 5896 ironlake_get_fdi_m_n_config(crtc, pipe_config);
5843 5897
5844 /* XXX: Can't properly read out the pch dpll pixel multiplier
5845 * since we don't have state tracking for pch clocks yet. */
5846 pipe_config->pixel_multiplier = 1;
5847
5848 if (HAS_PCH_IBX(dev_priv->dev)) { 5898 if (HAS_PCH_IBX(dev_priv->dev)) {
5849 pipe_config->shared_dpll = crtc->pipe; 5899 pipe_config->shared_dpll =
5900 (enum intel_dpll_id) crtc->pipe;
5850 } else { 5901 } else {
5851 tmp = I915_READ(PCH_DPLL_SEL); 5902 tmp = I915_READ(PCH_DPLL_SEL);
5852 if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) 5903 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
@@ -5859,6 +5910,11 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5859 5910
5860 WARN_ON(!pll->get_hw_state(dev_priv, pll, 5911 WARN_ON(!pll->get_hw_state(dev_priv, pll,
5861 &pipe_config->dpll_hw_state)); 5912 &pipe_config->dpll_hw_state));
5913
5914 tmp = pipe_config->dpll_hw_state.dpll;
5915 pipe_config->pixel_multiplier =
5916 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
5917 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
5862 } else { 5918 } else {
5863 pipe_config->pixel_multiplier = 1; 5919 pipe_config->pixel_multiplier = 1;
5864 } 5920 }
@@ -5870,6 +5926,305 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5870 return true; 5926 return true;
5871} 5927}
5872 5928
5929static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
5930{
5931 struct drm_device *dev = dev_priv->dev;
5932 struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
5933 struct intel_crtc *crtc;
5934 unsigned long irqflags;
5935 uint32_t val;
5936
5937 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
5938 WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
5939 pipe_name(crtc->pipe));
5940
5941 WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
5942 WARN(plls->spll_refcount, "SPLL enabled\n");
5943 WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
5944 WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
5945 WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
5946 WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
5947 "CPU PWM1 enabled\n");
5948 WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
5949 "CPU PWM2 enabled\n");
5950 WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
5951 "PCH PWM1 enabled\n");
5952 WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
5953 "Utility pin enabled\n");
5954 WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
5955
5956 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
5957 val = I915_READ(DEIMR);
5958 WARN((val & ~DE_PCH_EVENT_IVB) != val,
5959 "Unexpected DEIMR bits enabled: 0x%x\n", val);
5960 val = I915_READ(SDEIMR);
5961 WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
5962 "Unexpected SDEIMR bits enabled: 0x%x\n", val);
5963 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
5964}
5965
5966/*
5967 * This function implements pieces of two sequences from BSpec:
5968 * - Sequence for display software to disable LCPLL
5969 * - Sequence for display software to allow package C8+
5970 * The steps implemented here are just the steps that actually touch the LCPLL
5971 * register. Callers should take care of disabling all the display engine
5972 * functions, doing the mode unset, fixing interrupts, etc.
5973 */
5974void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
5975 bool switch_to_fclk, bool allow_power_down)
5976{
5977 uint32_t val;
5978
5979 assert_can_disable_lcpll(dev_priv);
5980
5981 val = I915_READ(LCPLL_CTL);
5982
5983 if (switch_to_fclk) {
5984 val |= LCPLL_CD_SOURCE_FCLK;
5985 I915_WRITE(LCPLL_CTL, val);
5986
5987 if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
5988 LCPLL_CD_SOURCE_FCLK_DONE, 1))
5989 DRM_ERROR("Switching to FCLK failed\n");
5990
5991 val = I915_READ(LCPLL_CTL);
5992 }
5993
5994 val |= LCPLL_PLL_DISABLE;
5995 I915_WRITE(LCPLL_CTL, val);
5996 POSTING_READ(LCPLL_CTL);
5997
5998 if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
5999 DRM_ERROR("LCPLL still locked\n");
6000
6001 val = I915_READ(D_COMP);
6002 val |= D_COMP_COMP_DISABLE;
6003 I915_WRITE(D_COMP, val);
6004 POSTING_READ(D_COMP);
6005 ndelay(100);
6006
6007 if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
6008 DRM_ERROR("D_COMP RCOMP still in progress\n");
6009
6010 if (allow_power_down) {
6011 val = I915_READ(LCPLL_CTL);
6012 val |= LCPLL_POWER_DOWN_ALLOW;
6013 I915_WRITE(LCPLL_CTL, val);
6014 POSTING_READ(LCPLL_CTL);
6015 }
6016}
6017
6018/*
6019 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
6020 * source.
6021 */
6022void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6023{
6024 uint32_t val;
6025
6026 val = I915_READ(LCPLL_CTL);
6027
6028 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
6029 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
6030 return;
6031
6032 /* Make sure we're not in PC8 state before disabling PC8, otherwise
6033 * we'll hang the machine! */
+	dev_priv->uncore.funcs.force_wake_get(dev_priv);
+
+	if (val & LCPLL_POWER_DOWN_ALLOW) {
+		val &= ~LCPLL_POWER_DOWN_ALLOW;
+		I915_WRITE(LCPLL_CTL, val);
+		POSTING_READ(LCPLL_CTL);
+	}
+
+	val = I915_READ(D_COMP);
+	val |= D_COMP_COMP_FORCE;
+	val &= ~D_COMP_COMP_DISABLE;
+	I915_WRITE(D_COMP, val);
+	POSTING_READ(D_COMP);
+
+	val = I915_READ(LCPLL_CTL);
+	val &= ~LCPLL_PLL_DISABLE;
+	I915_WRITE(LCPLL_CTL, val);
+
+	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
+		DRM_ERROR("LCPLL not locked yet\n");
+
+	if (val & LCPLL_CD_SOURCE_FCLK) {
+		val = I915_READ(LCPLL_CTL);
+		val &= ~LCPLL_CD_SOURCE_FCLK;
+		I915_WRITE(LCPLL_CTL, val);
+
+		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
+					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+			DRM_ERROR("Switching back to LCPLL failed\n");
+	}
+
+	dev_priv->uncore.funcs.force_wake_put(dev_priv);
+}
+
+void hsw_enable_pc8_work(struct work_struct *__work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(to_delayed_work(__work), struct drm_i915_private,
+			     pc8.enable_work);
+	struct drm_device *dev = dev_priv->dev;
+	uint32_t val;
+
+	if (dev_priv->pc8.enabled)
+		return;
+
+	DRM_DEBUG_KMS("Enabling package C8+\n");
+
+	dev_priv->pc8.enabled = true;
+
+	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+		val = I915_READ(SOUTH_DSPCLK_GATE_D);
+		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
+		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+	}
+
+	lpt_disable_clkout_dp(dev);
+	hsw_pc8_disable_interrupts(dev);
+	hsw_disable_lcpll(dev_priv, true, true);
+}
+
+static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
+	WARN(dev_priv->pc8.disable_count < 1,
+	     "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
+
+	dev_priv->pc8.disable_count--;
+	if (dev_priv->pc8.disable_count != 0)
+		return;
+
+	schedule_delayed_work(&dev_priv->pc8.enable_work,
+			      msecs_to_jiffies(i915_pc8_timeout));
+}
+
+static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	uint32_t val;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
+	WARN(dev_priv->pc8.disable_count < 0,
+	     "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
+
+	dev_priv->pc8.disable_count++;
+	if (dev_priv->pc8.disable_count != 1)
+		return;
+
+	cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
+	if (!dev_priv->pc8.enabled)
+		return;
+
+	DRM_DEBUG_KMS("Disabling package C8+\n");
+
+	hsw_restore_lcpll(dev_priv);
+	hsw_pc8_restore_interrupts(dev);
+	lpt_init_pch_refclk(dev);
+
+	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+		val = I915_READ(SOUTH_DSPCLK_GATE_D);
+		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
+		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+	}
+
+	intel_prepare_ddi(dev);
+	i915_gem_init_swizzling(dev);
+	mutex_lock(&dev_priv->rps.hw_lock);
+	gen6_update_ring_freq(dev);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+	dev_priv->pc8.enabled = false;
+}
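+
+/*
+ * Editor's note, not part of the original patch: disable_count is a
+ * reference count of "reasons PC8+ must stay off". Note the inversion:
+ * __hsw_enable_package_c8() releases one reference, and only when the count
+ * drops to zero does it schedule the delayed enable_work; the first
+ * __hsw_disable_package_c8() after that cancels the work and, if PC8+ was
+ * already entered, undoes it synchronously.
+ */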
+
+void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
+{
+	mutex_lock(&dev_priv->pc8.lock);
+	__hsw_enable_package_c8(dev_priv);
+	mutex_unlock(&dev_priv->pc8.lock);
+}
+
+void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
+{
+	mutex_lock(&dev_priv->pc8.lock);
+	__hsw_disable_package_c8(dev_priv);
+	mutex_unlock(&dev_priv->pc8.lock);
+}
+
+static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct intel_crtc *crtc;
+	uint32_t val;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
+		if (crtc->base.enabled)
+			return false;
+
+	/* This case is still possible since we have the i915.disable_power_well
+	 * module parameter, and because KVMr or something else might be
+	 * requesting the power well. */
+	val = I915_READ(HSW_PWR_WELL_DRIVER);
+	if (val != 0) {
+		DRM_DEBUG_KMS("Not enabling PC8: power well on\n");
+		return false;
+	}
+
+	return true;
+}
+
+/* Since we're called from modeset_global_resources there's no way to
+ * symmetrically increase and decrease the refcount, so we use
+ * dev_priv->pc8.requirements_met to track whether we already have the
+ * refcount or not.
+ */
+static void hsw_update_package_c8(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool allow;
+
+	if (!i915_enable_pc8)
+		return;
+
+	mutex_lock(&dev_priv->pc8.lock);
+
+	allow = hsw_can_enable_package_c8(dev_priv);
+
+	if (allow == dev_priv->pc8.requirements_met)
+		goto done;
+
+	dev_priv->pc8.requirements_met = allow;
+
+	if (allow)
+		__hsw_enable_package_c8(dev_priv);
+	else
+		__hsw_disable_package_c8(dev_priv);
+
+done:
+	mutex_unlock(&dev_priv->pc8.lock);
+}
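+
+/*
+ * Editor's note, not part of the original patch: requirements_met is a
+ * one-bit latch around the refcount above. modeset_global_resources may run
+ * repeatedly with the same outcome, so the PC8+ reference is only taken or
+ * released when the value of hsw_can_enable_package_c8() actually changes.
+ */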
+
+static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
+{
+	if (!dev_priv->pc8.gpu_idle) {
+		dev_priv->pc8.gpu_idle = true;
+		hsw_enable_package_c8(dev_priv);
+	}
+}
+
+static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->pc8.gpu_idle) {
+		dev_priv->pc8.gpu_idle = false;
+		hsw_disable_package_c8(dev_priv);
+	}
+}
+
 static void haswell_modeset_global_resources(struct drm_device *dev)
 {
 	bool enable = false;
@@ -5885,6 +6240,8 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
 	}
 
 	intel_set_power_well(dev, enable);
+
+	hsw_update_package_c8(dev);
 }
 
 static int haswell_crtc_mode_set(struct drm_crtc *crtc,
@@ -5938,7 +6295,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 	enum intel_display_power_domain pfit_domain;
 	uint32_t tmp;
 
-	pipe_config->cpu_transcoder = crtc->pipe;
+	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
 	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
 	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -6008,11 +6365,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_encoder_helper_funcs *encoder_funcs;
 	struct intel_encoder *encoder;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_display_mode *adjusted_mode =
-		&intel_crtc->config.adjusted_mode;
 	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
 	int pipe = intel_crtc->pipe;
 	int ret;
@@ -6031,12 +6385,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			encoder->base.base.id,
 			drm_get_encoder_name(&encoder->base),
 			mode->base.id, mode->name);
-		if (encoder->mode_set) {
-			encoder->mode_set(encoder);
-		} else {
-			encoder_funcs = encoder->base.helper_private;
-			encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
-		}
+		encoder->mode_set(encoder);
 	}
 
 	return 0;
@@ -6551,7 +6900,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 			goto fail_unpin;
 		}
 
-		addr = obj->gtt_offset;
+		addr = i915_gem_obj_ggtt_offset(obj);
 	} else {
 		int align = IS_I830(dev) ? 16 * 1024 : 256;
 		ret = i915_gem_attach_phys_object(dev, obj,
@@ -6573,7 +6922,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 		if (intel_crtc->cursor_bo != obj)
 			i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
 		} else
-			i915_gem_object_unpin(intel_crtc->cursor_bo);
+			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
 		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
 	}
 
@@ -6588,7 +6937,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
 	return 0;
 fail_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_unpin_from_display_plane(obj);
 fail_locked:
 	mutex_unlock(&dev->struct_mutex);
 fail:
@@ -6878,11 +7227,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 }
 
 /* Returns the clock of the currently programmed mode of the given pipe. */
-static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+				struct intel_crtc_config *pipe_config)
 {
+	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
+	int pipe = pipe_config->cpu_transcoder;
 	u32 dpll = I915_READ(DPLL(pipe));
 	u32 fp;
 	intel_clock_t clock;
@@ -6921,7 +7271,8 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
 	default:
 		DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
 			      "mode\n", (int)(dpll & DPLL_MODE_MASK));
-		return 0;
+		pipe_config->adjusted_mode.clock = 0;
+		return;
 	}
 
 	if (IS_PINEVIEW(dev))
@@ -6958,12 +7309,55 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
 		}
 	}
 
-	/* XXX: It would be nice to validate the clocks, but we can't reuse
-	 * i830PllIsValid() because it relies on the xf86_config connector
-	 * configuration being accurate, which it isn't necessarily.
+	pipe_config->adjusted_mode.clock = clock.dot *
+		pipe_config->pixel_multiplier;
+}
+
+static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
+				    struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+	int link_freq, repeat;
+	u64 clock;
+	u32 link_m, link_n;
+
+	repeat = pipe_config->pixel_multiplier;
+
+	/*
+	 * The calculation for the data clock is:
+	 * pixel_clock = ((m/n) * (link_clock * nr_lanes * repeat)) / bpp
+	 * But we want to avoid losing precision if possible, so:
+	 * pixel_clock = ((m * link_clock * nr_lanes * repeat) / (n * bpp))
+	 *
+	 * and the link clock is simpler:
+	 * link_clock = (m * link_freq * repeat) / n
 	 */
 
-	return clock.dot;
+	/*
+	 * We need to get the FDI or DP link clock here to derive
+	 * the M/N dividers.
+	 *
+	 * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
+	 * For DP, it's either 1.62GHz or 2.7GHz.
+	 * We do our calculations in 10*MHz since we don't need much precision.
+	 */
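+	/*
+	 * Editor's note, a worked example with made-up register values, not
+	 * part of the original patch: for a DP link with link_freq = 270000
+	 * (a 2.7GHz link in 10 kHz units), link_m = 171, link_n = 282 and
+	 * repeat = 1, the code below computes
+	 * clock = 171 * 270000 / 282 = 163723, i.e. a ~163.7 MHz pixel clock.
+	 */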
+	if (pipe_config->has_pch_encoder)
+		link_freq = intel_fdi_link_freq(dev) * 10000;
+	else
+		link_freq = pipe_config->port_clock;
+
+	link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
+	link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));
+
+	if (!link_m || !link_n)
+		return;
+
+	clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
+	do_div(clock, link_n);
+
+	pipe_config->adjusted_mode.clock = clock;
 }
 
6969/** Returns the currently programmed mode of the given pipe. */ 7363/** Returns the currently programmed mode of the given pipe. */
@@ -6974,6 +7368,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 	struct drm_display_mode *mode;
+	struct intel_crtc_config pipe_config;
 	int htot = I915_READ(HTOTAL(cpu_transcoder));
 	int hsync = I915_READ(HSYNC(cpu_transcoder));
 	int vtot = I915_READ(VTOTAL(cpu_transcoder));
@@ -6983,7 +7378,18 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 	if (!mode)
 		return NULL;
 
-	mode->clock = intel_crtc_clock_get(dev, crtc);
+	/*
+	 * Construct a pipe_config sufficient for getting the clock info
+	 * back out of crtc_clock_get.
+	 *
+	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
+	 * to use a real value here instead.
+	 */
+	pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
+	pipe_config.pixel_multiplier = 1;
+	i9xx_crtc_clock_get(intel_crtc, &pipe_config);
+
+	mode->clock = pipe_config.adjusted_mode.clock;
 	mode->hdisplay = (htot & 0xffff) + 1;
 	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
 	mode->hsync_start = (hsync & 0xffff) + 1;
@@ -7067,13 +7473,19 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
 
 void intel_mark_busy(struct drm_device *dev)
 {
-	i915_update_gfx_val(dev->dev_private);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	hsw_package_c8_gpu_busy(dev_priv);
+	i915_update_gfx_val(dev_priv);
 }
 
 void intel_mark_idle(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 
+	hsw_package_c8_gpu_idle(dev_priv);
+
 	if (!i915_powersave)
 		return;
 
@@ -7238,7 +7650,8 @@ inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
 static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
-				 struct drm_i915_gem_object *obj)
+				 struct drm_i915_gem_object *obj,
+				 uint32_t flags)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7266,7 +7679,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
 	intel_ring_emit(ring, 0); /* aux display base address, unused */
 
 	intel_mark_page_flip_active(intel_crtc);
@@ -7282,7 +7695,8 @@ err:
 static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
-				 struct drm_i915_gem_object *obj)
+				 struct drm_i915_gem_object *obj,
+				 uint32_t flags)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7307,7 +7721,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
 	intel_ring_emit(ring, MI_NOOP);
 
 	intel_mark_page_flip_active(intel_crtc);
@@ -7323,7 +7737,8 @@ err:
 static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
-				 struct drm_i915_gem_object *obj)
+				 struct drm_i915_gem_object *obj,
+				 uint32_t flags)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7347,7 +7762,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 	intel_ring_emit(ring, fb->pitches[0]);
 	intel_ring_emit(ring,
-			(obj->gtt_offset + intel_crtc->dspaddr_offset) |
+			(i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
 			obj->tiling_mode);
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
@@ -7371,7 +7786,8 @@ err:
 static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
-				 struct drm_i915_gem_object *obj)
+				 struct drm_i915_gem_object *obj,
+				 uint32_t flags)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7390,7 +7806,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
-	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
 
 	/* Contrary to the suggestions in the documentation,
 	 * "Enable Panel Fitter" does not seem to be required when page
@@ -7421,7 +7837,8 @@ err:
 static int intel_gen7_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
-				 struct drm_i915_gem_object *obj)
+				 struct drm_i915_gem_object *obj,
+				 uint32_t flags)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7455,7 +7872,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 
 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
 	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
-	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
 	intel_ring_emit(ring, (MI_NOOP));
 
 	intel_mark_page_flip_active(intel_crtc);
@@ -7471,14 +7888,16 @@ err:
 static int intel_default_queue_flip(struct drm_device *dev,
 				    struct drm_crtc *crtc,
 				    struct drm_framebuffer *fb,
-				    struct drm_i915_gem_object *obj)
+				    struct drm_i915_gem_object *obj,
+				    uint32_t flags)
 {
 	return -ENODEV;
 }
 
 static int intel_crtc_page_flip(struct drm_crtc *crtc,
 				struct drm_framebuffer *fb,
-				struct drm_pending_vblank_event *event)
+				struct drm_pending_vblank_event *event,
+				uint32_t page_flip_flags)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7548,7 +7967,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	atomic_inc(&intel_crtc->unpin_work_count);
 	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 
-	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
+	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags);
 	if (ret)
 		goto cleanup_pending;
 
@@ -7792,7 +8211,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 			  struct drm_display_mode *mode)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_encoder_helper_funcs *encoder_funcs;
 	struct intel_encoder *encoder;
 	struct intel_crtc_config *pipe_config;
 	int plane_bpp, ret = -EINVAL;
@@ -7809,9 +8227,23 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 
 	drm_mode_copy(&pipe_config->adjusted_mode, mode);
 	drm_mode_copy(&pipe_config->requested_mode, mode);
-	pipe_config->cpu_transcoder = to_intel_crtc(crtc)->pipe;
+	pipe_config->cpu_transcoder =
+		(enum transcoder) to_intel_crtc(crtc)->pipe;
 	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
+	/*
+	 * Sanitize sync polarity flags based on the requested ones. If neither
+	 * positive nor negative polarity is requested, treat this as meaning
+	 * negative polarity.
+	 */
+	if (!(pipe_config->adjusted_mode.flags &
+	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
+		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+
+	if (!(pipe_config->adjusted_mode.flags &
+	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
+		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+
 	/* Compute a starting value for pipe_config->pipe_bpp taking the source
 	 * plane pixel format and any sink constraints into account. Returns the
 	 * source plane bpp so that dithering can be selected on mismatches
@@ -7826,6 +8258,9 @@ encoder_retry:
 	pipe_config->port_clock = 0;
 	pipe_config->pixel_multiplier = 1;
 
+	/* Fill in default crtc timings, allow encoders to overwrite them. */
+	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0);
+
 	/* Pass our mode to the connectors and the CRTC to give them a chance to
 	 * adjust it according to limitations or connector properties, and also
 	 * a chance to reject the mode entirely.
@@ -7836,20 +8271,8 @@ encoder_retry:
 		if (&encoder->new_crtc->base != crtc)
 			continue;
 
-		if (encoder->compute_config) {
-			if (!(encoder->compute_config(encoder, pipe_config))) {
-				DRM_DEBUG_KMS("Encoder config failure\n");
-				goto fail;
-			}
-
-			continue;
-		}
-
-		encoder_funcs = encoder->base.helper_private;
-		if (!(encoder_funcs->mode_fixup(&encoder->base,
-						&pipe_config->requested_mode,
-						&pipe_config->adjusted_mode))) {
-			DRM_DEBUG_KMS("Encoder fixup failed\n");
+		if (!(encoder->compute_config(encoder, pipe_config))) {
+			DRM_DEBUG_KMS("Encoder config failure\n");
 			goto fail;
 		}
 	}
@@ -8044,6 +8467,28 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
 
 }
 
+static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur,
+				    struct intel_crtc_config *new)
+{
+	int clock1, clock2, diff;
+
+	clock1 = cur->adjusted_mode.clock;
+	clock2 = new->adjusted_mode.clock;
+
+	if (clock1 == clock2)
+		return true;
+
+	if (!clock1 || !clock2)
+		return false;
+
+	diff = abs(clock1 - clock2);
+
+	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
+		return true;
+
+	return false;
+}
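+
+/*
+ * Editor's note, not part of the original patch: the check above treats two
+ * clocks as equal when their difference is below roughly 5% of their sum.
+ * E.g. clock1 = 148500 and clock2 = 154000 give
+ * ((5500 + 302500) * 100) / 302500 = 101, which is < 105, so the two
+ * configurations are considered a match.
+ */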
+
 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
 	list_for_each_entry((intel_crtc), \
 			    &(dev)->mode_config.crtc_list, \
@@ -8075,7 +8520,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 
 #define PIPE_CONF_CHECK_FLAGS(name, mask) \
 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
-		DRM_ERROR("mismatch in " #name " " \
+		DRM_ERROR("mismatch in " #name "(" #mask ") " \
 			  "(expected %i, found %i)\n", \
 			  current_config->name & (mask), \
 			  pipe_config->name & (mask)); \
@@ -8109,8 +8554,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
 	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
 
-	if (!HAS_PCH_SPLIT(dev))
-		PIPE_CONF_CHECK_I(pixel_multiplier);
+	PIPE_CONF_CHECK_I(pixel_multiplier);
 
 	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
 			      DRM_MODE_FLAG_INTERLACE);
@@ -8141,6 +8585,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 
 	PIPE_CONF_CHECK_I(shared_dpll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
+	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
 	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
 	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
 
@@ -8149,6 +8594,15 @@ intel_pipe_config_compare(struct drm_device *dev,
 #undef PIPE_CONF_CHECK_FLAGS
 #undef PIPE_CONF_QUIRK
 
+	if (!IS_HASWELL(dev)) {
+		if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
+			DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
+				  current_config->adjusted_mode.clock,
+				  pipe_config->adjusted_mode.clock);
+			return false;
+		}
+	}
+
 	return true;
 }
 
@@ -8272,12 +8726,17 @@ check_crtc_state(struct drm_device *dev)
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
 			    base.head) {
+		enum pipe pipe;
 		if (encoder->base.crtc != &crtc->base)
 			continue;
-		if (encoder->get_config)
+		if (encoder->get_config &&
+		    encoder->get_hw_state(encoder, &pipe))
 			encoder->get_config(encoder, &pipe_config);
 	}
 
+	if (dev_priv->display.get_clock)
+		dev_priv->display.get_clock(crtc, &pipe_config);
+
 	WARN(crtc->active != active,
 	     "crtc active state doesn't match with hw state "
 	     "(expected %i, found %i)\n", crtc->active, active);
@@ -8317,6 +8776,8 @@ check_shared_dpll_state(struct drm_device *dev)
 		     pll->active, pll->refcount);
 		WARN(pll->active && !pll->on,
 		     "pll in active use but not on in sw tracking\n");
+		WARN(pll->on && !pll->active,
+		     "pll is on but not in use in sw tracking\n");
 		WARN(pll->on != active,
 		     "pll on state mismatch (expected %i, found %i)\n",
 		     pll->on, active);
@@ -8453,9 +8914,9 @@ out:
 	return ret;
 }
 
-int intel_set_mode(struct drm_crtc *crtc,
-		   struct drm_display_mode *mode,
-		   int x, int y, struct drm_framebuffer *fb)
+static int intel_set_mode(struct drm_crtc *crtc,
+			  struct drm_display_mode *mode,
+			  int x, int y, struct drm_framebuffer *fb)
 {
 	int ret;
 
@@ -8541,15 +9002,20 @@ static void intel_set_config_restore_state(struct drm_device *dev,
 }
 
 static bool
-is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors,
-		      int num_connectors)
+is_crtc_connector_off(struct drm_mode_set *set)
 {
 	int i;
 
-	for (i = 0; i < num_connectors; i++)
-		if (connectors[i].encoder &&
-		    connectors[i].encoder->crtc == crtc &&
-		    connectors[i].dpms != DRM_MODE_DPMS_ON)
+	if (set->num_connectors == 0)
+		return false;
+
+	if (WARN_ON(set->connectors == NULL))
+		return false;
+
+	for (i = 0; i < set->num_connectors; i++)
+		if (set->connectors[i]->encoder &&
+		    set->connectors[i]->encoder->crtc == set->crtc &&
+		    set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
 			return true;
 
 	return false;
@@ -8562,15 +9028,21 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
 
 	/* We should be able to check here if the fb has the same properties
 	 * and then just flip_or_move it */
-	if (set->connectors != NULL &&
-	    is_crtc_connector_off(set->crtc, *set->connectors,
-				  set->num_connectors)) {
-		config->mode_changed = true;
+	if (is_crtc_connector_off(set)) {
+		config->mode_changed = true;
 	} else if (set->crtc->fb != set->fb) {
 		/* If we have no fb then treat it as a full mode set */
 		if (set->crtc->fb == NULL) {
-			DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
-			config->mode_changed = true;
+			struct intel_crtc *intel_crtc =
+				to_intel_crtc(set->crtc);
+
+			if (intel_crtc->active && i915_fastboot) {
+				DRM_DEBUG_KMS("crtc has no fb, will flip\n");
+				config->fb_changed = true;
+			} else {
+				DRM_DEBUG_KMS("inactive crtc, full mode set\n");
+				config->mode_changed = true;
+			}
 		} else if (set->fb == NULL) {
 			config->mode_changed = true;
 		} else if (set->fb->pixel_format !=
@@ -8590,6 +9062,9 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
 		drm_mode_debug_printmodeline(set->mode);
 		config->mode_changed = true;
 	}
+
+	DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
+			set->crtc->base.id, config->mode_changed, config->fb_changed);
 }
 
 static int
@@ -8600,14 +9075,13 @@ intel_modeset_stage_output_state(struct drm_device *dev,
 	struct drm_crtc *new_crtc;
 	struct intel_connector *connector;
 	struct intel_encoder *encoder;
-	int count, ro;
+	int ro;
 
 	/* The upper layers ensure that we either disable a crtc or have a list
 	 * of connectors. For paranoia, double-check this. */
 	WARN_ON(!set->fb && (set->num_connectors != 0));
 	WARN_ON(set->fb && (set->num_connectors == 0));
 
-	count = 0;
 	list_for_each_entry(connector, &dev->mode_config.connector_list,
 			    base.head) {
 		/* Otherwise traverse passed in connector list and get encoders
@@ -8641,7 +9115,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
 	/* connector->new_encoder is now updated for all connectors. */
 
 	/* Update crtc of enabled connectors. */
-	count = 0;
 	list_for_each_entry(connector, &dev->mode_config.connector_list,
 			    base.head) {
 		if (!connector->new_encoder)
@@ -8800,19 +9273,32 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
 	return val & DPLL_VCO_ENABLE;
 }
 
+static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
+				  struct intel_shared_dpll *pll)
+{
+	I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
+	I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
+}
+
 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
 				struct intel_shared_dpll *pll)
 {
-	uint32_t reg, val;
-
 	/* PCH refclock must be enabled first */
 	assert_pch_refclk_enabled(dev_priv);
 
-	reg = PCH_DPLL(pll->id);
-	val = I915_READ(reg);
-	val |= DPLL_VCO_ENABLE;
-	I915_WRITE(reg, val);
-	POSTING_READ(reg);
+	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+
+	/* Wait for the clocks to stabilize. */
+	POSTING_READ(PCH_DPLL(pll->id));
+	udelay(150);
+
+	/* The pixel multiplier can only be updated once the
+	 * DPLL is enabled and the clocks are stable.
+	 *
+	 * So write it again.
+	 */
+	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+	POSTING_READ(PCH_DPLL(pll->id));
 	udelay(200);
 }
 
@@ -8821,7 +9307,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
 {
 	struct drm_device *dev = dev_priv->dev;
 	struct intel_crtc *crtc;
-	uint32_t reg, val;
 
 	/* Make sure no transcoder is still depending on us. */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
@@ -8829,11 +9314,8 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
 		assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
 	}
 
-	reg = PCH_DPLL(pll->id);
-	val = I915_READ(reg);
-	val &= ~DPLL_VCO_ENABLE;
-	I915_WRITE(reg, val);
-	POSTING_READ(reg);
+	I915_WRITE(PCH_DPLL(pll->id), 0);
+	POSTING_READ(PCH_DPLL(pll->id));
 	udelay(200);
 }
 
@@ -8852,6 +9334,7 @@ static void ibx_pch_dpll_init(struct drm_device *dev)
 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
 		dev_priv->shared_dplls[i].id = i;
 		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
+		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
 		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
 		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
 		dev_priv->shared_dplls[i].get_hw_state =
@@ -9031,8 +9514,13 @@ static void intel_setup_outputs(struct drm_device *dev)
 		intel_dp_init(dev, PCH_DP_D, PORT_D);
 	} else if (IS_VALLEYVIEW(dev)) {
 		/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
-		if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
-			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
+		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
+			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
+					PORT_C);
+			if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
+				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
+					      PORT_C);
+		}
 
 		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
 			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
@@ -9092,13 +9580,17 @@ static void intel_setup_outputs(struct drm_device *dev)
 	drm_helper_move_panel_connectors_to_head(dev);
 }
 
+void intel_framebuffer_fini(struct intel_framebuffer *fb)
+{
+	drm_framebuffer_cleanup(&fb->base);
+	drm_gem_object_unreference_unlocked(&fb->obj->base);
+}
+
 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 
-	drm_framebuffer_cleanup(fb);
-	drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
-
+	intel_framebuffer_fini(intel_fb);
 	kfree(intel_fb);
 }
 
@@ -9268,6 +9760,7 @@ static void intel_init_display(struct drm_device *dev)
 		dev_priv->display.update_plane = ironlake_update_plane;
 	} else if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
+		dev_priv->display.get_clock = ironlake_crtc_clock_get;
 		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
 		dev_priv->display.crtc_enable = ironlake_crtc_enable;
 		dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -9275,6 +9768,7 @@ static void intel_init_display(struct drm_device *dev)
 		dev_priv->display.update_plane = ironlake_update_plane;
 	} else if (IS_VALLEYVIEW(dev)) {
 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+		dev_priv->display.get_clock = i9xx_crtc_clock_get;
 		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9282,6 +9776,7 @@ static void intel_init_display(struct drm_device *dev)
 		dev_priv->display.update_plane = i9xx_update_plane;
 	} else {
 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+		dev_priv->display.get_clock = i9xx_crtc_clock_get;
 		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9299,9 +9794,12 @@ static void intel_init_display(struct drm_device *dev)
 	else if (IS_I915G(dev))
 		dev_priv->display.get_display_clock_speed =
 			i915_get_display_clock_speed;
-	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
+	else if (IS_I945GM(dev) || IS_845G(dev))
 		dev_priv->display.get_display_clock_speed =
 			i9xx_misc_get_display_clock_speed;
+	else if (IS_PINEVIEW(dev))
+		dev_priv->display.get_display_clock_speed =
+			pnv_get_display_clock_speed;
 	else if (IS_I915GM(dev))
 		dev_priv->display.get_display_clock_speed =
 			i915gm_get_display_clock_speed;
@@ -9398,6 +9896,17 @@ static void quirk_invert_brightness(struct drm_device *dev)
 	DRM_INFO("applying inverted panel brightness quirk\n");
 }
 
+/*
+ * Some machines (Dell XPS13) suffer broken backlight controls if
+ * BLM_PCH_PWM_ENABLE is set.
+ */
+static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
+	DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
+}
+
 struct intel_quirk {
 	int device;
 	int subsystem_vendor;
@@ -9467,6 +9976,11 @@ static struct intel_quirk intel_quirks[] = {
 
 	/* Acer Aspire 4736Z */
 	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+	/* Dell XPS13 HD Sandy Bridge */
+	{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
+	/* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
+	{ 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
@@ -9566,7 +10080,7 @@ void intel_modeset_init(struct drm_device *dev)
 		      INTEL_INFO(dev)->num_pipes,
 		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
 
-	for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+	for_each_pipe(i) {
 		intel_crtc_init(dev, i);
 		for (j = 0; j < dev_priv->num_plane; j++) {
 			ret = intel_plane_init(dev, i, j);
@@ -9772,6 +10286,17 @@ void i915_redisable_vga(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 vga_reg = i915_vgacntrl_reg(dev);
 
+	/* This function can be called both from intel_modeset_setup_hw_state or
+	 * at a very early point in our resume sequence, where the power well
+	 * structures are not yet restored. Since this function is at a very
+	 * paranoid "someone might have enabled VGA while we were not looking"
+	 * level, just check if the power well is enabled instead of trying to
+	 * follow the "don't touch the power well if we don't need it" policy
+	 * the rest of the driver uses. */
+	if (HAS_POWER_WELL(dev) &&
+	    (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
+		return;
+
 	if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
 		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
 		i915_disable_vga(dev);
@@ -9817,8 +10342,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 		}
 		pll->refcount = pll->active;
 
-		DRM_DEBUG_KMS("%s hw state readout: refcount %i\n",
-			      pll->name, pll->refcount);
+		DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
+			      pll->name, pll->refcount, pll->on);
 	}
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -9842,6 +10367,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 			 pipe);
 	}
 
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+			    base.head) {
+		if (!crtc->active)
+			continue;
+		if (dev_priv->display.get_clock)
+			dev_priv->display.get_clock(crtc,
+						    &crtc->config);
+	}
+
 	list_for_each_entry(connector, &dev->mode_config.connector_list,
 			    base.head) {
 		if (connector->get_hw_state(connector)) {
@@ -9869,9 +10403,26 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 	struct drm_plane *plane;
 	struct intel_crtc *crtc;
 	struct intel_encoder *encoder;
+	int i;
 
 	intel_modeset_readout_hw_state(dev);
 
+	/*
+	 * Now that we have the config, copy it to each CRTC struct
+	 * Note that this could go away if we move to using crtc_config
+	 * checking everywhere.
+	 */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+			    base.head) {
+		if (crtc->active && i915_fastboot) {
+			intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
+
+			DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
+				      crtc->base.base.id);
+			drm_mode_debug_printmodeline(&crtc->base.mode);
+		}
+	}
+
 	/* HW state is read out, now we need to sanitize this mess. */
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
 			    base.head) {
@@ -9884,6 +10435,18 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 		intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
 	}
 
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+		if (!pll->on || pll->active)
+			continue;
+
+		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
+
+		pll->disable(dev_priv, pll);
+		pll->on = false;
+	}
+
 	if (force_restore) {
 		/*
 		 * We need to use raw interfaces for restoring state to avoid
@@ -9922,7 +10485,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
-	struct intel_crtc *intel_crtc;
 
 	/*
 	 * Interrupts and polling as the first thing to avoid creating havoc.
@@ -9946,7 +10508,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 		if (!crtc->fb)
 			continue;
 
-		intel_crtc = to_intel_crtc(crtc);
 		intel_increase_pllclock(crtc);
 	}
 
@@ -10002,13 +10563,12 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
 	return 0;
 }
 
-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-
 struct intel_display_error_state {
 
 	u32 power_well_driver;
 
+	int num_transcoders;
+
 	struct intel_cursor_error_state {
 		u32 control;
 		u32 position;
@@ -10017,16 +10577,7 @@ struct intel_display_error_state {
 	} cursor[I915_MAX_PIPES];
 
 	struct intel_pipe_error_state {
-		enum transcoder cpu_transcoder;
-		u32 conf;
 		u32 source;
-
-		u32 htotal;
-		u32 hblank;
-		u32 hsync;
-		u32 vtotal;
-		u32 vblank;
-		u32 vsync;
 	} pipe[I915_MAX_PIPES];
 
 	struct intel_plane_error_state {
@@ -10038,6 +10589,19 @@ struct intel_display_error_state {
 		u32 surface;
 		u32 tile_offset;
 	} plane[I915_MAX_PIPES];
+
+	struct intel_transcoder_error_state {
+		enum transcoder cpu_transcoder;
+
+		u32 conf;
+
+		u32 htotal;
+		u32 hblank;
+		u32 hsync;
+		u32 vtotal;
+		u32 vblank;
+		u32 vsync;
+	} transcoder[4];
 };
 
 struct intel_display_error_state *
@@ -10045,9 +10609,17 @@ intel_display_capture_error_state(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_display_error_state *error;
-	enum transcoder cpu_transcoder;
+	int transcoders[] = {
+		TRANSCODER_A,
+		TRANSCODER_B,
+		TRANSCODER_C,
+		TRANSCODER_EDP,
+	};
 	int i;
 
+	if (INTEL_INFO(dev)->num_pipes == 0)
+		return NULL;
+
 	error = kmalloc(sizeof(*error), GFP_ATOMIC);
 	if (error == NULL)
 		return NULL;
@@ -10056,9 +10628,6 @@ intel_display_capture_error_state(struct drm_device *dev)
 	error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
 
 	for_each_pipe(i) {
-		cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
-		error->pipe[i].cpu_transcoder = cpu_transcoder;
-
 		if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
 			error->cursor[i].control = I915_READ(CURCNTR(i));
 			error->cursor[i].position = I915_READ(CURPOS(i));
@@ -10082,22 +10651,32 @@ intel_display_capture_error_state(struct drm_device *dev)
 		error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
 	}
 
-		error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
 		error->pipe[i].source = I915_READ(PIPESRC(i));
-		error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
-		error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
-		error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
-		error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
-		error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
-		error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+	}
+
+	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+	if (HAS_DDI(dev_priv->dev))
+		error->num_transcoders++; /* Account for eDP. */
+
+	for (i = 0; i < error->num_transcoders; i++) {
+		enum transcoder cpu_transcoder = transcoders[i];
+
+		error->transcoder[i].cpu_transcoder = cpu_transcoder;
+
+		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
+		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
 	}
 
 	/* In the code above we read the registers without checking if the power
 	 * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
 	 * prevent the next I915_WRITE from detecting it and printing an error
 	 * message. */
-	if (HAS_POWER_WELL(dev))
-		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	intel_uncore_clear_errors(dev);
 
 	return error;
 }
@@ -10111,22 +10690,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
10111{ 10690{
10112 int i; 10691 int i;
10113 10692
10693 if (!error)
10694 return;
10695
10114 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); 10696 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
10115 if (HAS_POWER_WELL(dev)) 10697 if (HAS_POWER_WELL(dev))
10116 err_printf(m, "PWR_WELL_CTL2: %08x\n", 10698 err_printf(m, "PWR_WELL_CTL2: %08x\n",
10117 error->power_well_driver); 10699 error->power_well_driver);
10118 for_each_pipe(i) { 10700 for_each_pipe(i) {
10119 err_printf(m, "Pipe [%d]:\n", i); 10701 err_printf(m, "Pipe [%d]:\n", i);
10120 err_printf(m, " CPU transcoder: %c\n",
10121 transcoder_name(error->pipe[i].cpu_transcoder));
10122 err_printf(m, " CONF: %08x\n", error->pipe[i].conf);
10123 err_printf(m, " SRC: %08x\n", error->pipe[i].source); 10702 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
10124 err_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
10125 err_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
10126 err_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
10127 err_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
10128 err_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
10129 err_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
10130 10703
10131 err_printf(m, "Plane [%d]:\n", i); 10704 err_printf(m, "Plane [%d]:\n", i);
10132 err_printf(m, " CNTR: %08x\n", error->plane[i].control); 10705 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
@@ -10147,5 +10720,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
10147 err_printf(m, " POS: %08x\n", error->cursor[i].position); 10720 err_printf(m, " POS: %08x\n", error->cursor[i].position);
10148 err_printf(m, " BASE: %08x\n", error->cursor[i].base); 10721 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
10149 } 10722 }
10723
10724 for (i = 0; i < error->num_transcoders; i++) {
10725 err_printf(m, " CPU transcoder: %c\n",
10726 transcoder_name(error->transcoder[i].cpu_transcoder));
10727 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
10728 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
10729 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
10730 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
10731 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
10732 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
10733 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
10734 }
10150} 10735}
10151#endif
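
The hunks above move the CPU transcoder timing registers out of the per-pipe error state and into a dedicated per-transcoder array, sized as num_pipes plus one extra slot for the eDP transcoder on DDI hardware. A minimal standalone C sketch of that capture pattern follows; read_reg(), NUM_PIPES and HAS_DDI are hypothetical stand-ins for the driver's MMIO read and platform queries, not its actual API.

/*
 * Sketch (not driver code): per-transcoder error-state capture.
 * Assumed stand-ins: read_reg() for I915_READ, NUM_PIPES/HAS_DDI
 * for INTEL_INFO(dev)->num_pipes and HAS_DDI().
 */
#include <stdint.h>
#include <stdio.h>

enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_EDP };

struct transcoder_error_state {
	enum transcoder cpu_transcoder;
	uint32_t conf, htotal, vtotal;
};

/* Placeholder for an MMIO read keyed by transcoder. */
static uint32_t read_reg(enum transcoder t)
{
	return 0x80000000u | (uint32_t)t;
}

int main(void)
{
	static const enum transcoder table[] = {
		TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_EDP,
	};
	enum { NUM_PIPES = 3, HAS_DDI = 1 };
	struct transcoder_error_state err[4];
	/* The eDP transcoder exists only on DDI parts, hence the +1. */
	int num = NUM_PIPES + (HAS_DDI ? 1 : 0);

	for (int i = 0; i < num; i++) {
		err[i].cpu_transcoder = table[i];
		err[i].conf = read_reg(table[i]);
		err[i].htotal = read_reg(table[i]);
		err[i].vtotal = read_reg(table[i]);
		printf("transcoder %d: CONF %08x\n", i, (unsigned)err[i].conf);
	}
	return 0;
}
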
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 26e162bb3a51..2151d13772b8 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -276,29 +276,13 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
276 return status; 276 return status;
277} 277}
278 278
279static int 279static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
280intel_dp_aux_ch(struct intel_dp *intel_dp, 280 int index)
281 uint8_t *send, int send_bytes,
282 uint8_t *recv, int recv_size)
283{ 281{
284 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 282 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
285 struct drm_device *dev = intel_dig_port->base.base.dev; 283 struct drm_device *dev = intel_dig_port->base.base.dev;
286 struct drm_i915_private *dev_priv = dev->dev_private; 284 struct drm_i915_private *dev_priv = dev->dev_private;
287 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
288 uint32_t ch_data = ch_ctl + 4;
289 int i, ret, recv_bytes;
290 uint32_t status;
291 uint32_t aux_clock_divider;
292 int try, precharge;
293 bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
294 285
295 /* dp aux is extremely sensitive to irq latency, hence request the
296 * lowest possible wakeup latency and so prevent the cpu from going into
297 * deep sleep states.
298 */
299 pm_qos_update_request(&dev_priv->pm_qos, 0);
300
301 intel_dp_check_edp(intel_dp);
302 /* The clock divider is based off the hrawclk, 286 /* The clock divider is based off the hrawclk,
303 * and would like to run at 2MHz. So, take the 287 * and would like to run at 2MHz. So, take the
304 * hrawclk value and divide by 2 and use that 288 * hrawclk value and divide by 2 and use that
@@ -307,29 +291,61 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
307 * clock divider. 291 * clock divider.
308 */ 292 */
309 if (IS_VALLEYVIEW(dev)) { 293 if (IS_VALLEYVIEW(dev)) {
310 aux_clock_divider = 100; 294 return index ? 0 : 100;
311 } else if (intel_dig_port->port == PORT_A) { 295 } else if (intel_dig_port->port == PORT_A) {
296 if (index)
297 return 0;
312 if (HAS_DDI(dev)) 298 if (HAS_DDI(dev))
313 aux_clock_divider = DIV_ROUND_CLOSEST( 299 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
314 intel_ddi_get_cdclk_freq(dev_priv), 2000);
315 else if (IS_GEN6(dev) || IS_GEN7(dev)) 300 else if (IS_GEN6(dev) || IS_GEN7(dev))
316			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400MHz */	301			return 200; /* SNB & IVB eDP input clock at 400MHz */
317		else	302		else
318			aux_clock_divider = 225; /* eDP input clock at 450MHz */	303			return 225; /* eDP input clock at 450MHz */
319 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 304 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
320 /* Workaround for non-ULT HSW */ 305 /* Workaround for non-ULT HSW */
321 aux_clock_divider = 74; 306 switch (index) {
307 case 0: return 63;
308 case 1: return 72;
309 default: return 0;
310 }
322 } else if (HAS_PCH_SPLIT(dev)) { 311 } else if (HAS_PCH_SPLIT(dev)) {
323 aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); 312 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
324 } else { 313 } else {
325		aux_clock_divider = intel_hrawclk(dev) / 2;	314		return index ? 0 : intel_hrawclk(dev) / 2;
326 } 315 }
316}
317
318static int
319intel_dp_aux_ch(struct intel_dp *intel_dp,
320 uint8_t *send, int send_bytes,
321 uint8_t *recv, int recv_size)
322{
323 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
324 struct drm_device *dev = intel_dig_port->base.base.dev;
325 struct drm_i915_private *dev_priv = dev->dev_private;
326 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
327 uint32_t ch_data = ch_ctl + 4;
328 uint32_t aux_clock_divider;
329 int i, ret, recv_bytes;
330 uint32_t status;
331 int try, precharge, clock = 0;
332 bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
333
334 /* dp aux is extremely sensitive to irq latency, hence request the
335 * lowest possible wakeup latency and so prevent the cpu from going into
336 * deep sleep states.
337 */
338 pm_qos_update_request(&dev_priv->pm_qos, 0);
339
340 intel_dp_check_edp(intel_dp);
327 341
328 if (IS_GEN6(dev)) 342 if (IS_GEN6(dev))
329 precharge = 3; 343 precharge = 3;
330 else 344 else
331 precharge = 5; 345 precharge = 5;
332 346
347 intel_aux_display_runtime_get(dev_priv);
348
333 /* Try to wait for any previous AUX channel activity */ 349 /* Try to wait for any previous AUX channel activity */
334 for (try = 0; try < 3; try++) { 350 for (try = 0; try < 3; try++) {
335 status = I915_READ_NOTRACE(ch_ctl); 351 status = I915_READ_NOTRACE(ch_ctl);
@@ -345,37 +361,41 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
345 goto out; 361 goto out;
346 } 362 }
347 363
348 /* Must try at least 3 times according to DP spec */ 364 while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
349 for (try = 0; try < 5; try++) { 365 /* Must try at least 3 times according to DP spec */
350 /* Load the send data into the aux channel data registers */ 366 for (try = 0; try < 5; try++) {
351 for (i = 0; i < send_bytes; i += 4) 367 /* Load the send data into the aux channel data registers */
352 I915_WRITE(ch_data + i, 368 for (i = 0; i < send_bytes; i += 4)
353 pack_aux(send + i, send_bytes - i)); 369 I915_WRITE(ch_data + i,
354 370 pack_aux(send + i, send_bytes - i));
355 /* Send the command and wait for it to complete */ 371
356 I915_WRITE(ch_ctl, 372 /* Send the command and wait for it to complete */
357 DP_AUX_CH_CTL_SEND_BUSY | 373 I915_WRITE(ch_ctl,
358 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | 374 DP_AUX_CH_CTL_SEND_BUSY |
359 DP_AUX_CH_CTL_TIME_OUT_400us | 375 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
360 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 376 DP_AUX_CH_CTL_TIME_OUT_400us |
361 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 377 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
362 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | 378 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
363 DP_AUX_CH_CTL_DONE | 379 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
364 DP_AUX_CH_CTL_TIME_OUT_ERROR | 380 DP_AUX_CH_CTL_DONE |
365 DP_AUX_CH_CTL_RECEIVE_ERROR); 381 DP_AUX_CH_CTL_TIME_OUT_ERROR |
366 382 DP_AUX_CH_CTL_RECEIVE_ERROR);
367 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); 383
368 384 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
369 /* Clear done status and any errors */ 385
370 I915_WRITE(ch_ctl, 386 /* Clear done status and any errors */
371 status | 387 I915_WRITE(ch_ctl,
372 DP_AUX_CH_CTL_DONE | 388 status |
373 DP_AUX_CH_CTL_TIME_OUT_ERROR | 389 DP_AUX_CH_CTL_DONE |
374 DP_AUX_CH_CTL_RECEIVE_ERROR); 390 DP_AUX_CH_CTL_TIME_OUT_ERROR |
375 391 DP_AUX_CH_CTL_RECEIVE_ERROR);
376 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | 392
377 DP_AUX_CH_CTL_RECEIVE_ERROR)) 393 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
378 continue; 394 DP_AUX_CH_CTL_RECEIVE_ERROR))
395 continue;
396 if (status & DP_AUX_CH_CTL_DONE)
397 break;
398 }
379 if (status & DP_AUX_CH_CTL_DONE) 399 if (status & DP_AUX_CH_CTL_DONE)
380 break; 400 break;
381 } 401 }
@@ -416,6 +436,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
416 ret = recv_bytes; 436 ret = recv_bytes;
417out: 437out:
418 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); 438 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
439 intel_aux_display_runtime_put(dev_priv);
419 440
420 return ret; 441 return ret;
421} 442}
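
The refactor above replaces the single precomputed AUX clock divider with get_aux_clock_divider(intel_dp, index), which yields successive candidate dividers and returns 0 once the platform has no more to offer (non-ULT Haswell tries 63 then 72; most platforms have exactly one). The outer while loop therefore walks a zero-terminated divider sequence, re-running the whole five-attempt transfer for each candidate. A small self-contained C sketch of that control flow, with hypothetical get_divider()/transfer_done() stand-ins:

/*
 * Sketch (not driver code): walk a zero-terminated divider sequence,
 * retrying the transfer up to five times per divider. get_divider()
 * and transfer_done() are hypothetical stand-ins.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t get_divider(int index)
{
	/* Non-ULT HSW tries 63 then 72; most platforms offer just one. */
	static const uint32_t dividers[] = { 63, 72 };
	return index < 2 ? dividers[index] : 0;	/* 0 terminates the walk */
}

static int transfer_done(uint32_t divider, int attempt)
{
	return divider == 72 && attempt == 1;	/* pretend the 2nd try works */
}

int main(void)
{
	uint32_t divider;
	int clock = 0, done = 0;

	while ((divider = get_divider(clock++))) {
		/* DP spec: must try at least 3 times per divider. */
		for (int attempt = 0; attempt < 5; attempt++) {
			if (transfer_done(divider, attempt)) {
				done = 1;
				break;
			}
		}
		if (done)
			break;
	}
	printf("%s, divider %u\n", done ? "done" : "exhausted", (unsigned)divider);
	return 0;
}
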
@@ -710,8 +731,11 @@ intel_dp_compute_config(struct intel_encoder *encoder,
710 /* Walk through all bpp values. Luckily they're all nicely spaced with 2 731 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
711 * bpc in between. */ 732 * bpc in between. */
712 bpp = pipe_config->pipe_bpp; 733 bpp = pipe_config->pipe_bpp;
713 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) 734 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) {
735 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
736 dev_priv->vbt.edp_bpp);
714 bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp); 737 bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
738 }
715 739
716 for (; bpp >= 6*3; bpp -= 2*3) { 740 for (; bpp >= 6*3; bpp -= 2*3) {
717 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); 741 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
@@ -812,15 +836,14 @@ static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
812 udelay(500); 836 udelay(500);
813} 837}
814 838
815static void 839static void intel_dp_mode_set(struct intel_encoder *encoder)
816intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
817 struct drm_display_mode *adjusted_mode)
818{ 840{
819 struct drm_device *dev = encoder->dev; 841 struct drm_device *dev = encoder->base.dev;
820 struct drm_i915_private *dev_priv = dev->dev_private; 842 struct drm_i915_private *dev_priv = dev->dev_private;
821 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 843 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
822 enum port port = dp_to_dig_port(intel_dp)->port; 844 enum port port = dp_to_dig_port(intel_dp)->port;
823 struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); 845 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
846 struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
824 847
825 /* 848 /*
826 * There are four kinds of DP registers: 849 * There are four kinds of DP registers:
@@ -852,7 +875,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
852 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", 875 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
853 pipe_name(crtc->pipe)); 876 pipe_name(crtc->pipe));
854 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 877 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
855 intel_write_eld(encoder, adjusted_mode); 878 intel_write_eld(&encoder->base, adjusted_mode);
856 } 879 }
857 880
858 intel_dp_init_link_config(intel_dp); 881 intel_dp_init_link_config(intel_dp);
@@ -1360,6 +1383,275 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1360 } 1383 }
1361 1384
1362 pipe_config->adjusted_mode.flags |= flags; 1385 pipe_config->adjusted_mode.flags |= flags;
1386
1387 if (dp_to_dig_port(intel_dp)->port == PORT_A) {
1388 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
1389 pipe_config->port_clock = 162000;
1390 else
1391 pipe_config->port_clock = 270000;
1392 }
1393}
1394
1395static bool is_edp_psr(struct intel_dp *intel_dp)
1396{
1397 return is_edp(intel_dp) &&
1398 intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
1399}
1400
1401static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1402{
1403 struct drm_i915_private *dev_priv = dev->dev_private;
1404
1405 if (!IS_HASWELL(dev))
1406 return false;
1407
1408 return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
1409}
1410
1411static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
1412 struct edp_vsc_psr *vsc_psr)
1413{
1414 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1415 struct drm_device *dev = dig_port->base.base.dev;
1416 struct drm_i915_private *dev_priv = dev->dev_private;
1417 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1418 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
1419 u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
1420 uint32_t *data = (uint32_t *) vsc_psr;
1421 unsigned int i;
1422
1423	/* As per BSpec (Pipe Video Data Island Packet), the video DIP must
1424	   be disabled before the video DIP data buffer registers are
1425	   programmed, and only re-enabled afterwards. */
1426 I915_WRITE(ctl_reg, 0);
1427 POSTING_READ(ctl_reg);
1428
1429 for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
1430 if (i < sizeof(struct edp_vsc_psr))
1431 I915_WRITE(data_reg + i, *data++);
1432 else
1433 I915_WRITE(data_reg + i, 0);
1434 }
1435
1436 I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
1437 POSTING_READ(ctl_reg);
1438}
1439
1440static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1441{
1442 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1443 struct drm_i915_private *dev_priv = dev->dev_private;
1444 struct edp_vsc_psr psr_vsc;
1445
1446 if (intel_dp->psr_setup_done)
1447 return;
1448
1449	/* Prepare VSC packet as per eDP 1.3 spec, Table 3.10 */
1450 memset(&psr_vsc, 0, sizeof(psr_vsc));
1451 psr_vsc.sdp_header.HB0 = 0;
1452 psr_vsc.sdp_header.HB1 = 0x7;
1453 psr_vsc.sdp_header.HB2 = 0x2;
1454 psr_vsc.sdp_header.HB3 = 0x8;
1455 intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
1456
1457 /* Avoid continuous PSR exit by masking memup and hpd */
1458 I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
1459 EDP_PSR_DEBUG_MASK_HPD);
1460
1461 intel_dp->psr_setup_done = true;
1462}
1463
1464static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
1465{
1466 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1467 struct drm_i915_private *dev_priv = dev->dev_private;
1468 uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0);
1469 int precharge = 0x3;
1470 int msg_size = 5; /* Header(4) + Message(1) */
1471
1472 /* Enable PSR in sink */
1473 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
1474 intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
1475 DP_PSR_ENABLE &
1476 ~DP_PSR_MAIN_LINK_ACTIVE);
1477 else
1478 intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
1479 DP_PSR_ENABLE |
1480 DP_PSR_MAIN_LINK_ACTIVE);
1481
1482 /* Setup AUX registers */
1483 I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND);
1484 I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION);
1485 I915_WRITE(EDP_PSR_AUX_CTL,
1486 DP_AUX_CH_CTL_TIME_OUT_400us |
1487 (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1488 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1489 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
1490}
1491
1492static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1493{
1494 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1495 struct drm_i915_private *dev_priv = dev->dev_private;
1496 uint32_t max_sleep_time = 0x1f;
1497 uint32_t idle_frames = 1;
1498 uint32_t val = 0x0;
1499
1500 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
1501 val |= EDP_PSR_LINK_STANDBY;
1502 val |= EDP_PSR_TP2_TP3_TIME_0us;
1503 val |= EDP_PSR_TP1_TIME_0us;
1504 val |= EDP_PSR_SKIP_AUX_EXIT;
1505 } else
1506 val |= EDP_PSR_LINK_DISABLE;
1507
1508 I915_WRITE(EDP_PSR_CTL, val |
1509 EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
1510 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
1511 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
1512 EDP_PSR_ENABLE);
1513}
1514
1515static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1516{
1517 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1518 struct drm_device *dev = dig_port->base.base.dev;
1519 struct drm_i915_private *dev_priv = dev->dev_private;
1520 struct drm_crtc *crtc = dig_port->base.base.crtc;
1521 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1522 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
1523 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
1524
1525 if (!IS_HASWELL(dev)) {
1526 DRM_DEBUG_KMS("PSR not supported on this platform\n");
1527 dev_priv->no_psr_reason = PSR_NO_SOURCE;
1528 return false;
1529 }
1530
1531 if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
1532 (dig_port->port != PORT_A)) {
1533 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
1534 dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
1535 return false;
1536 }
1537
1538 if (!is_edp_psr(intel_dp)) {
1539 DRM_DEBUG_KMS("PSR not supported by this panel\n");
1540 dev_priv->no_psr_reason = PSR_NO_SINK;
1541 return false;
1542 }
1543
1544 if (!i915_enable_psr) {
1545		DRM_DEBUG_KMS("PSR disabled by flag\n");
1546 dev_priv->no_psr_reason = PSR_MODULE_PARAM;
1547 return false;
1548 }
1549
1550 crtc = dig_port->base.base.crtc;
1551 if (crtc == NULL) {
1552 DRM_DEBUG_KMS("crtc not active for PSR\n");
1553 dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
1554 return false;
1555 }
1556
1557 intel_crtc = to_intel_crtc(crtc);
1558 if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
1559 DRM_DEBUG_KMS("crtc not active for PSR\n");
1560 dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
1561 return false;
1562 }
1563
1564 obj = to_intel_framebuffer(crtc->fb)->obj;
1565 if (obj->tiling_mode != I915_TILING_X ||
1566 obj->fence_reg == I915_FENCE_REG_NONE) {
1567 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
1568 dev_priv->no_psr_reason = PSR_NOT_TILED;
1569 return false;
1570 }
1571
1572 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
1573 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
1574 dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
1575 return false;
1576 }
1577
1578 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
1579 S3D_ENABLE) {
1580 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
1581 dev_priv->no_psr_reason = PSR_S3D_ENABLED;
1582 return false;
1583 }
1584
1585 if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
1586 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
1587 dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
1588 return false;
1589 }
1590
1591 return true;
1592}
1593
1594static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
1595{
1596 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1597
1598 if (!intel_edp_psr_match_conditions(intel_dp) ||
1599 intel_edp_is_psr_enabled(dev))
1600 return;
1601
1602 /* Setup PSR once */
1603 intel_edp_psr_setup(intel_dp);
1604
1605 /* Enable PSR on the panel */
1606 intel_edp_psr_enable_sink(intel_dp);
1607
1608 /* Enable PSR on the host */
1609 intel_edp_psr_enable_source(intel_dp);
1610}
1611
1612void intel_edp_psr_enable(struct intel_dp *intel_dp)
1613{
1614 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1615
1616 if (intel_edp_psr_match_conditions(intel_dp) &&
1617 !intel_edp_is_psr_enabled(dev))
1618 intel_edp_psr_do_enable(intel_dp);
1619}
1620
1621void intel_edp_psr_disable(struct intel_dp *intel_dp)
1622{
1623 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1624 struct drm_i915_private *dev_priv = dev->dev_private;
1625
1626 if (!intel_edp_is_psr_enabled(dev))
1627 return;
1628
1629 I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
1630
1631 /* Wait till PSR is idle */
1632 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
1633 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
1634 DRM_ERROR("Timed out waiting for PSR Idle State\n");
1635}
1636
1637void intel_edp_psr_update(struct drm_device *dev)
1638{
1639 struct intel_encoder *encoder;
1640 struct intel_dp *intel_dp = NULL;
1641
1642 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
1643 if (encoder->type == INTEL_OUTPUT_EDP) {
1644 intel_dp = enc_to_intel_dp(&encoder->base);
1645
1646 if (!is_edp_psr(intel_dp))
1647 return;
1648
1649 if (!intel_edp_psr_match_conditions(intel_dp))
1650 intel_edp_psr_disable(intel_dp);
1651 else
1652 if (!intel_edp_is_psr_enabled(dev))
1653 intel_edp_psr_do_enable(intel_dp);
1654 }
1363} 1655}
1364 1656
1365static void intel_disable_dp(struct intel_encoder *encoder) 1657static void intel_disable_dp(struct intel_encoder *encoder)
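
The PSR code added above gates enabling behind a chain of early-return checks in intel_edp_psr_match_conditions(): Haswell only, eDP on DDI A, a sink that advertises PSR, the i915_enable_psr module parameter, an active CRTC with an X-tiled and fenced framebuffer, and no sprite, stereo 3D, or interlaced mode. A compilable sketch of that gating chain, with the hardware state reduced to plain booleans:

/*
 * Sketch (not driver code): the early-return gating chain that decides
 * whether PSR may be enabled, with hardware state reduced to booleans.
 */
#include <stdbool.h>
#include <stdio.h>

struct psr_conditions {
	bool is_haswell, edp_on_ddi_a, sink_has_psr, module_param;
	bool crtc_active, fb_x_tiled_and_fenced;
	bool sprite_enabled, stereo_3d, interlaced;
};

static bool psr_match_conditions(const struct psr_conditions *c)
{
	if (!c->is_haswell)            { puts("PSR: no source support"); return false; }
	if (!c->edp_on_ddi_a)          { puts("PSR: HSW needs eDP on DDI A"); return false; }
	if (!c->sink_has_psr)          { puts("PSR: no sink support"); return false; }
	if (!c->module_param)          { puts("PSR: disabled by flag"); return false; }
	if (!c->crtc_active)           { puts("PSR: crtc not active"); return false; }
	if (!c->fb_x_tiled_and_fenced) { puts("PSR: fb not tiled/fenced"); return false; }
	if (c->sprite_enabled)         { puts("PSR: sprite enabled"); return false; }
	if (c->stereo_3d)              { puts("PSR: stereo 3D enabled"); return false; }
	if (c->interlaced)             { puts("PSR: interlaced mode"); return false; }
	return true;
}

int main(void)
{
	struct psr_conditions c = {
		.is_haswell = true, .edp_on_ddi_a = true, .sink_has_psr = true,
		.module_param = true, .crtc_active = true,
		.fb_x_tiled_and_fenced = true,
	};
	printf("PSR %s\n", psr_match_conditions(&c) ? "allowed" : "blocked");
	return 0;
}
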
@@ -1411,47 +1703,50 @@ static void intel_enable_dp(struct intel_encoder *encoder)
1411 intel_dp_complete_link_train(intel_dp); 1703 intel_dp_complete_link_train(intel_dp);
1412 intel_dp_stop_link_train(intel_dp); 1704 intel_dp_stop_link_train(intel_dp);
1413 ironlake_edp_backlight_on(intel_dp); 1705 ironlake_edp_backlight_on(intel_dp);
1706}
1414 1707
1415 if (IS_VALLEYVIEW(dev)) { 1708static void vlv_enable_dp(struct intel_encoder *encoder)
1416 struct intel_digital_port *dport = 1709{
1417 enc_to_dig_port(&encoder->base);
1418 int channel = vlv_dport_to_channel(dport);
1419
1420 vlv_wait_port_ready(dev_priv, channel);
1421 }
1422} 1710}
1423 1711
1424static void intel_pre_enable_dp(struct intel_encoder *encoder) 1712static void intel_pre_enable_dp(struct intel_encoder *encoder)
1425{ 1713{
1426 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1714 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1427 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 1715 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
1716
1717 if (dport->port == PORT_A)
1718 ironlake_edp_pll_on(intel_dp);
1719}
1720
1721static void vlv_pre_enable_dp(struct intel_encoder *encoder)
1722{
1723 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1724 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
1428 struct drm_device *dev = encoder->base.dev; 1725 struct drm_device *dev = encoder->base.dev;
1429 struct drm_i915_private *dev_priv = dev->dev_private; 1726 struct drm_i915_private *dev_priv = dev->dev_private;
1727 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1728 int port = vlv_dport_to_channel(dport);
1729 int pipe = intel_crtc->pipe;
1730 u32 val;
1430 1731
1431 if (dport->port == PORT_A && !IS_VALLEYVIEW(dev)) 1732 mutex_lock(&dev_priv->dpio_lock);
1432 ironlake_edp_pll_on(intel_dp);
1433 1733
1434 if (IS_VALLEYVIEW(dev)) { 1734 val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
1435 struct intel_crtc *intel_crtc = 1735 val = 0;
1436 to_intel_crtc(encoder->base.crtc); 1736 if (pipe)
1437 int port = vlv_dport_to_channel(dport); 1737 val |= (1<<21);
1438 int pipe = intel_crtc->pipe; 1738 else
1439 u32 val; 1739 val &= ~(1<<21);
1440 1740 val |= 0x001000c4;
1441 val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port)); 1741 vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
1442 val = 0; 1742 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
1443 if (pipe) 1743 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
1444 val |= (1<<21);
1445 else
1446 val &= ~(1<<21);
1447 val |= 0x001000c4;
1448 vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
1449 1744
1450 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 1745 mutex_unlock(&dev_priv->dpio_lock);
1451 0x00760018); 1746
1452 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 1747 intel_enable_dp(encoder);
1453 0x00400888); 1748
1454 } 1749 vlv_wait_port_ready(dev_priv, port);
1455} 1750}
1456 1751
1457static void intel_dp_pre_pll_enable(struct intel_encoder *encoder) 1752static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -1465,6 +1760,7 @@ static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
1465 return; 1760 return;
1466 1761
1467 /* Program Tx lane resets to default */ 1762 /* Program Tx lane resets to default */
1763 mutex_lock(&dev_priv->dpio_lock);
1468 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 1764 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
1469 DPIO_PCS_TX_LANE2_RESET | 1765 DPIO_PCS_TX_LANE2_RESET |
1470 DPIO_PCS_TX_LANE1_RESET); 1766 DPIO_PCS_TX_LANE1_RESET);
@@ -1478,6 +1774,7 @@ static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
1478 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00); 1774 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
1479 vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500); 1775 vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
1480 vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000); 1776 vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
1777 mutex_unlock(&dev_priv->dpio_lock);
1481} 1778}
1482 1779
1483/* 1780/*
@@ -1689,6 +1986,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
1689 return 0; 1986 return 0;
1690 } 1987 }
1691 1988
1989 mutex_lock(&dev_priv->dpio_lock);
1692 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000); 1990 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
1693 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value); 1991 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
1694 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port), 1992 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
@@ -1697,6 +1995,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
1697 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000); 1995 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
1698 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value); 1996 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
1699 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000); 1997 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
1998 mutex_unlock(&dev_priv->dpio_lock);
1700 1999
1701 return 0; 2000 return 0;
1702} 2001}
@@ -2030,7 +2329,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2030 struct drm_device *dev = encoder->dev; 2329 struct drm_device *dev = encoder->dev;
2031 int i; 2330 int i;
2032 uint8_t voltage; 2331 uint8_t voltage;
2033 bool clock_recovery = false;
2034 int voltage_tries, loop_tries; 2332 int voltage_tries, loop_tries;
2035 uint32_t DP = intel_dp->DP; 2333 uint32_t DP = intel_dp->DP;
2036 2334
@@ -2048,7 +2346,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2048 voltage = 0xff; 2346 voltage = 0xff;
2049 voltage_tries = 0; 2347 voltage_tries = 0;
2050 loop_tries = 0; 2348 loop_tries = 0;
2051 clock_recovery = false;
2052 for (;;) { 2349 for (;;) {
2053 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 2350 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
2054 uint8_t link_status[DP_LINK_STATUS_SIZE]; 2351 uint8_t link_status[DP_LINK_STATUS_SIZE];
@@ -2069,7 +2366,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2069 2366
2070 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 2367 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
2071 DRM_DEBUG_KMS("clock recovery OK\n"); 2368 DRM_DEBUG_KMS("clock recovery OK\n");
2072 clock_recovery = true;
2073 break; 2369 break;
2074 } 2370 }
2075 2371
@@ -2275,6 +2571,13 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
2275 if (intel_dp->dpcd[DP_DPCD_REV] == 0) 2571 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
2276 return false; /* DPCD not present */ 2572 return false; /* DPCD not present */
2277 2573
2574 /* Check if the panel supports PSR */
2575 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
2576 intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
2577 intel_dp->psr_dpcd,
2578 sizeof(intel_dp->psr_dpcd));
2579 if (is_edp_psr(intel_dp))
2580 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
2278 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 2581 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2279 DP_DWN_STRM_PORT_PRESENT)) 2582 DP_DWN_STRM_PORT_PRESENT))
2280 return true; /* native DP sink */ 2583 return true; /* native DP sink */
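
The hunk above probes the sink's PSR capability while reading the DPCD: the receiver-capability block at DP_PSR_SUPPORT is cleared, fetched over AUX, and bit 0 of the first byte decides support. A standalone sketch of that probe; dpcd_read() is a hypothetical stand-in for the driver's native AUX read:

/*
 * Sketch (not driver code): sink-side PSR capability probe over AUX.
 * dpcd_read() is a hypothetical stand-in for the native AUX read.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define DP_PSR_SUPPORT      0x070	/* DPCD receiver-capability address */
#define DP_PSR_IS_SUPPORTED (1 << 0)
#define PSR_CAP_SIZE        2

static bool dpcd_read(uint32_t addr, uint8_t *buf, size_t len)
{
	(void)addr; (void)len;
	buf[0] = DP_PSR_IS_SUPPORTED;	/* pretend the panel advertises PSR */
	return true;
}

int main(void)
{
	uint8_t psr_dpcd[PSR_CAP_SIZE];

	memset(psr_dpcd, 0, sizeof(psr_dpcd));	/* clear before probing */
	if (dpcd_read(DP_PSR_SUPPORT, psr_dpcd, sizeof(psr_dpcd)) &&
	    (psr_dpcd[0] & DP_PSR_IS_SUPPORTED))
		puts("Detected eDP PSR panel");
	return 0;
}
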
@@ -2542,6 +2845,9 @@ intel_dp_detect(struct drm_connector *connector, bool force)
2542 enum drm_connector_status status; 2845 enum drm_connector_status status;
2543 struct edid *edid = NULL; 2846 struct edid *edid = NULL;
2544 2847
2848 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
2849 connector->base.id, drm_get_connector_name(connector));
2850
2545 intel_dp->has_audio = false; 2851 intel_dp->has_audio = false;
2546 2852
2547 if (HAS_PCH_SPLIT(dev)) 2853 if (HAS_PCH_SPLIT(dev))
@@ -2735,10 +3041,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
2735 kfree(intel_dig_port); 3041 kfree(intel_dig_port);
2736} 3042}
2737 3043
2738static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
2739 .mode_set = intel_dp_mode_set,
2740};
2741
2742static const struct drm_connector_funcs intel_dp_connector_funcs = { 3044static const struct drm_connector_funcs intel_dp_connector_funcs = {
2743 .dpms = intel_connector_dpms, 3045 .dpms = intel_connector_dpms,
2744 .detect = intel_dp_detect, 3046 .detect = intel_dp_detect,
@@ -3166,6 +3468,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
3166 WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n", 3468 WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
3167 error, port_name(port)); 3469 error, port_name(port));
3168 3470
3471 intel_dp->psr_setup_done = false;
3472
3169 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 3473 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
3170 i2c_del_adapter(&intel_dp->adapter); 3474 i2c_del_adapter(&intel_dp->adapter);
3171 if (is_edp(intel_dp)) { 3475 if (is_edp(intel_dp)) {
@@ -3216,17 +3520,21 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3216 3520
3217 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, 3521 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
3218 DRM_MODE_ENCODER_TMDS); 3522 DRM_MODE_ENCODER_TMDS);
3219 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
3220 3523
3221 intel_encoder->compute_config = intel_dp_compute_config; 3524 intel_encoder->compute_config = intel_dp_compute_config;
3222 intel_encoder->enable = intel_enable_dp; 3525 intel_encoder->mode_set = intel_dp_mode_set;
3223 intel_encoder->pre_enable = intel_pre_enable_dp;
3224 intel_encoder->disable = intel_disable_dp; 3526 intel_encoder->disable = intel_disable_dp;
3225 intel_encoder->post_disable = intel_post_disable_dp; 3527 intel_encoder->post_disable = intel_post_disable_dp;
3226 intel_encoder->get_hw_state = intel_dp_get_hw_state; 3528 intel_encoder->get_hw_state = intel_dp_get_hw_state;
3227 intel_encoder->get_config = intel_dp_get_config; 3529 intel_encoder->get_config = intel_dp_get_config;
3228 if (IS_VALLEYVIEW(dev)) 3530 if (IS_VALLEYVIEW(dev)) {
3229 intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable; 3531 intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
3532 intel_encoder->pre_enable = vlv_pre_enable_dp;
3533 intel_encoder->enable = vlv_enable_dp;
3534 } else {
3535 intel_encoder->pre_enable = intel_pre_enable_dp;
3536 intel_encoder->enable = intel_enable_dp;
3537 }
3230 3538
3231 intel_dig_port->port = port; 3539 intel_dig_port->port = port;
3232 intel_dig_port->dp.output_reg = output_reg; 3540 intel_dig_port->dp.output_reg = output_reg;
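
intel_dp_init() now assigns the encoder hooks per platform: Valleyview gets vlv_pre_enable_dp/vlv_enable_dp so the DPIO lane programming and the port-ready wait live entirely in the VLV path, while other platforms keep the generic pair. A tiny C sketch of that dispatch, with placeholder hook bodies that merely narrate the real sequence:

/*
 * Sketch (not driver code): platform-keyed encoder hook assignment.
 * Hook bodies are placeholders that just narrate the real sequence.
 */
#include <stdbool.h>
#include <stdio.h>

struct encoder_hooks {
	void (*pre_enable)(void);
	void (*enable)(void);
};

static void generic_pre_enable(void) { puts("eDP PLL on (port A only)"); }
static void generic_enable(void)     { puts("link train, backlight on"); }
static void vlv_pre_enable(void)     { puts("DPIO lanes, enable, wait port ready"); }
static void vlv_enable(void)         { puts("nothing left to do"); }

static void init_hooks(struct encoder_hooks *e, bool is_valleyview)
{
	if (is_valleyview) {
		e->pre_enable = vlv_pre_enable;
		e->enable = vlv_enable;
	} else {
		e->pre_enable = generic_pre_enable;
		e->enable = generic_enable;
	}
}

int main(void)
{
	struct encoder_hooks e;

	init_hooks(&e, true);
	e.pre_enable();
	e.enable();
	return 0;
}
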
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index c8c9b6f48230..176080822a74 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -26,6 +26,7 @@
26#define __INTEL_DRV_H__ 26#define __INTEL_DRV_H__
27 27
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/hdmi.h>
29#include <drm/i915_drm.h> 30#include <drm/i915_drm.h>
30#include "i915_drv.h" 31#include "i915_drv.h"
31#include <drm/drm_crtc.h> 32#include <drm/drm_crtc.h>
@@ -208,10 +209,6 @@ struct intel_crtc_config {
208 209
209 struct drm_display_mode requested_mode; 210 struct drm_display_mode requested_mode;
210 struct drm_display_mode adjusted_mode; 211 struct drm_display_mode adjusted_mode;
211 /* This flag must be set by the encoder's compute_config callback if it
212 * changes the crtc timings in the mode to prevent the crtc fixup from
213 * overwriting them. Currently only lvds needs that. */
214 bool timings_set;
215 /* Whether to set up the PCH/FDI. Note that we never allow sharing 212 /* Whether to set up the PCH/FDI. Note that we never allow sharing
216 * between pch encoders and cpu encoders. */ 213 * between pch encoders and cpu encoders. */
217 bool has_pch_encoder; 214 bool has_pch_encoder;
@@ -334,6 +331,13 @@ struct intel_crtc {
334 bool pch_fifo_underrun_disabled; 331 bool pch_fifo_underrun_disabled;
335}; 332};
336 333
334struct intel_plane_wm_parameters {
335 uint32_t horiz_pixels;
336 uint8_t bytes_per_pixel;
337 bool enabled;
338 bool scaled;
339};
340
337struct intel_plane { 341struct intel_plane {
338 struct drm_plane base; 342 struct drm_plane base;
339 int plane; 343 int plane;
@@ -352,20 +356,18 @@ struct intel_plane {
352 * as the other pieces of the struct may not reflect the values we want 356 * as the other pieces of the struct may not reflect the values we want
353 * for the watermark calculations. Currently only Haswell uses this. 357 * for the watermark calculations. Currently only Haswell uses this.
354 */ 358 */
355 struct { 359 struct intel_plane_wm_parameters wm;
356 bool enable;
357 uint8_t bytes_per_pixel;
358 uint32_t horiz_pixels;
359 } wm;
360 360
361 void (*update_plane)(struct drm_plane *plane, 361 void (*update_plane)(struct drm_plane *plane,
362 struct drm_crtc *crtc,
362 struct drm_framebuffer *fb, 363 struct drm_framebuffer *fb,
363 struct drm_i915_gem_object *obj, 364 struct drm_i915_gem_object *obj,
364 int crtc_x, int crtc_y, 365 int crtc_x, int crtc_y,
365 unsigned int crtc_w, unsigned int crtc_h, 366 unsigned int crtc_w, unsigned int crtc_h,
366 uint32_t x, uint32_t y, 367 uint32_t x, uint32_t y,
367 uint32_t src_w, uint32_t src_h); 368 uint32_t src_w, uint32_t src_h);
368 void (*disable_plane)(struct drm_plane *plane); 369 void (*disable_plane)(struct drm_plane *plane,
370 struct drm_crtc *crtc);
369 int (*update_colorkey)(struct drm_plane *plane, 371 int (*update_colorkey)(struct drm_plane *plane,
370 struct drm_intel_sprite_colorkey *key); 372 struct drm_intel_sprite_colorkey *key);
371 void (*get_colorkey)(struct drm_plane *plane, 373 void (*get_colorkey)(struct drm_plane *plane,
@@ -397,66 +399,6 @@ struct cxsr_latency {
397#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 399#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
398#define to_intel_plane(x) container_of(x, struct intel_plane, base) 400#define to_intel_plane(x) container_of(x, struct intel_plane, base)
399 401
400#define DIP_HEADER_SIZE 5
401
402#define DIP_TYPE_AVI 0x82
403#define DIP_VERSION_AVI 0x2
404#define DIP_LEN_AVI 13
405#define DIP_AVI_PR_1 0
406#define DIP_AVI_PR_2 1
407#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2)
408#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2)
409#define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2)
410
411#define DIP_TYPE_SPD 0x83
412#define DIP_VERSION_SPD 0x1
413#define DIP_LEN_SPD 25
414#define DIP_SPD_UNKNOWN 0
415#define DIP_SPD_DSTB 0x1
416#define DIP_SPD_DVDP 0x2
417#define DIP_SPD_DVHS 0x3
418#define DIP_SPD_HDDVR 0x4
419#define DIP_SPD_DVC 0x5
420#define DIP_SPD_DSC 0x6
421#define DIP_SPD_VCD 0x7
422#define DIP_SPD_GAME 0x8
423#define DIP_SPD_PC 0x9
424#define DIP_SPD_BD 0xa
425#define DIP_SPD_SCD 0xb
426
427struct dip_infoframe {
428 uint8_t type; /* HB0 */
429 uint8_t ver; /* HB1 */
430 uint8_t len; /* HB2 - body len, not including checksum */
431 uint8_t ecc; /* Header ECC */
432 uint8_t checksum; /* PB0 */
433 union {
434 struct {
435 /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
436 uint8_t Y_A_B_S;
437 /* PB2 - C 7:6, M 5:4, R 3:0 */
438 uint8_t C_M_R;
439 /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
440 uint8_t ITC_EC_Q_SC;
441 /* PB4 - VIC 6:0 */
442 uint8_t VIC;
443 /* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
444 uint8_t YQ_CN_PR;
445 /* PB6 to PB13 */
446 uint16_t top_bar_end;
447 uint16_t bottom_bar_start;
448 uint16_t left_bar_end;
449 uint16_t right_bar_start;
450 } __attribute__ ((packed)) avi;
451 struct {
452 uint8_t vn[8];
453 uint8_t pd[16];
454 uint8_t sdi;
455 } __attribute__ ((packed)) spd;
456 uint8_t payload[27];
457 } __attribute__ ((packed)) body;
458} __attribute__((packed));
459
460struct intel_hdmi { 402struct intel_hdmi {
461 u32 hdmi_reg; 403 u32 hdmi_reg;
462 int ddc_bus; 404 int ddc_bus;
@@ -467,7 +409,8 @@ struct intel_hdmi {
467 enum hdmi_force_audio force_audio; 409 enum hdmi_force_audio force_audio;
468 bool rgb_quant_range_selectable; 410 bool rgb_quant_range_selectable;
469 void (*write_infoframe)(struct drm_encoder *encoder, 411 void (*write_infoframe)(struct drm_encoder *encoder,
470 struct dip_infoframe *frame); 412 enum hdmi_infoframe_type type,
413 const uint8_t *frame, ssize_t len);
471 void (*set_infoframes)(struct drm_encoder *encoder, 414 void (*set_infoframes)(struct drm_encoder *encoder,
472 struct drm_display_mode *adjusted_mode); 415 struct drm_display_mode *adjusted_mode);
473}; 416};
@@ -487,6 +430,7 @@ struct intel_dp {
487 uint8_t link_bw; 430 uint8_t link_bw;
488 uint8_t lane_count; 431 uint8_t lane_count;
489 uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; 432 uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
433 uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
490 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; 434 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
491 struct i2c_adapter adapter; 435 struct i2c_adapter adapter;
492 struct i2c_algo_dp_aux_data algo; 436 struct i2c_algo_dp_aux_data algo;
@@ -498,13 +442,14 @@ struct intel_dp {
498 int backlight_off_delay; 442 int backlight_off_delay;
499 struct delayed_work panel_vdd_work; 443 struct delayed_work panel_vdd_work;
500 bool want_panel_vdd; 444 bool want_panel_vdd;
445 bool psr_setup_done;
501 struct intel_connector *attached_connector; 446 struct intel_connector *attached_connector;
502}; 447};
503 448
504struct intel_digital_port { 449struct intel_digital_port {
505 struct intel_encoder base; 450 struct intel_encoder base;
506 enum port port; 451 enum port port;
507 u32 port_reversal; 452 u32 saved_port_bits;
508 struct intel_dp dp; 453 struct intel_dp dp;
509 struct intel_hdmi hdmi; 454 struct intel_hdmi hdmi;
510}; 455};
@@ -549,13 +494,6 @@ struct intel_unpin_work {
549 bool enable_stall_check; 494 bool enable_stall_check;
550}; 495};
551 496
552struct intel_fbc_work {
553 struct delayed_work work;
554 struct drm_crtc *crtc;
555 struct drm_framebuffer *fb;
556 int interval;
557};
558
559int intel_pch_rawclk(struct drm_device *dev); 497int intel_pch_rawclk(struct drm_device *dev);
560 498
561int intel_connector_update_modes(struct drm_connector *connector, 499int intel_connector_update_modes(struct drm_connector *connector,
@@ -574,7 +512,6 @@ extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
574extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); 512extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
575extern bool intel_hdmi_compute_config(struct intel_encoder *encoder, 513extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
576 struct intel_crtc_config *pipe_config); 514 struct intel_crtc_config *pipe_config);
577extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
578extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, 515extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
579 bool is_sdvob); 516 bool is_sdvob);
580extern void intel_dvo_init(struct drm_device *dev); 517extern void intel_dvo_init(struct drm_device *dev);
@@ -639,14 +576,10 @@ struct intel_set_config {
639 bool mode_changed; 576 bool mode_changed;
640}; 577};
641 578
642extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
643 int x, int y, struct drm_framebuffer *old_fb);
644extern void intel_modeset_disable(struct drm_device *dev);
645extern void intel_crtc_restore_mode(struct drm_crtc *crtc); 579extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
646extern void intel_crtc_load_lut(struct drm_crtc *crtc); 580extern void intel_crtc_load_lut(struct drm_crtc *crtc);
647extern void intel_crtc_update_dpms(struct drm_crtc *crtc); 581extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
648extern void intel_encoder_destroy(struct drm_encoder *encoder); 582extern void intel_encoder_destroy(struct drm_encoder *encoder);
649extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
650extern void intel_connector_dpms(struct drm_connector *, int mode); 583extern void intel_connector_dpms(struct drm_connector *, int mode);
651extern bool intel_connector_get_hw_state(struct intel_connector *connector); 584extern bool intel_connector_get_hw_state(struct intel_connector *connector);
652extern void intel_modeset_check_state(struct drm_device *dev); 585extern void intel_modeset_check_state(struct drm_device *dev);
@@ -712,12 +645,10 @@ extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
712extern void intel_release_load_detect_pipe(struct drm_connector *connector, 645extern void intel_release_load_detect_pipe(struct drm_connector *connector,
713 struct intel_load_detect_pipe *old); 646 struct intel_load_detect_pipe *old);
714 647
715extern void intelfb_restore(void);
716extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 648extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
717 u16 blue, int regno); 649 u16 blue, int regno);
718extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 650extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
719 u16 *blue, int regno); 651 u16 *blue, int regno);
720extern void intel_enable_clock_gating(struct drm_device *dev);
721 652
722extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, 653extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
723 struct drm_i915_gem_object *obj, 654 struct drm_i915_gem_object *obj,
@@ -728,6 +659,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
728 struct intel_framebuffer *ifb, 659 struct intel_framebuffer *ifb,
729 struct drm_mode_fb_cmd2 *mode_cmd, 660 struct drm_mode_fb_cmd2 *mode_cmd,
730 struct drm_i915_gem_object *obj); 661 struct drm_i915_gem_object *obj);
662extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
731extern int intel_fbdev_init(struct drm_device *dev); 663extern int intel_fbdev_init(struct drm_device *dev);
732extern void intel_fbdev_initial_config(struct drm_device *dev); 664extern void intel_fbdev_initial_config(struct drm_device *dev);
733extern void intel_fbdev_fini(struct drm_device *dev); 665extern void intel_fbdev_fini(struct drm_device *dev);
@@ -747,6 +679,22 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
747extern void intel_fb_output_poll_changed(struct drm_device *dev); 679extern void intel_fb_output_poll_changed(struct drm_device *dev);
748extern void intel_fb_restore_mode(struct drm_device *dev); 680extern void intel_fb_restore_mode(struct drm_device *dev);
749 681
682struct intel_shared_dpll *
683intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
684
685void assert_shared_dpll(struct drm_i915_private *dev_priv,
686 struct intel_shared_dpll *pll,
687 bool state);
688#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
689#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
690void assert_pll(struct drm_i915_private *dev_priv,
691 enum pipe pipe, bool state);
692#define assert_pll_enabled(d, p) assert_pll(d, p, true)
693#define assert_pll_disabled(d, p) assert_pll(d, p, false)
694void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
695 enum pipe pipe, bool state);
696#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
697#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
750extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, 698extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
751 bool state); 699 bool state);
752#define assert_pipe_enabled(d, p) assert_pipe(d, p, true) 700#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
@@ -762,9 +710,10 @@ extern void intel_ddi_init(struct drm_device *dev, enum port port);
762 710
763/* For use by IVB LP watermark workaround in intel_sprite.c */ 711/* For use by IVB LP watermark workaround in intel_sprite.c */
764extern void intel_update_watermarks(struct drm_device *dev); 712extern void intel_update_watermarks(struct drm_device *dev);
765extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, 713extern void intel_update_sprite_watermarks(struct drm_plane *plane,
766 uint32_t sprite_width, 714 struct drm_crtc *crtc,
767 int pixel_size, bool enable); 715 uint32_t sprite_width, int pixel_size,
716 bool enabled, bool scaled);
768 717
769extern unsigned long intel_gen4_compute_page_offset(int *x, int *y, 718extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
770 unsigned int tiling_mode, 719 unsigned int tiling_mode,
@@ -780,7 +729,6 @@ extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
780extern void intel_init_pm(struct drm_device *dev); 729extern void intel_init_pm(struct drm_device *dev);
781/* FBC */ 730/* FBC */
782extern bool intel_fbc_enabled(struct drm_device *dev); 731extern bool intel_fbc_enabled(struct drm_device *dev);
783extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
784extern void intel_update_fbc(struct drm_device *dev); 732extern void intel_update_fbc(struct drm_device *dev);
785/* IPS */ 733/* IPS */
786extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 734extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@@ -796,8 +744,8 @@ extern void intel_init_power_well(struct drm_device *dev);
796extern void intel_set_power_well(struct drm_device *dev, bool enable); 744extern void intel_set_power_well(struct drm_device *dev, bool enable);
797extern void intel_enable_gt_powersave(struct drm_device *dev); 745extern void intel_enable_gt_powersave(struct drm_device *dev);
798extern void intel_disable_gt_powersave(struct drm_device *dev); 746extern void intel_disable_gt_powersave(struct drm_device *dev);
799extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
800extern void ironlake_teardown_rc6(struct drm_device *dev); 747extern void ironlake_teardown_rc6(struct drm_device *dev);
748void gen6_update_ring_freq(struct drm_device *dev);
801 749
802extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, 750extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
803 enum pipe *pipe); 751 enum pipe *pipe);
@@ -825,4 +773,24 @@ extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
825 enum transcoder pch_transcoder, 773 enum transcoder pch_transcoder,
826 bool enable); 774 bool enable);
827 775
776extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
777extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
778extern void intel_edp_psr_update(struct drm_device *dev);
779extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
780 bool switch_to_fclk, bool allow_power_down);
781extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
782extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
783extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
784 uint32_t mask);
785extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
786extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
787 uint32_t mask);
788extern void hsw_enable_pc8_work(struct work_struct *__work);
789extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
790extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
791extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
792extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
793extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
794extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
795
828#endif /* __INTEL_DRV_H__ */ 796#endif /* __INTEL_DRV_H__ */
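
With the private struct dip_infoframe removed, intel_drv.h now includes <linux/hdmi.h> and the write_infoframe hook takes an infoframe type plus an already-packed byte buffer. The sketch below shows that pack-then-write shape in standalone C; it drops the encoder argument and stubs the packing step, so treat the names as illustrative rather than the driver's exact signature:

/*
 * Sketch (not driver code): the pack-then-write infoframe shape.
 * The real hook also takes the encoder and uses ssize_t; the packing
 * step (hdmi_avi_infoframe_pack() upstream) is stubbed here.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum hdmi_infoframe_type { HDMI_INFOFRAME_TYPE_AVI = 0x82 };

typedef void (*write_infoframe_t)(enum hdmi_infoframe_type type,
				  const uint8_t *frame, size_t len);

static void hw_write_infoframe(enum hdmi_infoframe_type type,
			       const uint8_t *frame, size_t len)
{
	(void)frame;
	printf("DIP type %#x, %zu packed bytes\n", (unsigned)type, len);
}

int main(void)
{
	uint8_t buf[17];	/* 4-byte header + 13-byte AVI payload */
	write_infoframe_t write_infoframe = hw_write_infoframe;

	memset(buf, 0, sizeof(buf));	/* stand-in for the pack step */
	buf[0] = HDMI_INFOFRAME_TYPE_AVI;
	write_infoframe(HDMI_INFOFRAME_TYPE_AVI, buf, sizeof(buf));
	return 0;
}
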
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index eb2020eb2b7e..406303b509c1 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -100,15 +100,14 @@ struct intel_dvo {
100 bool panel_wants_dither; 100 bool panel_wants_dither;
101}; 101};
102 102
103static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder) 103static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
104{ 104{
105 return container_of(encoder, struct intel_dvo, base.base); 105 return container_of(encoder, struct intel_dvo, base);
106} 106}
107 107
108static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector) 108static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
109{ 109{
110 return container_of(intel_attached_encoder(connector), 110 return enc_to_dvo(intel_attached_encoder(connector));
111 struct intel_dvo, base);
112} 111}
113 112
114static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) 113static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
@@ -123,7 +122,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
123{ 122{
124 struct drm_device *dev = encoder->base.dev; 123 struct drm_device *dev = encoder->base.dev;
125 struct drm_i915_private *dev_priv = dev->dev_private; 124 struct drm_i915_private *dev_priv = dev->dev_private;
126 struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); 125 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
127 u32 tmp; 126 u32 tmp;
128 127
129 tmp = I915_READ(intel_dvo->dev.dvo_reg); 128 tmp = I915_READ(intel_dvo->dev.dvo_reg);
@@ -140,7 +139,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
140 struct intel_crtc_config *pipe_config) 139 struct intel_crtc_config *pipe_config)
141{ 140{
142 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 141 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
143 struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); 142 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
144 u32 tmp, flags = 0; 143 u32 tmp, flags = 0;
145 144
 	tmp = I915_READ(intel_dvo->dev.dvo_reg);
@@ -159,7 +158,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
 static void intel_disable_dvo(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
-	struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
 	u32 dvo_reg = intel_dvo->dev.dvo_reg;
 	u32 temp = I915_READ(dvo_reg);
 
@@ -171,7 +170,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
 static void intel_enable_dvo(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
-	struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
 	u32 dvo_reg = intel_dvo->dev.dvo_reg;
 	u32 temp = I915_READ(dvo_reg);
 
@@ -241,11 +240,11 @@ static int intel_dvo_mode_valid(struct drm_connector *connector,
 	return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
 }
 
-static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
-				 const struct drm_display_mode *mode,
-				 struct drm_display_mode *adjusted_mode)
+static bool intel_dvo_compute_config(struct intel_encoder *encoder,
+				     struct intel_crtc_config *pipe_config)
 {
-	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 
 	/* If we have timings from the BIOS for the panel, put them in
 	 * to the adjusted mode. The CRTC will be set up for this mode,
@@ -267,23 +266,23 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
 	}
 
 	if (intel_dvo->dev.dev_ops->mode_fixup)
-		return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode);
+		return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
+							  &pipe_config->requested_mode,
+							  adjusted_mode);
 
 	return true;
 }
 
-static void intel_dvo_mode_set(struct drm_encoder *encoder,
-			       struct drm_display_mode *mode,
-			       struct drm_display_mode *adjusted_mode)
+static void intel_dvo_mode_set(struct intel_encoder *encoder)
 {
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
-	int pipe = intel_crtc->pipe;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+	int pipe = crtc->pipe;
 	u32 dvo_val;
 	u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
-	int dpll_reg = DPLL(pipe);
 
 	switch (dvo_reg) {
 	case DVOA:
@@ -298,7 +297,9 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
 		break;
 	}
 
-	intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode);
+	intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
+					 &crtc->config.requested_mode,
+					 adjusted_mode);
 
 	/* Save the data order, since I don't know what it should be set to. */
 	dvo_val = I915_READ(dvo_reg) &
@@ -314,8 +315,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
 		dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
 
-	I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
-
 	/*I915_WRITE(DVOB_SRCDIM,
 	  (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
 	  (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
@@ -335,6 +334,8 @@ static enum drm_connector_status
 intel_dvo_detect(struct drm_connector *connector, bool force)
 {
 	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, drm_get_connector_name(connector));
 	return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
 }
 
@@ -372,11 +373,6 @@ static void intel_dvo_destroy(struct drm_connector *connector)
 	kfree(connector);
 }
 
-static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
-	.mode_fixup = intel_dvo_mode_fixup,
-	.mode_set = intel_dvo_mode_set,
-};
-
 static const struct drm_connector_funcs intel_dvo_connector_funcs = {
 	.dpms = intel_dvo_dpms,
 	.detect = intel_dvo_detect,
@@ -392,7 +388,7 @@ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs
 
 static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
 {
-	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+	struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder));
 
 	if (intel_dvo->dev.dev_ops->destroy)
 		intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
@@ -471,6 +467,8 @@ void intel_dvo_init(struct drm_device *dev)
 	intel_encoder->enable = intel_enable_dvo;
 	intel_encoder->get_hw_state = intel_dvo_get_hw_state;
 	intel_encoder->get_config = intel_dvo_get_config;
+	intel_encoder->compute_config = intel_dvo_compute_config;
+	intel_encoder->mode_set = intel_dvo_mode_set;
 	intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
 
 	/* Now, try to find a controller */
@@ -537,9 +535,6 @@ void intel_dvo_init(struct drm_device *dev)
 	connector->interlace_allowed = false;
 	connector->doublescan_allowed = false;
 
-	drm_encoder_helper_add(&intel_encoder->base,
-			       &intel_dvo_helper_funcs);
-
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	if (dvo->type == INTEL_DVO_CHIP_LVDS) {
 		/* For our LVDS chipsets, we should hopefully be able
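/*
 * A minimal sketch of the callback move this file's hunks make: i915 stops
 * registering a drm_encoder_helper_funcs vtable and instead fills function
 * pointers directly on its own encoder wrapper, so the modeset core can call
 * them with the already-upcast type. All names below are invented for
 * illustration and are not the i915 definitions.
 */
struct sketch_encoder {
	bool (*compute_config)(struct sketch_encoder *enc, int *pipe_bpp);
	void (*mode_set)(struct sketch_encoder *enc);
};

static bool sketch_compute_config(struct sketch_encoder *enc, int *pipe_bpp)
{
	/* clamp before any register is touched, as a mode_fixup would */
	if (*pipe_bpp > 24)
		*pipe_bpp = 24;
	return true;
}

static void sketch_encoder_init(struct sketch_encoder *enc)
{
	/* hooks are wired at init time, mirroring intel_dvo_init() above */
	enc->compute_config = sketch_compute_config;
}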
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index dff669e2387f..bc2100007b21 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	info->apertures->ranges[0].base = dev->mode_config.fb_base;
 	info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
 
-	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
+	info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
 	info->fix.smem_len = size;
 
 	info->screen_base =
-		ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
 			   size);
 	if (!info->screen_base) {
 		ret = -ENOSPC;
@@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
-	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
 		      fb->width, fb->height,
-		      obj->gtt_offset, obj);
+		      i915_gem_obj_ggtt_offset(obj), obj);
 
 
 	mutex_unlock(&dev->struct_mutex);
@@ -193,26 +193,21 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
 static void intel_fbdev_destroy(struct drm_device *dev,
 				struct intel_fbdev *ifbdev)
 {
-	struct fb_info *info;
-	struct intel_framebuffer *ifb = &ifbdev->ifb;
-
 	if (ifbdev->helper.fbdev) {
-		info = ifbdev->helper.fbdev;
+		struct fb_info *info = ifbdev->helper.fbdev;
+
 		unregister_framebuffer(info);
 		iounmap(info->screen_base);
 		if (info->cmap.len)
 			fb_dealloc_cmap(&info->cmap);
+
 		framebuffer_release(info);
 	}
 
 	drm_fb_helper_fini(&ifbdev->helper);
 
-	drm_framebuffer_unregister_private(&ifb->base);
-	drm_framebuffer_cleanup(&ifb->base);
-	if (ifb->obj) {
-		drm_gem_object_unreference_unlocked(&ifb->obj->base);
-		ifb->obj = NULL;
-	}
+	drm_framebuffer_unregister_private(&ifbdev->ifb.base);
+	intel_framebuffer_fini(&ifbdev->ifb);
 }
 
 int intel_fbdev_init(struct drm_device *dev)
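/*
 * The smem_start/screen_base math in the hunks above is "aperture base plus
 * the object's offset into the GTT". A self-contained sketch of that
 * write-combined mapping pattern; ioremap_wc()/iounmap() are real kernel
 * APIs, while the wrapper name and parameters are invented for the sketch.
 */
#include <linux/io.h>
#include <linux/types.h>

static void __iomem *sketch_map_fb(resource_size_t aperture_base,
				   unsigned long obj_offset,
				   unsigned long size)
{
	/* WC keeps framebuffer writes fast without full caching semantics */
	return ioremap_wc(aperture_base + obj_offset, size);
}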
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 98df2a0c85bd..4148cc85bf7f 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -29,6 +29,7 @@
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
+#include <linux/hdmi.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
@@ -66,89 +67,83 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
 	return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
 }
 
-void intel_dip_infoframe_csum(struct dip_infoframe *frame)
+static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
 {
-	uint8_t *data = (uint8_t *)frame;
-	uint8_t sum = 0;
-	unsigned i;
-
-	frame->checksum = 0;
-	frame->ecc = 0;
-
-	for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++)
-		sum += data[i];
-
-	frame->checksum = 0x100 - sum;
-}
-
-static u32 g4x_infoframe_index(struct dip_infoframe *frame)
-{
-	switch (frame->type) {
-	case DIP_TYPE_AVI:
+	switch (type) {
+	case HDMI_INFOFRAME_TYPE_AVI:
 		return VIDEO_DIP_SELECT_AVI;
-	case DIP_TYPE_SPD:
+	case HDMI_INFOFRAME_TYPE_SPD:
 		return VIDEO_DIP_SELECT_SPD;
+	case HDMI_INFOFRAME_TYPE_VENDOR:
+		return VIDEO_DIP_SELECT_VENDOR;
 	default:
-		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+		DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
 		return 0;
 	}
 }
 
-static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
+static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
 {
-	switch (frame->type) {
-	case DIP_TYPE_AVI:
+	switch (type) {
+	case HDMI_INFOFRAME_TYPE_AVI:
 		return VIDEO_DIP_ENABLE_AVI;
-	case DIP_TYPE_SPD:
+	case HDMI_INFOFRAME_TYPE_SPD:
 		return VIDEO_DIP_ENABLE_SPD;
+	case HDMI_INFOFRAME_TYPE_VENDOR:
+		return VIDEO_DIP_ENABLE_VENDOR;
 	default:
-		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+		DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
 		return 0;
 	}
 }
 
-static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
+static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
 {
-	switch (frame->type) {
-	case DIP_TYPE_AVI:
+	switch (type) {
+	case HDMI_INFOFRAME_TYPE_AVI:
 		return VIDEO_DIP_ENABLE_AVI_HSW;
-	case DIP_TYPE_SPD:
+	case HDMI_INFOFRAME_TYPE_SPD:
 		return VIDEO_DIP_ENABLE_SPD_HSW;
+	case HDMI_INFOFRAME_TYPE_VENDOR:
+		return VIDEO_DIP_ENABLE_VS_HSW;
 	default:
-		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+		DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
 		return 0;
 	}
 }
 
-static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame,
+static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
 				  enum transcoder cpu_transcoder)
 {
-	switch (frame->type) {
-	case DIP_TYPE_AVI:
+	switch (type) {
+	case HDMI_INFOFRAME_TYPE_AVI:
 		return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
-	case DIP_TYPE_SPD:
+	case HDMI_INFOFRAME_TYPE_SPD:
 		return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
+	case HDMI_INFOFRAME_TYPE_VENDOR:
+		return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder);
 	default:
-		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+		DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
 		return 0;
 	}
 }
 
 static void g4x_write_infoframe(struct drm_encoder *encoder,
-				struct dip_infoframe *frame)
+				enum hdmi_infoframe_type type,
+				const uint8_t *frame, ssize_t len)
 {
 	uint32_t *data = (uint32_t *)frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val = I915_READ(VIDEO_DIP_CTL);
-	unsigned i, len = DIP_HEADER_SIZE + frame->len;
+	int i;
 
 	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
-	val |= g4x_infoframe_index(frame);
+	val |= g4x_infoframe_index(type);
 
-	val &= ~g4x_infoframe_enable(frame);
+	val &= ~g4x_infoframe_enable(type);
 
 	I915_WRITE(VIDEO_DIP_CTL, val);
 
@@ -162,7 +157,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
 	I915_WRITE(VIDEO_DIP_DATA, 0);
 	mmiowb();
 
-	val |= g4x_infoframe_enable(frame);
+	val |= g4x_infoframe_enable(type);
 	val &= ~VIDEO_DIP_FREQ_MASK;
 	val |= VIDEO_DIP_FREQ_VSYNC;
 
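/*
 * After this change the g4x/hsw helpers above are pure mappings from
 * enum hdmi_infoframe_type to a register field. The same mapping could be
 * table-driven; a sketch with invented values (the real VIDEO_DIP_* bit
 * encodings live in i915_reg.h and are not reproduced here):
 */
enum sketch_dip {
	SKETCH_DIP_AVI,
	SKETCH_DIP_SPD,
	SKETCH_DIP_VENDOR,
	SKETCH_DIP_NTYPES,
};

static const unsigned int sketch_dip_select[SKETCH_DIP_NTYPES] = {
	[SKETCH_DIP_AVI]	= 0u << 19,	/* placeholder encodings */
	[SKETCH_DIP_SPD]	= 1u << 19,
	[SKETCH_DIP_VENDOR]	= 2u << 19,
};

static unsigned int sketch_infoframe_index(enum sketch_dip type)
{
	/* out-of-range types select nothing, like the default: case above */
	return type < SKETCH_DIP_NTYPES ? sketch_dip_select[type] : 0;
}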
@@ -171,22 +166,22 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
 }
 
 static void ibx_write_infoframe(struct drm_encoder *encoder,
-				struct dip_infoframe *frame)
+				enum hdmi_infoframe_type type,
+				const uint8_t *frame, ssize_t len)
 {
 	uint32_t *data = (uint32_t *)frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
-	unsigned i, len = DIP_HEADER_SIZE + frame->len;
+	int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 
 	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
-	val |= g4x_infoframe_index(frame);
+	val |= g4x_infoframe_index(type);
 
-	val &= ~g4x_infoframe_enable(frame);
+	val &= ~g4x_infoframe_enable(type);
 
 	I915_WRITE(reg, val);
 
@@ -200,7 +195,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
 	I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
 	mmiowb();
 
-	val |= g4x_infoframe_enable(frame);
+	val |= g4x_infoframe_enable(type);
 	val &= ~VIDEO_DIP_FREQ_MASK;
 	val |= VIDEO_DIP_FREQ_VSYNC;
 
@@ -209,25 +204,25 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
 }
 
 static void cpt_write_infoframe(struct drm_encoder *encoder,
-				struct dip_infoframe *frame)
+				enum hdmi_infoframe_type type,
+				const uint8_t *frame, ssize_t len)
 {
 	uint32_t *data = (uint32_t *)frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
-	unsigned i, len = DIP_HEADER_SIZE + frame->len;
+	int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 
 	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
-	val |= g4x_infoframe_index(frame);
+	val |= g4x_infoframe_index(type);
 
 	/* The DIP control register spec says that we need to update the AVI
 	 * infoframe without clearing its enable bit */
-	if (frame->type != DIP_TYPE_AVI)
-		val &= ~g4x_infoframe_enable(frame);
+	if (type != HDMI_INFOFRAME_TYPE_AVI)
+		val &= ~g4x_infoframe_enable(type);
 
 	I915_WRITE(reg, val);
 
@@ -241,7 +236,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
 	I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
 	mmiowb();
 
-	val |= g4x_infoframe_enable(frame);
+	val |= g4x_infoframe_enable(type);
 	val &= ~VIDEO_DIP_FREQ_MASK;
 	val |= VIDEO_DIP_FREQ_VSYNC;
 
@@ -250,22 +245,22 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
 }
 
 static void vlv_write_infoframe(struct drm_encoder *encoder,
-				struct dip_infoframe *frame)
+				enum hdmi_infoframe_type type,
+				const uint8_t *frame, ssize_t len)
 {
 	uint32_t *data = (uint32_t *)frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-	int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
-	unsigned i, len = DIP_HEADER_SIZE + frame->len;
+	int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 
 	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
-	val |= g4x_infoframe_index(frame);
+	val |= g4x_infoframe_index(type);
 
-	val &= ~g4x_infoframe_enable(frame);
+	val &= ~g4x_infoframe_enable(type);
 
 	I915_WRITE(reg, val);
 
@@ -279,7 +274,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
 	I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
 	mmiowb();
 
-	val |= g4x_infoframe_enable(frame);
+	val |= g4x_infoframe_enable(type);
 	val &= ~VIDEO_DIP_FREQ_MASK;
 	val |= VIDEO_DIP_FREQ_VSYNC;
 
@@ -288,21 +283,24 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
 }
 
 static void hsw_write_infoframe(struct drm_encoder *encoder,
-				struct dip_infoframe *frame)
+				enum hdmi_infoframe_type type,
+				const uint8_t *frame, ssize_t len)
 {
 	uint32_t *data = (uint32_t *)frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
-	u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->config.cpu_transcoder);
-	unsigned int i, len = DIP_HEADER_SIZE + frame->len;
+	u32 data_reg;
+	int i;
 	u32 val = I915_READ(ctl_reg);
 
+	data_reg = hsw_infoframe_data_reg(type,
+					  intel_crtc->config.cpu_transcoder);
 	if (data_reg == 0)
 		return;
 
-	val &= ~hsw_infoframe_enable(frame);
+	val &= ~hsw_infoframe_enable(type);
 	I915_WRITE(ctl_reg, val);
 
 	mmiowb();
@@ -315,18 +313,48 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
 	I915_WRITE(data_reg + i, 0);
 	mmiowb();
 
-	val |= hsw_infoframe_enable(frame);
+	val |= hsw_infoframe_enable(type);
 	I915_WRITE(ctl_reg, val);
 	POSTING_READ(ctl_reg);
 }
 
-static void intel_set_infoframe(struct drm_encoder *encoder,
-				struct dip_infoframe *frame)
+/*
+ * The data we write to the DIP data buffer registers is 1 byte bigger than the
+ * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
+ * at 0). It's also a byte used by DisplayPort so the same DIP registers can be
+ * used for both technologies.
+ *
+ * DW0: Reserved/ECC/DP | HB2 | HB1 | HB0
+ * DW1: DB3 | DB2 | DB1 | DB0
+ * DW2: DB7 | DB6 | DB5 | DB4
+ * DW3: ...
+ *
+ * (HB is Header Byte, DB is Data Byte)
+ *
+ * The hdmi pack() functions don't know about that hardware specific hole so we
+ * trick them by giving an offset into the buffer and moving back the header
+ * bytes by one.
+ */
+static void intel_write_infoframe(struct drm_encoder *encoder,
+				  union hdmi_infoframe *frame)
 {
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	uint8_t buffer[VIDEO_DIP_DATA_SIZE];
+	ssize_t len;
 
-	intel_dip_infoframe_csum(frame);
-	intel_hdmi->write_infoframe(encoder, frame);
+	/* see comment above for the reason for this offset */
+	len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1);
+	if (len < 0)
+		return;
+
+	/* Insert the 'hole' (see big comment above) at position 3 */
+	buffer[0] = buffer[1];
+	buffer[1] = buffer[2];
+	buffer[2] = buffer[3];
+	buffer[3] = 0;
+	len++;
+
+	intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len);
 }
 
 static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
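/*
 * A worked example of the 'hole' insertion described in the big comment
 * above. hdmi_infoframe_pack() emits HB0 HB1 HB2 DB0 DB1 ... contiguously;
 * the hardware wants HB0 HB1 HB2 <hole> DB0 DB1 ... Packing at buffer + 1
 * and copying the three header bytes down by one leaves exactly one spare
 * byte at index 3. Standalone sketch (the helper name is invented):
 */
#include <stdint.h>

static int sketch_insert_dip_hole(uint8_t *buffer, int packed_len)
{
	/* on entry, the packed frame starts at buffer[1]: HB0 HB1 HB2 DB0... */
	buffer[0] = buffer[1];	/* HB0 moves down to index 0 */
	buffer[1] = buffer[2];	/* HB1 */
	buffer[2] = buffer[3];	/* HB2 */
	buffer[3] = 0;		/* ECC/reserved hole at position 3 */

	return packed_len + 1;	/* one byte longer on the wire */
}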
@@ -334,40 +362,57 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
 {
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-	struct dip_infoframe avi_if = {
-		.type = DIP_TYPE_AVI,
-		.ver = DIP_VERSION_AVI,
-		.len = DIP_LEN_AVI,
-	};
+	union hdmi_infoframe frame;
+	int ret;
 
-	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
-		avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
+	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+						       adjusted_mode);
+	if (ret < 0) {
+		DRM_ERROR("couldn't fill AVI infoframe\n");
+		return;
+	}
 
 	if (intel_hdmi->rgb_quant_range_selectable) {
 		if (intel_crtc->config.limited_color_range)
-			avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
+			frame.avi.quantization_range =
+				HDMI_QUANTIZATION_RANGE_LIMITED;
 		else
-			avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
+			frame.avi.quantization_range =
+				HDMI_QUANTIZATION_RANGE_FULL;
 	}
 
-	avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
-
-	intel_set_infoframe(encoder, &avi_if);
+	intel_write_infoframe(encoder, &frame);
 }
 
 static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
 {
-	struct dip_infoframe spd_if;
+	union hdmi_infoframe frame;
+	int ret;
+
+	ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx");
+	if (ret < 0) {
+		DRM_ERROR("couldn't fill SPD infoframe\n");
+		return;
+	}
 
-	memset(&spd_if, 0, sizeof(spd_if));
-	spd_if.type = DIP_TYPE_SPD;
-	spd_if.ver = DIP_VERSION_SPD;
-	spd_if.len = DIP_LEN_SPD;
-	strcpy(spd_if.body.spd.vn, "Intel");
-	strcpy(spd_if.body.spd.pd, "Integrated gfx");
-	spd_if.body.spd.sdi = DIP_SPD_PC;
+	frame.spd.sdi = HDMI_SPD_SDI_PC;
 
-	intel_set_infoframe(encoder, &spd_if);
+	intel_write_infoframe(encoder, &frame);
+}
+
+static void
+intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
+			      struct drm_display_mode *adjusted_mode)
+{
+	union hdmi_infoframe frame;
+	int ret;
+
+	ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+							  adjusted_mode);
+	if (ret < 0)
+		return;
+
+	intel_write_infoframe(encoder, &frame);
 }
 
 static void g4x_set_infoframes(struct drm_encoder *encoder,
@@ -432,6 +477,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
 
 	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
 	intel_hdmi_set_spd_infoframe(encoder);
+	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
 }
 
 static void ibx_set_infoframes(struct drm_encoder *encoder,
@@ -493,6 +539,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
 
 	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
 	intel_hdmi_set_spd_infoframe(encoder);
+	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
 }
 
 static void cpt_set_infoframes(struct drm_encoder *encoder,
@@ -528,6 +575,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
 
 	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
 	intel_hdmi_set_spd_infoframe(encoder);
+	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
 }
 
 static void vlv_set_infoframes(struct drm_encoder *encoder,
@@ -562,6 +610,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
 
 	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
 	intel_hdmi_set_spd_infoframe(encoder);
+	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
 }
 
 static void hsw_set_infoframes(struct drm_encoder *encoder,
@@ -589,16 +638,16 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
 
 	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
 	intel_hdmi_set_spd_infoframe(encoder);
+	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
 }
 
-static void intel_hdmi_mode_set(struct drm_encoder *encoder,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
+static void intel_hdmi_mode_set(struct intel_encoder *encoder)
 {
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
 	u32 hdmi_val;
 
 	hdmi_val = SDVO_ENCODING_HDMI;
@@ -609,7 +658,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 		hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
 
-	if (intel_crtc->config.pipe_bpp > 24)
+	if (crtc->config.pipe_bpp > 24)
 		hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
 	else
 		hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
@@ -620,21 +669,21 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
 
 	if (intel_hdmi->has_audio) {
 		DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
-				 pipe_name(intel_crtc->pipe));
+				 pipe_name(crtc->pipe));
 		hdmi_val |= SDVO_AUDIO_ENABLE;
 		hdmi_val |= HDMI_MODE_SELECT_HDMI;
-		intel_write_eld(encoder, adjusted_mode);
+		intel_write_eld(&encoder->base, adjusted_mode);
 	}
 
 	if (HAS_PCH_CPT(dev))
-		hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
+		hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
 	else
-		hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe);
+		hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
 
 	I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
 	POSTING_READ(intel_hdmi->hdmi_reg);
 
-	intel_hdmi->set_infoframes(encoder, adjusted_mode);
+	intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
 }
 
 static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
@@ -719,14 +768,10 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
 		I915_WRITE(intel_hdmi->hdmi_reg, temp);
 		POSTING_READ(intel_hdmi->hdmi_reg);
 	}
+}
 
-	if (IS_VALLEYVIEW(dev)) {
-		struct intel_digital_port *dport =
-			enc_to_dig_port(&encoder->base);
-		int channel = vlv_dport_to_channel(dport);
-
-		vlv_wait_port_ready(dev_priv, channel);
-	}
+static void vlv_enable_hdmi(struct intel_encoder *encoder)
+{
 }
 
 static void intel_disable_hdmi(struct intel_encoder *encoder)
@@ -785,10 +830,22 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
 	}
 }
 
+static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+{
+	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+
+	if (IS_G4X(dev))
+		return 165000;
+	else if (IS_HASWELL(dev))
+		return 300000;
+	else
+		return 225000;
+}
+
 static int intel_hdmi_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
-	if (mode->clock > 165000)
+	if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
 		return MODE_CLOCK_HIGH;
 	if (mode->clock < 20000)
 		return MODE_CLOCK_LOW;
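/*
 * The 12bpc path in intel_hdmi_compute_config() below scales the pixel
 * clock by 12/8 = 3/2 before comparing against the port limit: a 148500 kHz
 * 1080p mode needs 222750 kHz at 12bpc, which fits the 225000 kHz PCH limit
 * but not the 165000 kHz G4X limit. A sketch of that arithmetic (the helper
 * name is invented):
 */
static int sketch_clock_for_bpc(int pixclock_khz, int bpc)
{
	/* link clock scales with bits per channel relative to 8bpc */
	return pixclock_khz * bpc / 8;
}

/* usage: sketch_clock_for_bpc(148500, 12) == 222750 */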
@@ -806,6 +863,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 	int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
+	int portclock_limit = hdmi_portclock_limit(intel_hdmi);
 	int desired_bpp;
 
 	if (intel_hdmi->color_range_auto) {
@@ -829,7 +887,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 	 * outputs. We also need to check that the higher clock still fits
 	 * within limits.
 	 */
-	if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= 225000
+	if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
 	    && HAS_PCH_SPLIT(dev)) {
 		DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
 		desired_bpp = 12*3;
@@ -846,7 +904,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 		pipe_config->pipe_bpp = desired_bpp;
 	}
 
-	if (adjusted_mode->clock > 225000) {
+	if (adjusted_mode->clock > portclock_limit) {
 		DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
 		return false;
 	}
@@ -866,6 +924,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
 	struct edid *edid;
 	enum drm_connector_status status = connector_status_disconnected;
 
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, drm_get_connector_name(connector));
+
 	intel_hdmi->has_hdmi_sink = false;
 	intel_hdmi->has_audio = false;
 	intel_hdmi->rgb_quant_range_selectable = false;
@@ -1017,6 +1078,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
 		return;
 
 	/* Enable clock channels for this port */
+	mutex_lock(&dev_priv->dpio_lock);
 	val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
 	val = 0;
 	if (pipe)
@@ -1047,6 +1109,11 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
 			 0x00760018);
 	vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
 			 0x00400888);
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	intel_enable_hdmi(encoder);
+
+	vlv_wait_port_ready(dev_priv, port);
 }
 
 static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
@@ -1060,6 +1127,7 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
 		return;
 
 	/* Program Tx lane resets to default */
+	mutex_lock(&dev_priv->dpio_lock);
 	vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
 			 DPIO_PCS_TX_LANE2_RESET |
 			 DPIO_PCS_TX_LANE1_RESET);
@@ -1078,6 +1146,7 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
 			 0x00002000);
 	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
 			 DPIO_TX_OCALINIT_EN);
+	mutex_unlock(&dev_priv->dpio_lock);
 }
 
 static void intel_hdmi_post_disable(struct intel_encoder *encoder)
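/*
 * A sketch of the locking pattern the dpio_lock hunks above introduce:
 * every DPIO sideband sequence is bracketed by the mutex, so concurrent
 * users cannot interleave their indirect register accesses. The sketch_*
 * names and stub accessors are invented; only the mutex API is real.
 */
#include <linux/mutex.h>
#include <linux/types.h>

static u32 sketch_dpio_read(u32 reg)
{
	return reg;	/* stand-in for the real indirect sideband read */
}

static void sketch_dpio_write(u32 reg, u32 val)
{
	(void)reg;	/* stand-in for the real indirect sideband write */
	(void)val;
}

static void sketch_dpio_rmw(struct mutex *dpio_lock, u32 reg, u32 set_bits)
{
	u32 val;

	mutex_lock(dpio_lock);	/* the whole read-modify-write is atomic */
	val = sketch_dpio_read(reg);
	sketch_dpio_write(reg, val | set_bits);
	mutex_unlock(dpio_lock);
}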
@@ -1100,10 +1169,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
 	kfree(connector);
 }
 
-static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
-	.mode_set = intel_hdmi_mode_set,
-};
-
 static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
 	.dpms = intel_connector_dpms,
 	.detect = intel_hdmi_detect,
@@ -1208,7 +1273,6 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
 {
 	struct intel_digital_port *intel_dig_port;
 	struct intel_encoder *intel_encoder;
-	struct drm_encoder *encoder;
 	struct intel_connector *intel_connector;
 
 	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
@@ -1222,21 +1286,22 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
 	}
 
 	intel_encoder = &intel_dig_port->base;
-	encoder = &intel_encoder->base;
 
 	drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
 			 DRM_MODE_ENCODER_TMDS);
-	drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
 
 	intel_encoder->compute_config = intel_hdmi_compute_config;
-	intel_encoder->enable = intel_enable_hdmi;
+	intel_encoder->mode_set = intel_hdmi_mode_set;
 	intel_encoder->disable = intel_disable_hdmi;
 	intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
 	intel_encoder->get_config = intel_hdmi_get_config;
 	if (IS_VALLEYVIEW(dev)) {
-		intel_encoder->pre_enable = intel_hdmi_pre_enable;
 		intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
+		intel_encoder->pre_enable = intel_hdmi_pre_enable;
+		intel_encoder->enable = vlv_enable_hdmi;
 		intel_encoder->post_disable = intel_hdmi_post_disable;
+	} else {
+		intel_encoder->enable = intel_enable_hdmi;
 	}
 
 	intel_encoder->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 639fe192997c..d1c1e0f7f262 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -398,6 +398,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
 	int i, reg_offset;
 	int ret = 0;
 
+	intel_aux_display_runtime_get(dev_priv);
 	mutex_lock(&dev_priv->gmbus_mutex);
 
 	if (bus->force_bit) {
@@ -497,6 +498,7 @@ timeout:
 
 out:
 	mutex_unlock(&dev_priv->gmbus_mutex);
+	intel_aux_display_runtime_put(dev_priv);
 	return ret;
 }
 
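/*
 * A sketch of the bracketing the gmbus hunks above add: take a power
 * reference before the transfer and drop it on every exit path, so the aux
 * display hardware stays powered for the whole transaction. The sketch_*
 * names are invented stand-ins; only the mutex API is real.
 */
#include <linux/mutex.h>

struct sketch_bus {
	struct mutex lock;
	int power_refs;
};

static void sketch_runtime_get(struct sketch_bus *bus)
{
	bus->power_refs++;	/* stand-in for the runtime-PM get */
}

static void sketch_runtime_put(struct sketch_bus *bus)
{
	bus->power_refs--;	/* stand-in for the runtime-PM put */
}

static int sketch_do_transfer(struct sketch_bus *bus)
{
	(void)bus;
	return 0;	/* stand-in for the actual gmbus I/O */
}

static int sketch_xfer(struct sketch_bus *bus)
{
	int ret;

	sketch_runtime_get(bus);	/* power up before touching registers */
	mutex_lock(&bus->lock);
	ret = sketch_do_transfer(bus);
	mutex_unlock(&bus->lock);
	sketch_runtime_put(bus);	/* balanced on success and failure */

	return ret;
}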
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 021e8daa022d..4d33278e31fb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -109,23 +109,38 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
 		flags |= DRM_MODE_FLAG_PVSYNC;
 
 	pipe_config->adjusted_mode.flags |= flags;
+
+	/* gen2/3 store dither state in pfit control, needs to match */
+	if (INTEL_INFO(dev)->gen < 4) {
+		tmp = I915_READ(PFIT_CONTROL);
+
+		pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+	}
 }
 
 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
  * This is an exception to the general rule that mode_set doesn't turn
  * things on.
  */
-static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
+static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 {
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	struct drm_display_mode *fixed_mode =
 		lvds_encoder->attached_connector->base.panel.fixed_mode;
-	int pipe = intel_crtc->pipe;
+	int pipe = crtc->pipe;
 	u32 temp;
 
+	if (HAS_PCH_SPLIT(dev)) {
+		assert_fdi_rx_pll_disabled(dev_priv, pipe);
+		assert_shared_dpll_disabled(dev_priv,
+					    intel_crtc_to_shared_dpll(crtc));
+	} else {
+		assert_pll_disabled(dev_priv, pipe);
+	}
+
 	temp = I915_READ(lvds_encoder->reg);
 	temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
 
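/*
 * A sketch of what the new assert block above checks: the LVDS port has to
 * be powered while the PLL feeding the pipe is still off, so the pre-enable
 * hook asserts the clock state before touching the port. Names are invented
 * for the sketch; WARN_ON() is the real kernel macro.
 */
#include <linux/bug.h>

static void sketch_pre_enable_lvds(bool pll_enabled, bool *port_enabled)
{
	/* enabling the port after its PLL would violate the sequence */
	WARN_ON(pll_enabled);

	*port_enabled = true;
}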
@@ -142,7 +157,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
 
 	/* set the corresponsding LVDS_BORDER bit */
 	temp &= ~LVDS_BORDER_ENABLE;
-	temp |= intel_crtc->config.gmch_pfit.lvds_border_bits;
+	temp |= crtc->config.gmch_pfit.lvds_border_bits;
 	/* Set the B0-B3 data pairs corresponding to whether we're going to
 	 * set the DPLLs for dual-channel mode or not.
 	 */
@@ -162,8 +177,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
 	if (INTEL_INFO(dev)->gen == 4) {
 		/* Bspec wording suggests that LVDS port dithering only exists
 		 * for 18bpp panels. */
-		if (intel_crtc->config.dither &&
-		    intel_crtc->config.pipe_bpp == 18)
+		if (crtc->config.dither && crtc->config.pipe_bpp == 18)
 			temp |= LVDS_ENABLE_DITHER;
 		else
 			temp &= ~LVDS_ENABLE_DITHER;
@@ -290,14 +304,11 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 
 		intel_pch_panel_fitting(intel_crtc, pipe_config,
 					intel_connector->panel.fitting_mode);
-		return true;
 	} else {
 		intel_gmch_panel_fitting(intel_crtc, pipe_config,
 					 intel_connector->panel.fitting_mode);
-	}
 
-	drm_mode_set_crtcinfo(adjusted_mode, 0);
-	pipe_config->timings_set = true;
+	}
 
 	/*
 	 * XXX: It would be nice to support lower refresh rates on the
@@ -308,14 +319,12 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 	return true;
 }
 
-static void intel_lvds_mode_set(struct drm_encoder *encoder,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
+static void intel_lvds_mode_set(struct intel_encoder *encoder)
 {
 	/*
-	 * The LVDS pin pair will already have been turned on in the
-	 * intel_crtc_mode_set since it has a large impact on the DPLL
-	 * settings.
+	 * We don't do anything here, the LVDS port is fully set up in the pre
+	 * enable hook - the ordering constraints for enabling the lvds port vs.
+	 * enabling the display pll are too strict.
 	 */
 }
 
@@ -332,6 +341,9 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
 	struct drm_device *dev = connector->dev;
 	enum drm_connector_status status;
 
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, drm_get_connector_name(connector));
+
 	status = intel_panel_detect(dev);
 	if (status != connector_status_unknown)
 		return status;
@@ -493,10 +505,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
 	return 0;
 }
 
-static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
-	.mode_set = intel_lvds_mode_set,
-};
-
 static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
 	.get_modes = intel_lvds_get_modes,
 	.mode_valid = intel_lvds_mode_valid,
@@ -955,8 +963,9 @@ void intel_lvds_init(struct drm_device *dev)
 			 DRM_MODE_ENCODER_LVDS);
 
 	intel_encoder->enable = intel_enable_lvds;
-	intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
+	intel_encoder->pre_enable = intel_pre_enable_lvds;
 	intel_encoder->compute_config = intel_lvds_compute_config;
+	intel_encoder->mode_set = intel_lvds_mode_set;
 	intel_encoder->disable = intel_disable_lvds;
 	intel_encoder->get_hw_state = intel_lvds_get_hw_state;
 	intel_encoder->get_config = intel_lvds_get_config;
@@ -973,7 +982,6 @@ void intel_lvds_init(struct drm_device *dev)
 	else
 		intel_encoder->crtc_mask = (1 << 1);
 
-	drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
 	drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
 	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
 	connector->interlace_allowed = false;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a3698812e9c7..ddfd0aefe0c0 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
 		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
 	else
 		regs = io_mapping_map_wc(dev_priv->gtt.mappable,
-					 overlay->reg_bo->gtt_offset);
+					 i915_gem_obj_ggtt_offset(overlay->reg_bo));
 
 	return regs;
 }
@@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	swidth = params->src_w;
 	swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
 	sheight = params->src_h;
-	iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
+	iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
 	ostride = params->stride_Y;
 
 	if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 				 params->src_w/uv_hscale);
 		swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
 		sheight |= (params->src_h/uv_vscale) << 16;
-		iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
-		iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
+		iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
+		iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
 		ostride |= params->stride_UV << 16;
 	}
 
@@ -1333,7 +1333,9 @@ void intel_setup_overlay(struct drm_device *dev)
 
 	overlay->dev = dev;
 
-	reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
+	reg_bo = NULL;
+	if (!OVERLAY_NEEDS_PHYSICAL(dev))
+		reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
 	if (reg_bo == NULL)
 		reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
 	if (reg_bo == NULL)
@@ -1350,12 +1352,12 @@ void intel_setup_overlay(struct drm_device *dev)
 		}
 		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
 	} else {
-		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
+		ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, true, false);
 		if (ret) {
 			DRM_ERROR("failed to pin overlay register bo\n");
 			goto out_free_bo;
 		}
-		overlay->flip_addr = reg_bo->gtt_offset;
+		overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
 
 		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
 		if (ret) {
@@ -1412,9 +1414,6 @@ void intel_cleanup_overlay(struct drm_device *dev)
 	kfree(dev_priv->overlay);
 }
 
-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-
 struct intel_overlay_error_state {
 	struct overlay_registers regs;
 	unsigned long base;
@@ -1435,7 +1434,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 			overlay->reg_bo->phys_obj->handle->vaddr;
 	else
 		regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
-						overlay->reg_bo->gtt_offset);
+						i915_gem_obj_ggtt_offset(overlay->reg_bo));
 
 	return regs;
 }
@@ -1468,7 +1467,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
 		error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
 	else
-		error->base = overlay->reg_bo->gtt_offset;
+		error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
 
 	regs = intel_overlay_map_regs_atomic(overlay);
 	if (!regs)
@@ -1537,4 +1536,3 @@ intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
 	P(UVSCALEV);
 #undef P
 }
-#endif
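/*
 * A sketch of the allocation cascade intel_setup_overlay() now uses above:
 * try stolen memory first when the platform allows it, fall back to a
 * regular GEM object, and let the caller handle total failure. All sketch_*
 * names are invented stand-ins for the real i915 helpers.
 */
#include <stddef.h>

struct sketch_bo { unsigned long size; };
struct sketch_dev { int needs_physical; };

static struct sketch_bo *sketch_alloc_stolen(struct sketch_dev *dev,
					     unsigned long size)
{
	(void)dev;
	(void)size;
	return NULL;	/* stand-in: stolen memory may be exhausted */
}

static struct sketch_bo *sketch_alloc_gem(struct sketch_dev *dev,
					  unsigned long size)
{
	(void)dev;
	(void)size;
	return NULL;	/* stand-in for the generic GEM allocator */
}

static struct sketch_bo *sketch_alloc_reg_page(struct sketch_dev *dev)
{
	struct sketch_bo *bo = NULL;

	if (!dev->needs_physical)
		bo = sketch_alloc_stolen(dev, 4096);	/* cheap, may fail */
	if (!bo)
		bo = sketch_alloc_gem(dev, 4096);	/* generic fallback */

	return bo;	/* NULL if both allocators failed */
}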
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 80bea1d3209f..a43c33bc4a35 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -494,8 +494,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
 		goto out;
 	}
 
-	/* scale to hardware */
-	level = level * freq / max;
+	/* scale to hardware, but be careful to not overflow */
+	if (freq < max)
+		level = level * freq / max;
+	else
+		level = freq / max * level;
 
 	dev_priv->backlight.level = level;
 	if (dev_priv->backlight.device)
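/*
 * Why the scaling above is reordered: with 32-bit arithmetic the product
 * level * freq wraps once it exceeds 2^32 - 1 (70000 * 70000 is already
 * about 4.9e9). Dividing first when freq >= max trades a little rounding
 * for a bounded intermediate. Standalone sketch of both orders:
 */
#include <linux/types.h>

static u32 sketch_scale_backlight(u32 level, u32 freq, u32 max)
{
	if (freq < max)
		return level * freq / max;	/* product stays small in the
						 * common case where level <= max */
	return freq / max * level;		/* divide first; bounded, but
						 * freq / max rounds down */
}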
@@ -512,6 +515,17 @@ void intel_panel_disable_backlight(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;
 
+	/*
+	 * Do not disable backlight on the vgaswitcheroo path. When switching
+	 * away from i915, the other client may depend on i915 to handle the
+	 * backlight. This will leave the backlight on unnecessarily when
+	 * another client is not activated.
+	 */
+	if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+		DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
+		return;
+	}
+
 	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
 	dev_priv->backlight.enabled = false;
@@ -580,7 +594,8 @@ void intel_panel_enable_backlight(struct drm_device *dev,
 	POSTING_READ(reg);
 	I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (HAS_PCH_SPLIT(dev) &&
+	    !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
 		tmp = I915_READ(BLC_PWM_PCH_CTL1);
 		tmp |= BLM_PCH_PWM_ENABLE;
 		tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d10e6735771f..46056820d1d2 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,8 +30,7 @@
 #include "intel_drv.h"
 #include "../../../platform/x86/intel_ips.h"
 #include <linux/module.h>
-
-#define FORCEWAKE_ACK_TIMEOUT_MS 2
+#include <drm/i915_powerwell.h>
 
 /* FBC, or Frame Buffer Compression, is a technique employed to compress the
  * framebuffer contents in-memory, aiming at reducing the required bandwidth
@@ -86,7 +85,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	int plane, i;
 	u32 fbc_ctl, fbc_ctl2;
 
-	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
 	if (fb->pitches[0] < cfb_pitch)
 		cfb_pitch = fb->pitches[0];
 
@@ -217,7 +216,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
 		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
 	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
-	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
 	/* enable it... */
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
@@ -274,7 +273,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	struct drm_i915_gem_object *obj = intel_fb->obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);
+	I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
 
 	I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
 		   IVB_DPFC_CTL_FENCE_EN |
@@ -325,7 +324,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	mutex_lock(&dev->struct_mutex);
-	if (work == dev_priv->fbc_work) {
+	if (work == dev_priv->fbc.fbc_work) {
 		/* Double check that we haven't switched fb without cancelling
 		 * the prior work.
 		 */
@@ -333,12 +332,12 @@ static void intel_fbc_work_fn(struct work_struct *__work)
 			dev_priv->display.enable_fbc(work->crtc,
 						     work->interval);
 
-			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
-			dev_priv->cfb_fb = work->crtc->fb->base.id;
-			dev_priv->cfb_y = work->crtc->y;
+			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
+			dev_priv->fbc.fb_id = work->crtc->fb->base.id;
+			dev_priv->fbc.y = work->crtc->y;
 		}
 
-		dev_priv->fbc_work = NULL;
+		dev_priv->fbc.fbc_work = NULL;
 	}
 	mutex_unlock(&dev->struct_mutex);
 
@@ -347,28 +346,28 @@ static void intel_fbc_work_fn(struct work_struct *__work)
 
 static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
 {
-	if (dev_priv->fbc_work == NULL)
+	if (dev_priv->fbc.fbc_work == NULL)
 		return;
 
 	DRM_DEBUG_KMS("cancelling pending FBC enable\n");
 
 	/* Synchronisation is provided by struct_mutex and checking of
-	 * dev_priv->fbc_work, so we can perform the cancellation
+	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
 	 * entirely asynchronously.
 	 */
-	if (cancel_delayed_work(&dev_priv->fbc_work->work))
+	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
 		/* tasklet was killed before being run, clean up */
-		kfree(dev_priv->fbc_work);
+		kfree(dev_priv->fbc.fbc_work);
 
 	/* Mark the work as no longer wanted so that if it does
 	 * wake-up (because the work was already running and waiting
 	 * for our mutex), it will discover that is no longer
 	 * necessary to run.
 	 */
-	dev_priv->fbc_work = NULL;
+	dev_priv->fbc.fbc_work = NULL;
 }
 
-void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 {
 	struct intel_fbc_work *work;
 	struct drm_device *dev = crtc->dev;
@@ -381,6 +380,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (work == NULL) {
+		DRM_ERROR("Failed to allocate FBC work structure\n");
 		dev_priv->display.enable_fbc(crtc, interval);
 		return;
 	}
@@ -390,9 +390,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	work->interval = interval;
 	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
-	dev_priv->fbc_work = work;
-
-	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+	dev_priv->fbc.fbc_work = work;
 
 	/* Delay the actual enabling to let pageflipping cease and the
 	 * display to settle before starting the compression. Note that
@@ -404,6 +402,8 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	 * following the termination of the page-flipping sequence
 	 * and indeed performing the enable as a co-routine and not
 	 * waiting synchronously upon the vblank.
+	 *
+	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
 	 */
 	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
 }
@@ -418,7 +418,17 @@ void intel_disable_fbc(struct drm_device *dev)
 		return;
 
 	dev_priv->display.disable_fbc(dev);
-	dev_priv->cfb_plane = -1;
+	dev_priv->fbc.plane = -1;
+}
+
+static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
+			      enum no_fbc_reason reason)
+{
+	if (dev_priv->fbc.no_fbc_reason == reason)
+		return false;
+
+	dev_priv->fbc.no_fbc_reason = reason;
+	return true;
 }
 
 /**
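set_no_fbc_reason(), added above, exists to de-duplicate the debug output in the call sites converted by the hunks below: the current reason is cached and the helper returns true only when it changes. A standalone sketch of the pattern, with illustrative types and names:

#include <stdbool.h>
#include <stdio.h>

enum no_fbc_reason { FBC_OK, FBC_NO_OUTPUT, FBC_MODULE_PARAM };

struct fbc_state {
	enum no_fbc_reason no_fbc_reason;
};

static bool set_no_fbc_reason(struct fbc_state *fbc, enum no_fbc_reason reason)
{
	if (fbc->no_fbc_reason == reason)
		return false;	/* unchanged: caller stays silent */

	fbc->no_fbc_reason = reason;
	return true;		/* changed: caller logs once */
}

int main(void)
{
	struct fbc_state fbc = { FBC_OK };
	int i;

	/* re-checking the same condition repeatedly logs only once */
	for (i = 0; i < 3; i++)
		if (set_no_fbc_reason(&fbc, FBC_NO_OUTPUT))
			puts("no output, disabling");
	return 0;
}

In the converted call sites, each DRM_DEBUG_KMS moves under if (set_no_fbc_reason(...)), so a condition that is re-evaluated on every modeset is logged once per transition instead of every time.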
@@ -448,14 +458,18 @@ void intel_update_fbc(struct drm_device *dev)
 	struct drm_framebuffer *fb;
 	struct intel_framebuffer *intel_fb;
 	struct drm_i915_gem_object *obj;
-	int enable_fbc;
 	unsigned int max_hdisplay, max_vdisplay;
 
-	if (!i915_powersave)
+	if (!I915_HAS_FBC(dev)) {
+		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
 		return;
+	}
 
-	if (!I915_HAS_FBC(dev))
+	if (!i915_powersave) {
+		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
+			DRM_DEBUG_KMS("fbc disabled per module param\n");
 		return;
+	}
 
 	/*
 	 * If FBC is already on, we just have to verify that we can
@@ -470,8 +484,8 @@ void intel_update_fbc(struct drm_device *dev)
 		if (intel_crtc_active(tmp_crtc) &&
 		    !to_intel_crtc(tmp_crtc)->primary_disabled) {
 			if (crtc) {
-				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
-				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
+					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
 				goto out_disable;
 			}
 			crtc = tmp_crtc;
@@ -479,8 +493,8 @@ void intel_update_fbc(struct drm_device *dev)
 	}
 
 	if (!crtc || crtc->fb == NULL) {
-		DRM_DEBUG_KMS("no output, disabling\n");
-		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
+			DRM_DEBUG_KMS("no output, disabling\n");
 		goto out_disable;
 	}
 
@@ -489,23 +503,22 @@ void intel_update_fbc(struct drm_device *dev)
 	intel_fb = to_intel_framebuffer(fb);
 	obj = intel_fb->obj;
 
-	enable_fbc = i915_enable_fbc;
-	if (enable_fbc < 0) {
-		DRM_DEBUG_KMS("fbc set to per-chip default\n");
-		enable_fbc = 1;
-		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
-			enable_fbc = 0;
+	if (i915_enable_fbc < 0 &&
+	    INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
+		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
+			DRM_DEBUG_KMS("disabled per chip default\n");
+		goto out_disable;
 	}
-	if (!enable_fbc) {
-		DRM_DEBUG_KMS("fbc disabled per module param\n");
-		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
+	if (!i915_enable_fbc) {
+		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
+			DRM_DEBUG_KMS("fbc disabled per module param\n");
 		goto out_disable;
 	}
 	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
 	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
-		DRM_DEBUG_KMS("mode incompatible with compression, "
-			      "disabling\n");
-		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
+			DRM_DEBUG_KMS("mode incompatible with compression, "
+				      "disabling\n");
 		goto out_disable;
 	}
 
@@ -518,14 +531,14 @@ void intel_update_fbc(struct drm_device *dev)
 	}
 	if ((crtc->mode.hdisplay > max_hdisplay) ||
 	    (crtc->mode.vdisplay > max_vdisplay)) {
-		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
-		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
+			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
 		goto out_disable;
 	}
 	if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
 	    intel_crtc->plane != 0) {
-		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
-		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
+			DRM_DEBUG_KMS("plane not 0, disabling compression\n");
 		goto out_disable;
 	}
 
@@ -534,8 +547,8 @@ void intel_update_fbc(struct drm_device *dev)
 	 */
 	if (obj->tiling_mode != I915_TILING_X ||
 	    obj->fence_reg == I915_FENCE_REG_NONE) {
-		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
-		dev_priv->no_fbc_reason = FBC_NOT_TILED;
+		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
+			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
 		goto out_disable;
 	}
 
@@ -544,8 +557,8 @@ void intel_update_fbc(struct drm_device *dev)
 		goto out_disable;
 
 	if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
-		DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
-		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
+			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
 		goto out_disable;
 	}
 
@@ -554,9 +567,9 @@ void intel_update_fbc(struct drm_device *dev)
 	 * cannot be unpinned (and have its GTT offset and fence revoked)
 	 * without first being decoupled from the scanout and FBC disabled.
 	 */
-	if (dev_priv->cfb_plane == intel_crtc->plane &&
-	    dev_priv->cfb_fb == fb->base.id &&
-	    dev_priv->cfb_y == crtc->y)
+	if (dev_priv->fbc.plane == intel_crtc->plane &&
+	    dev_priv->fbc.fb_id == fb->base.id &&
+	    dev_priv->fbc.y == crtc->y)
 		return;
 
 	if (intel_fbc_enabled(dev)) {
@@ -588,6 +601,7 @@ void intel_update_fbc(struct drm_device *dev)
 	}
 
 	intel_enable_fbc(crtc, 500);
+	dev_priv->fbc.no_fbc_reason = FBC_OK;
 	return;
 
 out_disable:
@@ -1666,9 +1680,6 @@ static void i830_update_wm(struct drm_device *dev)
 	I915_WRITE(FW_BLC, fwater_lo);
 }
 
-#define ILK_LP0_PLANE_LATENCY 700
-#define ILK_LP0_CURSOR_LATENCY 1300
-
 /*
  * Check the wm result.
  *
@@ -1783,9 +1794,9 @@ static void ironlake_update_wm(struct drm_device *dev)
 		enabled = 0;
 	if (g4x_compute_wm0(dev, PIPE_A,
 			    &ironlake_display_wm_info,
-			    ILK_LP0_PLANE_LATENCY,
+			    dev_priv->wm.pri_latency[0] * 100,
 			    &ironlake_cursor_wm_info,
-			    ILK_LP0_CURSOR_LATENCY,
+			    dev_priv->wm.cur_latency[0] * 100,
 			    &plane_wm, &cursor_wm)) {
 		I915_WRITE(WM0_PIPEA_ILK,
 			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
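The fixed 700/1300 ns defines deleted above give way to per-platform latency tables; as the surrounding code notes, WM0 entries are kept in 0.1us units and WM1+ entries in 0.5us units, hence the "* 100" and "* 500" conversions to nanoseconds. A small arithmetic check (the WM1 value below is a made-up example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t wm0_latency = 7;	/* 7 * 0.1us = 700 ns, the old ILK_LP0_PLANE_LATENCY */
	uint16_t wm1_latency = 4;	/* example: 4 * 0.5us = 2000 ns */

	printf("WM0: %u ns\n", wm0_latency * 100);	/* 0.1us units -> ns */
	printf("WM1: %u ns\n", wm1_latency * 500);	/* 0.5us units -> ns */
	return 0;
}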
@@ -1797,9 +1808,9 @@ static void ironlake_update_wm(struct drm_device *dev)
 
 	if (g4x_compute_wm0(dev, PIPE_B,
 			    &ironlake_display_wm_info,
-			    ILK_LP0_PLANE_LATENCY,
+			    dev_priv->wm.pri_latency[0] * 100,
 			    &ironlake_cursor_wm_info,
-			    ILK_LP0_CURSOR_LATENCY,
+			    dev_priv->wm.cur_latency[0] * 100,
 			    &plane_wm, &cursor_wm)) {
 		I915_WRITE(WM0_PIPEB_ILK,
 			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
@@ -1823,7 +1834,7 @@ static void ironlake_update_wm(struct drm_device *dev)
 
 	/* WM1 */
 	if (!ironlake_compute_srwm(dev, 1, enabled,
-				   ILK_READ_WM1_LATENCY() * 500,
+				   dev_priv->wm.pri_latency[1] * 500,
 				   &ironlake_display_srwm_info,
 				   &ironlake_cursor_srwm_info,
 				   &fbc_wm, &plane_wm, &cursor_wm))
@@ -1831,14 +1842,14 @@ static void ironlake_update_wm(struct drm_device *dev)
 
 	I915_WRITE(WM1_LP_ILK,
 		   WM1_LP_SR_EN |
-		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
 		   (plane_wm << WM1_LP_SR_SHIFT) |
 		   cursor_wm);
 
 	/* WM2 */
 	if (!ironlake_compute_srwm(dev, 2, enabled,
-				   ILK_READ_WM2_LATENCY() * 500,
+				   dev_priv->wm.pri_latency[2] * 500,
 				   &ironlake_display_srwm_info,
 				   &ironlake_cursor_srwm_info,
 				   &fbc_wm, &plane_wm, &cursor_wm))
@@ -1846,7 +1857,7 @@ static void ironlake_update_wm(struct drm_device *dev)
 
 	I915_WRITE(WM2_LP_ILK,
 		   WM2_LP_EN |
-		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
 		   (plane_wm << WM1_LP_SR_SHIFT) |
 		   cursor_wm);
@@ -1860,7 +1871,7 @@
 static void sandybridge_update_wm(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
+	int latency = dev_priv->wm.pri_latency[0] * 100;	/* In unit 0.1us */
 	u32 val;
 	int fbc_wm, plane_wm, cursor_wm;
 	unsigned int enabled;
@@ -1915,7 +1926,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
 
 	/* WM1 */
 	if (!ironlake_compute_srwm(dev, 1, enabled,
-				   SNB_READ_WM1_LATENCY() * 500,
+				   dev_priv->wm.pri_latency[1] * 500,
 				   &sandybridge_display_srwm_info,
 				   &sandybridge_cursor_srwm_info,
 				   &fbc_wm, &plane_wm, &cursor_wm))
@@ -1923,14 +1934,14 @@ static void sandybridge_update_wm(struct drm_device *dev)
 
 	I915_WRITE(WM1_LP_ILK,
 		   WM1_LP_SR_EN |
-		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
 		   (plane_wm << WM1_LP_SR_SHIFT) |
 		   cursor_wm);
 
 	/* WM2 */
 	if (!ironlake_compute_srwm(dev, 2, enabled,
-				   SNB_READ_WM2_LATENCY() * 500,
+				   dev_priv->wm.pri_latency[2] * 500,
 				   &sandybridge_display_srwm_info,
 				   &sandybridge_cursor_srwm_info,
 				   &fbc_wm, &plane_wm, &cursor_wm))
@@ -1938,14 +1949,14 @@ static void sandybridge_update_wm(struct drm_device *dev)
 
 	I915_WRITE(WM2_LP_ILK,
 		   WM2_LP_EN |
-		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
 		   (plane_wm << WM1_LP_SR_SHIFT) |
 		   cursor_wm);
 
 	/* WM3 */
 	if (!ironlake_compute_srwm(dev, 3, enabled,
-				   SNB_READ_WM3_LATENCY() * 500,
+				   dev_priv->wm.pri_latency[3] * 500,
 				   &sandybridge_display_srwm_info,
 				   &sandybridge_cursor_srwm_info,
 				   &fbc_wm, &plane_wm, &cursor_wm))
@@ -1953,7 +1964,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
 
 	I915_WRITE(WM3_LP_ILK,
 		   WM3_LP_EN |
-		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
 		   (plane_wm << WM1_LP_SR_SHIFT) |
 		   cursor_wm);
@@ -1962,7 +1973,7 @@
 static void ivybridge_update_wm(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
+	int latency = dev_priv->wm.pri_latency[0] * 100;	/* In unit 0.1us */
 	u32 val;
 	int fbc_wm, plane_wm, cursor_wm;
 	int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
@@ -2032,7 +2043,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
 
 	/* WM1 */
 	if (!ironlake_compute_srwm(dev, 1, enabled,
-				   SNB_READ_WM1_LATENCY() * 500,
+				   dev_priv->wm.pri_latency[1] * 500,
 				   &sandybridge_display_srwm_info,
 				   &sandybridge_cursor_srwm_info,
 				   &fbc_wm, &plane_wm, &cursor_wm))
@@ -2040,14 +2051,14 @@ static void ivybridge_update_wm(struct drm_device *dev)
 
 	I915_WRITE(WM1_LP_ILK,
 		   WM1_LP_SR_EN |
-		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
 		   (plane_wm << WM1_LP_SR_SHIFT) |
 		   cursor_wm);
 
 	/* WM2 */
 	if (!ironlake_compute_srwm(dev, 2, enabled,
-				   SNB_READ_WM2_LATENCY() * 500,
+				   dev_priv->wm.pri_latency[2] * 500,
 				   &sandybridge_display_srwm_info,
 				   &sandybridge_cursor_srwm_info,
 				   &fbc_wm, &plane_wm, &cursor_wm))
@@ -2055,19 +2066,19 @@ static void ivybridge_update_wm(struct drm_device *dev)
 
 	I915_WRITE(WM2_LP_ILK,
 		   WM2_LP_EN |
-		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
 		   (plane_wm << WM1_LP_SR_SHIFT) |
 		   cursor_wm);
 
 	/* WM3, note we have to correct the cursor latency */
 	if (!ironlake_compute_srwm(dev, 3, enabled,
-				   SNB_READ_WM3_LATENCY() * 500,
+				   dev_priv->wm.pri_latency[3] * 500,
 				   &sandybridge_display_srwm_info,
 				   &sandybridge_cursor_srwm_info,
 				   &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
 	    !ironlake_compute_srwm(dev, 3, enabled,
-				   2 * SNB_READ_WM3_LATENCY() * 500,
+				   dev_priv->wm.cur_latency[3] * 500,
 				   &sandybridge_display_srwm_info,
 				   &sandybridge_cursor_srwm_info,
 				   &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
@@ -2075,14 +2086,14 @@ static void ivybridge_update_wm(struct drm_device *dev)
 
 	I915_WRITE(WM3_LP_ILK,
 		   WM3_LP_EN |
-		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
 		   (plane_wm << WM1_LP_SR_SHIFT) |
 		   cursor_wm);
 }
 
-static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev,
+static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
 				      struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pixel_rate, pfit_size;
@@ -2112,30 +2123,38 @@ static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev,
 	return pixel_rate;
 }
 
-static uint32_t hsw_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
+/* latency must be in 0.1us units. */
+static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
 			       uint32_t latency)
 {
 	uint64_t ret;
 
+	if (WARN(latency == 0, "Latency value missing\n"))
+		return UINT_MAX;
+
 	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
 	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
 
 	return ret;
 }
 
-static uint32_t hsw_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
+/* latency must be in 0.1us units. */
+static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
 			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
 			       uint32_t latency)
 {
 	uint32_t ret;
 
+	if (WARN(latency == 0, "Latency value missing\n"))
+		return UINT_MAX;
+
 	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
 	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
 	ret = DIV_ROUND_UP(ret, 64) + 2;
 	return ret;
 }
 
-static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
+static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
 			   uint8_t bytes_per_pixel)
 {
 	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
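The renamed ilk_wm_method1()/ilk_wm_method2() keep their two complementary formulas: method1 sizes the watermark from raw bytes fetched during the memory latency window, method2 from whole scanlines. A self-contained sketch with arbitrary example numbers follows; the /10000 factor reconciles a kHz pixel clock with 0.1us latency units:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* method1: bytes fetched during the latency window, in 64-byte units */
static uint32_t wm_method1(uint32_t pixel_rate, uint8_t bpp, uint32_t latency)
{
	uint64_t ret = (uint64_t)pixel_rate * bpp * latency;

	return (uint32_t)DIV_ROUND_UP(ret, 64 * 10000) + 2;
}

/* method2: whole scanlines needed to cover the latency window */
static uint32_t wm_method2(uint32_t pixel_rate, uint32_t htotal,
			   uint32_t width, uint8_t bpp, uint32_t latency)
{
	uint32_t lines = (latency * pixel_rate) / (htotal * 10000);

	return DIV_ROUND_UP((lines + 1) * width * bpp, 64) + 2;
}

int main(void)
{
	/* 148500 kHz pixel clock, 4 bytes/px, 2.0us (20 x 0.1us) latency */
	printf("method1 = %u\n", wm_method1(148500, 4, 20));
	printf("method2 = %u\n", wm_method2(148500, 2200, 1920, 4, 20));
	return 0;
}

For LP levels the driver takes min(method1, method2), so whichever model yields the smaller (cheaper) watermark wins.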
@@ -2143,15 +2162,11 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
 
 struct hsw_pipe_wm_parameters {
 	bool active;
-	bool sprite_enabled;
-	uint8_t pri_bytes_per_pixel;
-	uint8_t spr_bytes_per_pixel;
-	uint8_t cur_bytes_per_pixel;
-	uint32_t pri_horiz_pixels;
-	uint32_t spr_horiz_pixels;
-	uint32_t cur_horiz_pixels;
 	uint32_t pipe_htotal;
 	uint32_t pixel_rate;
+	struct intel_plane_wm_parameters pri;
+	struct intel_plane_wm_parameters spr;
+	struct intel_plane_wm_parameters cur;
 };
 
 struct hsw_wm_maximums {
@@ -2161,15 +2176,6 @@ struct hsw_wm_maximums {
 	uint16_t fbc;
 };
 
-struct hsw_lp_wm_result {
-	bool enable;
-	bool fbc_enable;
-	uint32_t pri_val;
-	uint32_t spr_val;
-	uint32_t cur_val;
-	uint32_t fbc_val;
-};
-
 struct hsw_wm_values {
 	uint32_t wm_pipe[3];
 	uint32_t wm_lp[3];
@@ -2178,128 +2184,289 @@ struct hsw_wm_values {
 	bool enable_fbc_wm;
 };
 
-enum hsw_data_buf_partitioning {
-	HSW_DATA_BUF_PART_1_2,
-	HSW_DATA_BUF_PART_5_6,
+/* used in computing the new watermarks state */
+struct intel_wm_config {
+	unsigned int num_pipes_active;
+	bool sprites_enabled;
+	bool sprites_scaled;
+	bool fbc_wm_enabled;
 };
 
-/* For both WM_PIPE and WM_LP. */
-static uint32_t hsw_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
 				   uint32_t mem_value,
 				   bool is_lp)
 {
 	uint32_t method1, method2;
 
-	/* TODO: for now, assume the primary plane is always enabled. */
-	if (!params->active)
+	if (!params->active || !params->pri.enabled)
 		return 0;
 
-	method1 = hsw_wm_method1(params->pixel_rate,
-				 params->pri_bytes_per_pixel,
+	method1 = ilk_wm_method1(params->pixel_rate,
				 params->pri.bytes_per_pixel,
 				 mem_value);
 
 	if (!is_lp)
 		return method1;
 
-	method2 = hsw_wm_method2(params->pixel_rate,
+	method2 = ilk_wm_method2(params->pixel_rate,
 				 params->pipe_htotal,
-				 params->pri_horiz_pixels,
-				 params->pri_bytes_per_pixel,
+				 params->pri.horiz_pixels,
+				 params->pri.bytes_per_pixel,
 				 mem_value);
 
 	return min(method1, method2);
 }
 
-/* For both WM_PIPE and WM_LP. */
-static uint32_t hsw_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
 				   uint32_t mem_value)
 {
 	uint32_t method1, method2;
 
-	if (!params->active || !params->sprite_enabled)
+	if (!params->active || !params->spr.enabled)
 		return 0;
 
-	method1 = hsw_wm_method1(params->pixel_rate,
-				 params->spr_bytes_per_pixel,
+	method1 = ilk_wm_method1(params->pixel_rate,
+				 params->spr.bytes_per_pixel,
 				 mem_value);
-	method2 = hsw_wm_method2(params->pixel_rate,
+	method2 = ilk_wm_method2(params->pixel_rate,
 				 params->pipe_htotal,
-				 params->spr_horiz_pixels,
-				 params->spr_bytes_per_pixel,
+				 params->spr.horiz_pixels,
+				 params->spr.bytes_per_pixel,
 				 mem_value);
 	return min(method1, method2);
 }
 
-/* For both WM_PIPE and WM_LP. */
-static uint32_t hsw_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
 				   uint32_t mem_value)
 {
-	if (!params->active)
+	if (!params->active || !params->cur.enabled)
 		return 0;
 
-	return hsw_wm_method2(params->pixel_rate,
+	return ilk_wm_method2(params->pixel_rate,
 			      params->pipe_htotal,
-			      params->cur_horiz_pixels,
-			      params->cur_bytes_per_pixel,
+			      params->cur.horiz_pixels,
+			      params->cur.bytes_per_pixel,
 			      mem_value);
 }
 
 /* Only for WM_LP. */
-static uint32_t hsw_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
-				   uint32_t pri_val,
-				   uint32_t mem_value)
+static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
+				   uint32_t pri_val)
 {
-	if (!params->active)
+	if (!params->active || !params->pri.enabled)
 		return 0;
 
-	return hsw_wm_fbc(pri_val,
-			  params->pri_horiz_pixels,
-			  params->pri_bytes_per_pixel);
+	return ilk_wm_fbc(pri_val,
			  params->pri.horiz_pixels,
			  params->pri.bytes_per_pixel);
 }
 
-static bool hsw_compute_lp_wm(uint32_t mem_value, struct hsw_wm_maximums *max,
-			      struct hsw_pipe_wm_parameters *params,
-			      struct hsw_lp_wm_result *result)
+static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
 {
-	enum pipe pipe;
-	uint32_t pri_val[3], spr_val[3], cur_val[3], fbc_val[3];
+	if (INTEL_INFO(dev)->gen >= 7)
+		return 768;
+	else
+		return 512;
+}
 
-	for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
-		struct hsw_pipe_wm_parameters *p = &params[pipe];
+/* Calculate the maximum primary/sprite plane watermark */
+static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
+				     int level,
+				     const struct intel_wm_config *config,
+				     enum intel_ddb_partitioning ddb_partitioning,
+				     bool is_sprite)
+{
+	unsigned int fifo_size = ilk_display_fifo_size(dev);
+	unsigned int max;
 
-		pri_val[pipe] = hsw_compute_pri_wm(p, mem_value, true);
-		spr_val[pipe] = hsw_compute_spr_wm(p, mem_value);
-		cur_val[pipe] = hsw_compute_cur_wm(p, mem_value);
-		fbc_val[pipe] = hsw_compute_fbc_wm(p, pri_val[pipe], mem_value);
-	}
+	/* if sprites aren't enabled, sprites get nothing */
+	if (is_sprite && !config->sprites_enabled)
+		return 0;
 
-	result->pri_val = max3(pri_val[0], pri_val[1], pri_val[2]);
-	result->spr_val = max3(spr_val[0], spr_val[1], spr_val[2]);
-	result->cur_val = max3(cur_val[0], cur_val[1], cur_val[2]);
-	result->fbc_val = max3(fbc_val[0], fbc_val[1], fbc_val[2]);
+	/* HSW allows LP1+ watermarks even with multiple pipes */
+	if (level == 0 || config->num_pipes_active > 1) {
+		fifo_size /= INTEL_INFO(dev)->num_pipes;
 
-	if (result->fbc_val > max->fbc) {
-		result->fbc_enable = false;
-		result->fbc_val = 0;
-	} else {
-		result->fbc_enable = true;
+		/*
+		 * For some reason the non self refresh
+		 * FIFO size is only half of the self
+		 * refresh FIFO size on ILK/SNB.
+		 */
+		if (INTEL_INFO(dev)->gen <= 6)
+			fifo_size /= 2;
+	}
+
+	if (config->sprites_enabled) {
+		/* level 0 is always calculated with 1:1 split */
+		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
+			if (is_sprite)
+				fifo_size *= 5;
+			fifo_size /= 6;
+		} else {
+			fifo_size /= 2;
+		}
 	}
 
+	/* clamp to max that the registers can hold */
+	if (INTEL_INFO(dev)->gen >= 7)
+		/* IVB/HSW primary/sprite plane watermarks */
+		max = level == 0 ? 127 : 1023;
+	else if (!is_sprite)
+		/* ILK/SNB primary plane watermarks */
+		max = level == 0 ? 127 : 511;
+	else
+		/* ILK/SNB sprite plane watermarks */
+		max = level == 0 ? 63 : 255;
+
+	return min(fifo_size, max);
+}
+
+/* Calculate the maximum cursor plane watermark */
+static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
+				      int level,
+				      const struct intel_wm_config *config)
+{
+	/* HSW LP1+ watermarks w/ multiple pipes */
+	if (level > 0 && config->num_pipes_active > 1)
+		return 64;
+
+	/* otherwise just report max that registers can hold */
+	if (INTEL_INFO(dev)->gen >= 7)
+		return level == 0 ? 63 : 255;
+	else
+		return level == 0 ? 31 : 63;
+}
+
+/* Calculate the maximum FBC watermark */
+static unsigned int ilk_fbc_wm_max(void)
+{
+	/* max that registers can hold */
+	return 15;
+}
+
+static void ilk_wm_max(struct drm_device *dev,
+		       int level,
+		       const struct intel_wm_config *config,
+		       enum intel_ddb_partitioning ddb_partitioning,
+		       struct hsw_wm_maximums *max)
+{
+	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
+	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
+	max->cur = ilk_cursor_wm_max(dev, level, config);
+	max->fbc = ilk_fbc_wm_max();
+}
+
+static bool ilk_check_wm(int level,
+			 const struct hsw_wm_maximums *max,
+			 struct intel_wm_level *result)
+{
+	bool ret;
+
+	/* already determined to be invalid? */
+	if (!result->enable)
+		return false;
+
 	result->enable = result->pri_val <= max->pri &&
 			 result->spr_val <= max->spr &&
 			 result->cur_val <= max->cur;
-	return result->enable;
+
+	ret = result->enable;
+
+	/*
+	 * HACK until we can pre-compute everything,
+	 * and thus fail gracefully if LP0 watermarks
+	 * are exceeded...
+	 */
+	if (level == 0 && !result->enable) {
+		if (result->pri_val > max->pri)
+			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
+				      level, result->pri_val, max->pri);
+		if (result->spr_val > max->spr)
+			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
+				      level, result->spr_val, max->spr);
+		if (result->cur_val > max->cur)
+			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
+				      level, result->cur_val, max->cur);
+
+		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
+		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
+		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
+		result->enable = true;
+	}
+
+	DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
+
+	return ret;
+}
+
+static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
+				 int level,
+				 struct hsw_pipe_wm_parameters *p,
+				 struct intel_wm_level *result)
+{
+	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
+	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
+	uint16_t cur_latency = dev_priv->wm.cur_latency[level];
+
+	/* WM1+ latency values stored in 0.5us units */
+	if (level > 0) {
+		pri_latency *= 5;
+		spr_latency *= 5;
+		cur_latency *= 5;
+	}
+
+	result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
+	result->spr_val = ilk_compute_spr_wm(p, spr_latency);
+	result->cur_val = ilk_compute_cur_wm(p, cur_latency);
+	result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
+	result->enable = true;
+}
+
+static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
+			      int level, struct hsw_wm_maximums *max,
+			      struct hsw_pipe_wm_parameters *params,
+			      struct intel_wm_level *result)
+{
+	enum pipe pipe;
+	struct intel_wm_level res[3];
+
+	for (pipe = PIPE_A; pipe <= PIPE_C; pipe++)
+		ilk_compute_wm_level(dev_priv, level, &params[pipe], &res[pipe]);
+
+	result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val);
+	result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val);
+	result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val);
+	result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val);
+	result->enable = true;
+
+	return ilk_check_wm(level, max, result);
 }
 
 static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
-				    uint32_t mem_value, enum pipe pipe,
+				    enum pipe pipe,
 				    struct hsw_pipe_wm_parameters *params)
 {
 	uint32_t pri_val, cur_val, spr_val;
+	/* WM0 latency values stored in 0.1us units */
+	uint16_t pri_latency = dev_priv->wm.pri_latency[0];
+	uint16_t spr_latency = dev_priv->wm.spr_latency[0];
+	uint16_t cur_latency = dev_priv->wm.cur_latency[0];
 
-	pri_val = hsw_compute_pri_wm(params, mem_value, false);
-	spr_val = hsw_compute_spr_wm(params, mem_value);
-	cur_val = hsw_compute_cur_wm(params, mem_value);
+	pri_val = ilk_compute_pri_wm(params, pri_latency, false);
+	spr_val = ilk_compute_spr_wm(params, spr_latency);
+	cur_val = ilk_compute_cur_wm(params, cur_latency);
 
 	WARN(pri_val > 127,
 	     "Primary WM error, mode not supported for pipe %c\n",
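ilk_plane_wm_max() above replaces the table of magic limits that a later hunk deletes from hsw_compute_wm_parameters(). A standalone sketch of the budgeting; with a 768-entry FIFO, one pipe, and the 5/6 split it reproduces the 640 sprite limit the old code hardcoded:

#include <stdbool.h>
#include <stdio.h>

static unsigned int plane_wm_max(unsigned int fifo_size, unsigned int num_pipes,
				 int level, bool sprites_enabled,
				 bool part_5_6, bool is_sprite,
				 unsigned int reg_max)
{
	if (is_sprite && !sprites_enabled)
		return 0;

	/* LP0, or any level with several active pipes: share the FIFO */
	if (level == 0 || num_pipes > 1)
		fifo_size /= num_pipes;

	if (sprites_enabled) {
		if (level > 0 && part_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;	/* 1/6 primary, 5/6 sprite */
		} else {
			fifo_size /= 2;	/* 1:1 split */
		}
	}

	/* clamp to what the register field can hold */
	return fifo_size < reg_max ? fifo_size : reg_max;
}

int main(void)
{
	/* 768-entry FIFO (gen7), one pipe, LP1, 5/6 split, sprite plane */
	printf("sprite max = %u\n",
	       plane_wm_max(768, 1, 1, true, true, true, 1023));
	return 0;
}

This is a toy model of the diff's arithmetic only; the ILK/SNB halving of the non-self-refresh FIFO and the per-generation register limits are handled in the driver function itself.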
@@ -2338,27 +2505,116 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
 	       PIPE_WM_LINETIME_TIME(linetime);
 }
 
+static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_HASWELL(dev)) {
+		uint64_t sskpd = I915_READ64(MCH_SSKPD);
+
+		wm[0] = (sskpd >> 56) & 0xFF;
+		if (wm[0] == 0)
+			wm[0] = sskpd & 0xF;
+		wm[1] = (sskpd >> 4) & 0xFF;
+		wm[2] = (sskpd >> 12) & 0xFF;
+		wm[3] = (sskpd >> 20) & 0x1FF;
+		wm[4] = (sskpd >> 32) & 0x1FF;
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		uint32_t sskpd = I915_READ(MCH_SSKPD);
+
+		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
+		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
+		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
+		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
+	} else if (INTEL_INFO(dev)->gen >= 5) {
+		uint32_t mltr = I915_READ(MLTR_ILK);
+
+		/* ILK primary LP0 latency is 700 ns */
+		wm[0] = 7;
+		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
+		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
+	}
+}
+
+static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+	/* ILK sprite LP0 latency is 1300 ns */
+	if (INTEL_INFO(dev)->gen == 5)
+		wm[0] = 13;
+}
+
+static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+	/* ILK cursor LP0 latency is 1300 ns */
+	if (INTEL_INFO(dev)->gen == 5)
+		wm[0] = 13;
+
+	/* WaDoubleCursorLP3Latency:ivb */
+	if (IS_IVYBRIDGE(dev))
+		wm[3] *= 2;
+}
+
+static void intel_print_wm_latency(struct drm_device *dev,
+				   const char *name,
+				   const uint16_t wm[5])
+{
+	int level, max_level;
+
+	/* how many WM levels are we expecting */
+	if (IS_HASWELL(dev))
+		max_level = 4;
+	else if (INTEL_INFO(dev)->gen >= 6)
+		max_level = 3;
+	else
+		max_level = 2;
+
+	for (level = 0; level <= max_level; level++) {
+		unsigned int latency = wm[level];
+
+		if (latency == 0) {
+			DRM_ERROR("%s WM%d latency not provided\n",
+				  name, level);
+			continue;
+		}
+
+		/* WM1+ latency values in 0.5us units */
+		if (level > 0)
+			latency *= 5;
+
+		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
+			      name, level, wm[level],
+			      latency / 10, latency % 10);
+	}
+}
+
+static void intel_setup_wm_latency(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
+
+	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
+	       sizeof(dev_priv->wm.pri_latency));
+	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
+	       sizeof(dev_priv->wm.pri_latency));
+
+	intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
+	intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
+
+	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
+	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
+	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+}
+
 static void hsw_compute_wm_parameters(struct drm_device *dev,
 				      struct hsw_pipe_wm_parameters *params,
-				      uint32_t *wm,
 				      struct hsw_wm_maximums *lp_max_1_2,
 				      struct hsw_wm_maximums *lp_max_5_6)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	struct drm_plane *plane;
-	uint64_t sskpd = I915_READ64(MCH_SSKPD);
 	enum pipe pipe;
-	int pipes_active = 0, sprites_enabled = 0;
-
-	if ((sskpd >> 56) & 0xFF)
-		wm[0] = (sskpd >> 56) & 0xFF;
-	else
-		wm[0] = sskpd & 0xF;
-	wm[1] = ((sskpd >> 4) & 0xFF) * 5;
-	wm[2] = ((sskpd >> 12) & 0xFF) * 5;
-	wm[3] = ((sskpd >> 20) & 0x1FF) * 5;
-	wm[4] = ((sskpd >> 32) & 0x1FF) * 5;
+	struct intel_wm_config config = {};
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
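intel_read_wm_latency() above concentrates the SSKPD decoding that this hunk removes from hsw_compute_wm_parameters(). A userspace sketch of the Haswell unpacking; the register value below is fabricated so that WM0..WM4 decode to 2, 4, 8, 12, 16:

#include <stdint.h>
#include <stdio.h>

static void read_wm_latency(uint64_t sskpd, uint16_t wm[5])
{
	wm[0] = (sskpd >> 56) & 0xFF;	/* WM0, 0.1us units */
	if (wm[0] == 0)
		wm[0] = sskpd & 0xF;	/* narrower fallback field */
	wm[1] = (sskpd >> 4) & 0xFF;	/* WM1+, 0.5us units */
	wm[2] = (sskpd >> 12) & 0xFF;
	wm[3] = (sskpd >> 20) & 0x1FF;
	wm[4] = (sskpd >> 32) & 0x1FF;
}

int main(void)
{
	uint16_t wm[5];
	int i;

	read_wm_latency(0x0200001000C08040ULL, wm);	/* made-up sample value */
	for (i = 0; i < 5; i++)
		printf("wm[%d] = %u\n", i, wm[i]);
	return 0;
}

Note the behavioural shift the diff makes along the way: the old inline decode pre-multiplied WM1+ fields by 5, whereas the new helpers keep raw 0.5us units and defer the conversion to the point of use (ilk_compute_wm_level() and the "* 500" call sites).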
@@ -2371,15 +2627,18 @@ static void hsw_compute_wm_parameters(struct drm_device *dev,
 		if (!p->active)
 			continue;
 
-		pipes_active++;
+		config.num_pipes_active++;
 
 		p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
-		p->pixel_rate = hsw_wm_get_pixel_rate(dev, crtc);
-		p->pri_bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
-		p->cur_bytes_per_pixel = 4;
-		p->pri_horiz_pixels =
+		p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
+		p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
+		p->cur.bytes_per_pixel = 4;
+		p->pri.horiz_pixels =
 			intel_crtc->config.requested_mode.hdisplay;
-		p->cur_horiz_pixels = 64;
+		p->cur.horiz_pixels = 64;
+		/* TODO: for now, assume primary and cursor planes are always enabled. */
+		p->pri.enabled = true;
+		p->cur.enabled = true;
 	}
 
 	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
@@ -2389,59 +2648,53 @@ static void hsw_compute_wm_parameters(struct drm_device *dev,
 		pipe = intel_plane->pipe;
 		p = &params[pipe];
 
-		p->sprite_enabled = intel_plane->wm.enable;
-		p->spr_bytes_per_pixel = intel_plane->wm.bytes_per_pixel;
-		p->spr_horiz_pixels = intel_plane->wm.horiz_pixels;
+		p->spr = intel_plane->wm;
 
-		if (p->sprite_enabled)
-			sprites_enabled++;
+		config.sprites_enabled |= p->spr.enabled;
+		config.sprites_scaled |= p->spr.scaled;
 	}
 
-	if (pipes_active > 1) {
-		lp_max_1_2->pri = lp_max_5_6->pri = sprites_enabled ? 128 : 256;
-		lp_max_1_2->spr = lp_max_5_6->spr = 128;
-		lp_max_1_2->cur = lp_max_5_6->cur = 64;
-	} else {
-		lp_max_1_2->pri = sprites_enabled ? 384 : 768;
-		lp_max_5_6->pri = sprites_enabled ? 128 : 768;
-		lp_max_1_2->spr = 384;
-		lp_max_5_6->spr = 640;
-		lp_max_1_2->cur = lp_max_5_6->cur = 255;
-	}
-	lp_max_1_2->fbc = lp_max_5_6->fbc = 15;
+	ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2);
+
+	/* 5/6 split only in single pipe config on IVB+ */
+	if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1)
+		ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6);
+	else
+		*lp_max_5_6 = *lp_max_1_2;
 }
 
 static void hsw_compute_wm_results(struct drm_device *dev,
 				   struct hsw_pipe_wm_parameters *params,
-				   uint32_t *wm,
 				   struct hsw_wm_maximums *lp_maximums,
 				   struct hsw_wm_values *results)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
-	struct hsw_lp_wm_result lp_results[4] = {};
+	struct intel_wm_level lp_results[4] = {};
 	enum pipe pipe;
 	int level, max_level, wm_lp;
 
 	for (level = 1; level <= 4; level++)
-		if (!hsw_compute_lp_wm(wm[level], lp_maximums, params,
+		if (!hsw_compute_lp_wm(dev_priv, level,
+				       lp_maximums, params,
 				       &lp_results[level - 1]))
 			break;
 	max_level = level - 1;
 
+	memset(results, 0, sizeof(*results));
+
 	/* The spec says it is preferred to disable FBC WMs instead of disabling
 	 * a WM level. */
 	results->enable_fbc_wm = true;
 	for (level = 1; level <= max_level; level++) {
-		if (!lp_results[level - 1].fbc_enable) {
+		if (lp_results[level - 1].fbc_val > lp_maximums->fbc) {
 			results->enable_fbc_wm = false;
-			break;
+			lp_results[level - 1].fbc_val = 0;
 		}
 	}
 
-	memset(results, 0, sizeof(*results));
 	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
-		const struct hsw_lp_wm_result *r;
+		const struct intel_wm_level *r;
 
 		level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
 		if (level > max_level)
@@ -2456,8 +2709,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
 	}
 
 	for_each_pipe(pipe)
-		results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, wm[0],
-							     pipe,
+		results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe,
 							     &params[pipe]);
 
 	for_each_pipe(pipe) {
@@ -2468,8 +2720,8 @@ static void hsw_compute_wm_results(struct drm_device *dev,
 
 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
  * case both are at the same level. Prefer r1 in case they're the same. */
-struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
-					   struct hsw_wm_values *r2)
+static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
+						  struct hsw_wm_values *r2)
 {
 	int i, val_r1 = 0, val_r2 = 0;
 
@@ -2498,11 +2750,11 @@ struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
  */
 static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
 				struct hsw_wm_values *results,
-				enum hsw_data_buf_partitioning partitioning)
+				enum intel_ddb_partitioning partitioning)
 {
 	struct hsw_wm_values previous;
 	uint32_t val;
-	enum hsw_data_buf_partitioning prev_partitioning;
+	enum intel_ddb_partitioning prev_partitioning;
 	bool prev_enable_fbc_wm;
 
 	previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
@@ -2519,7 +2771,7 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
 	previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
 
 	prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
-				HSW_DATA_BUF_PART_5_6 : HSW_DATA_BUF_PART_1_2;
+				INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
 
 	prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
 
@@ -2558,7 +2810,7 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
 
 	if (prev_partitioning != partitioning) {
 		val = I915_READ(WM_MISC);
-		if (partitioning == HSW_DATA_BUF_PART_1_2)
+		if (partitioning == INTEL_DDB_PART_1_2)
 			val &= ~WM_MISC_DATA_PARTITION_5_6;
 		else
 			val |= WM_MISC_DATA_PARTITION_5_6;
@@ -2595,44 +2847,39 @@ static void haswell_update_wm(struct drm_device *dev)
 	struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
 	struct hsw_pipe_wm_parameters params[3];
 	struct hsw_wm_values results_1_2, results_5_6, *best_results;
-	uint32_t wm[5];
-	enum hsw_data_buf_partitioning partitioning;
+	enum intel_ddb_partitioning partitioning;
 
-	hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2, &lp_max_5_6);
+	hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6);
 
-	hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results_1_2);
+	hsw_compute_wm_results(dev, params,
+			       &lp_max_1_2, &results_1_2);
 	if (lp_max_1_2.pri != lp_max_5_6.pri) {
-		hsw_compute_wm_results(dev, params, wm, &lp_max_5_6,
-				       &results_5_6);
+		hsw_compute_wm_results(dev, params,
				       &lp_max_5_6, &results_5_6);
 		best_results = hsw_find_best_result(&results_1_2, &results_5_6);
 	} else {
 		best_results = &results_1_2;
 	}
 
 	partitioning = (best_results == &results_1_2) ?
-		       HSW_DATA_BUF_PART_1_2 : HSW_DATA_BUF_PART_5_6;
+		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
 
 	hsw_write_wm_values(dev_priv, best_results, partitioning);
 }
 
-static void haswell_update_sprite_wm(struct drm_device *dev, int pipe,
+static void haswell_update_sprite_wm(struct drm_plane *plane,
+				     struct drm_crtc *crtc,
 				     uint32_t sprite_width, int pixel_size,
-				     bool enable)
+				     bool enabled, bool scaled)
 {
-	struct drm_plane *plane;
+	struct intel_plane *intel_plane = to_intel_plane(plane);
 
-	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
-		struct intel_plane *intel_plane = to_intel_plane(plane);
-
-		if (intel_plane->pipe == pipe) {
-			intel_plane->wm.enable = enable;
-			intel_plane->wm.horiz_pixels = sprite_width + 1;
-			intel_plane->wm.bytes_per_pixel = pixel_size;
-			break;
-		}
-	}
+	intel_plane->wm.enabled = enabled;
+	intel_plane->wm.scaled = scaled;
+	intel_plane->wm.horiz_pixels = sprite_width;
+	intel_plane->wm.bytes_per_pixel = pixel_size;
 
-	haswell_update_wm(dev);
+	haswell_update_wm(plane->dev);
 }
 
 static bool
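haswell_update_wm() now computes one result set per DDB partitioning and lets hsw_find_best_result() choose between them. A toy sketch of that selection, under the assumption stated in the driver comment that the set enabling the higher LP level wins and 1/2 is preferred on ties:

#include <stdbool.h>
#include <stdio.h>

struct wm_values {
	bool wm_lp_enabled[3];	/* LP1..LP3 */
};

static int highest_level(const struct wm_values *r)
{
	int i, best = 0;

	for (i = 0; i < 3; i++)
		if (r->wm_lp_enabled[i])
			best = i + 1;
	return best;
}

int main(void)
{
	struct wm_values part_1_2 = { { true, false, false } };
	struct wm_values part_5_6 = { { true, true,  false } };
	const char *pick;

	/* strictly better only: 1/2 wins ties, as the comment says */
	pick = highest_level(&part_5_6) > highest_level(&part_1_2) ?
		"5/6" : "1/2";
	printf("using %s partitioning\n", pick);
	return 0;
}

The real hsw_find_best_result() also weighs enable_fbc_wm when both candidates reach the same level; this sketch deliberately models only the primary criterion.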
@@ -2711,17 +2958,20 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
 	return *sprite_wm > 0x3ff ? false : true;
 }
 
-static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+static void sandybridge_update_sprite_wm(struct drm_plane *plane,
+					 struct drm_crtc *crtc,
 					 uint32_t sprite_width, int pixel_size,
-					 bool enable)
+					 bool enabled, bool scaled)
 {
+	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
+	int pipe = to_intel_plane(plane)->pipe;
+	int latency = dev_priv->wm.spr_latency[0] * 100;	/* In unit 0.1us */
 	u32 val;
 	int sprite_wm, reg;
 	int ret;
 
-	if (!enable)
+	if (!enabled)
 		return;
 
 	switch (pipe) {
@@ -2756,7 +3006,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
 	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
 					      pixel_size,
 					      &sandybridge_display_srwm_info,
-					      SNB_READ_WM1_LATENCY() * 500,
+					      dev_priv->wm.spr_latency[1] * 500,
 					      &sprite_wm);
 	if (!ret) {
 		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
@@ -2772,7 +3022,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
 	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
 					      pixel_size,
 					      &sandybridge_display_srwm_info,
-					      SNB_READ_WM2_LATENCY() * 500,
+					      dev_priv->wm.spr_latency[2] * 500,
 					      &sprite_wm);
 	if (!ret) {
 		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
@@ -2784,7 +3034,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
 	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
 					      pixel_size,
 					      &sandybridge_display_srwm_info,
-					      SNB_READ_WM3_LATENCY() * 500,
+					      dev_priv->wm.spr_latency[3] * 500,
 					      &sprite_wm);
 	if (!ret) {
 		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
@@ -2834,15 +3084,16 @@ void intel_update_watermarks(struct drm_device *dev)
 		dev_priv->display.update_wm(dev);
 }
 
-void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+void intel_update_sprite_watermarks(struct drm_plane *plane,
+				    struct drm_crtc *crtc,
 				    uint32_t sprite_width, int pixel_size,
-				    bool enable)
+				    bool enabled, bool scaled)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = plane->dev->dev_private;
 
 	if (dev_priv->display.update_sprite_wm)
-		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
-						   pixel_size, enable);
+		dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
+						   pixel_size, enabled, scaled);
 }
 
 static struct drm_i915_gem_object *
@@ -2859,7 +3110,7 @@ intel_alloc_context_page(struct drm_device *dev)
 		return NULL;
 	}
 
-	ret = i915_gem_object_pin(ctx, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false);
 	if (ret) {
 		DRM_ERROR("failed to pin power context: %d\n", ret);
 		goto err_unref;
@@ -3076,19 +3327,12 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
  */
 static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
 {
-	unsigned long timeout = jiffies + msecs_to_jiffies(10);
 	u32 pval;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-	do {
-		pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-		if (time_after(jiffies, timeout)) {
-			DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
-			break;
-		}
-		udelay(10);
-	} while (pval & 1);
+	if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
+		DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
 
 	pval >>= 8;
 
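The open-coded jiffies loop is replaced here by i915's wait_for() polling macro from intel_drv.h: wait_for(COND, MS) re-evaluates COND until it holds or MS milliseconds elapse, returning 0 on success and -ETIMEDOUT otherwise. A simplified sketch of the idea (the real macro is more careful, e.g. it re-checks COND once after the deadline):

#define wait_for_sketch(COND, MS) ({					\
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		msleep(1);	/* sleep, don't busy-spin */		\
	}								\
	ret__;								\
})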
@@ -3129,13 +3373,10 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
 	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
 }
 
-
-static void gen6_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps_interrupts(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	I915_WRITE(GEN6_RC_CONTROL, 0);
-	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
 	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
 	I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
 	/* Complete PM interrupt masking here doesn't race with the rps work
@@ -3143,30 +3384,30 @@ static void gen6_disable_rps(struct drm_device *dev)
 	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
 	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
 
-	spin_lock_irq(&dev_priv->rps.lock);
+	spin_lock_irq(&dev_priv->irq_lock);
 	dev_priv->rps.pm_iir = 0;
-	spin_unlock_irq(&dev_priv->rps.lock);
+	spin_unlock_irq(&dev_priv->irq_lock);
 
 	I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
 }
 
-static void valleyview_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	I915_WRITE(GEN6_RC_CONTROL, 0);
-	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
-	I915_WRITE(GEN6_PMIER, 0);
-	/* Complete PM interrupt masking here doesn't race with the rps work
-	 * item again unmasking PM interrupts because that is using a different
-	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
-	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
+	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
 
-	spin_lock_irq(&dev_priv->rps.lock);
-	dev_priv->rps.pm_iir = 0;
-	spin_unlock_irq(&dev_priv->rps.lock);
+	gen6_disable_rps_interrupts(dev);
+}
 
-	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+static void valleyview_disable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+
+	gen6_disable_rps_interrupts(dev);
 
 	if (dev_priv->vlv_pctx) {
 		drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
@@ -3176,6 +3417,10 @@ static void valleyview_disable_rps(struct drm_device *dev)
 
 int intel_enable_rc6(const struct drm_device *dev)
 {
+	/* No RC6 before Ironlake */
+	if (INTEL_INFO(dev)->gen < 5)
+		return 0;
+
 	/* Respect the kernel parameter if it is set */
 	if (i915_enable_rc6 >= 0)
 		return i915_enable_rc6;
@@ -3199,6 +3444,19 @@ int intel_enable_rc6(const struct drm_device *dev)
 	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 }
 
+static void gen6_enable_rps_interrupts(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	WARN_ON(dev_priv->rps.pm_iir);
+	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
+	I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
+	spin_unlock_irq(&dev_priv->irq_lock);
+	/* only unmask PM interrupts we need. Mask all others. */
+	I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS);
+}
+
 static void gen6_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
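gen6_enable_rps_interrupts() leans on snb_enable_pm_irq()/snb_disable_pm_irq(), which this series adds elsewhere (presumably on the i915_irq.c side; their definitions are not shown in this diff). A plausible sketch of the shared update helper behind such a pair, assuming callers hold dev_priv->irq_lock:

/* Illustrative only: adjust which PM interrupts are unmasked in GEN6_PMIMR.
 * interrupt_mask selects the bits to touch; enabled_irq_mask says which of
 * those bits should end up unmasked. */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = I915_READ(GEN6_PMIMR);
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	I915_WRITE(GEN6_PMIMR, new_val);
	POSTING_READ(GEN6_PMIMR);
}

Under that assumption, snb_enable_pm_irq(dev_priv, mask) would be snb_update_pm_irq(dev_priv, mask, mask), and the disable variant snb_update_pm_irq(dev_priv, mask, 0).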
@@ -3250,7 +3508,10 @@ static void gen6_enable_rps(struct drm_device *dev)
 
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
-	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+	if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
+		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
+	else
+		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
 	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
 	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
@@ -3327,17 +3588,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 
 	gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
 
-	/* requires MSI enabled */
-	I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
-	spin_lock_irq(&dev_priv->rps.lock);
-	/* FIXME: Our interrupt enabling sequence is bonghits.
-	 * dev_priv->rps.pm_iir really should be 0 here. */
-	dev_priv->rps.pm_iir = 0;
-	I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
-	I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
-	spin_unlock_irq(&dev_priv->rps.lock);
-	/* unmask all PM interrupts */
-	I915_WRITE(GEN6_PMINTRMSK, 0);
+	gen6_enable_rps_interrupts(dev);
 
 	rc6vids = 0;
 	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
@@ -3356,7 +3607,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 	gen6_gt_force_wake_put(dev_priv);
 }
 
-static void gen6_update_ring_freq(struct drm_device *dev)
+void gen6_update_ring_freq(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int min_freq = 15;
@@ -3482,7 +3733,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
 		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
 		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
 								      pcbr_offset,
-								      -1,
+								      I915_GTT_OFFSET_NONE,
 								      pctx_size);
 		goto out;
 	}
@@ -3607,14 +3858,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
 
 	valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
 
-	/* requires MSI enabled */
-	I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
-	spin_lock_irq(&dev_priv->rps.lock);
-	WARN_ON(dev_priv->rps.pm_iir != 0);
-	I915_WRITE(GEN6_PMIMR, 0);
-	spin_unlock_irq(&dev_priv->rps.lock);
-	/* enable all PM interrupts */
-	I915_WRITE(GEN6_PMINTRMSK, 0);
+	gen6_enable_rps_interrupts(dev);
 
 	gen6_gt_force_wake_put(dev_priv);
 }
@@ -3708,7 +3952,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 
 	intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
 			MI_MM_SPACE_GTT |
 			MI_SAVE_EXT_STATE_EN |
 			MI_RESTORE_EXT_STATE_EN |
@@ -3731,7 +3975,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 		return;
 	}
 
-	I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
+	I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 }
 
@@ -4429,7 +4673,10 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
-	/* Required for FBC */
+	/*
+	 * Required for FBC
+	 * WaFbcDisableDpfcClockGating:ilk
+	 */
 	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
 		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
 		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
@@ -4466,6 +4713,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
 	 * The bit 7,8,9 of 0x42020.
 	 */
 	if (IS_IRONLAKE_M(dev)) {
+		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
 		I915_WRITE(ILK_DISPLAY_CHICKEN1,
 			   I915_READ(ILK_DISPLAY_CHICKEN1) |
 			   ILK_FBCQ_DIS);
@@ -4602,6 +4850,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
 	 * The bit5 and bit7 of 0x42020
 	 * The bit14 of 0x70180
 	 * The bit14 of 0x71180
+	 *
+	 * WaFbcAsynchFlipDisableFbcQueue:snb
 	 */
 	I915_WRITE(ILK_DISPLAY_CHICKEN1,
 		   I915_READ(ILK_DISPLAY_CHICKEN1) |
@@ -4614,10 +4864,6 @@ static void gen6_init_clock_gating(struct drm_device *dev)
 		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
 		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
 
-	/* WaMbcDriverBootEnable:snb */
-	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
-		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
-
 	g4x_disable_trickle_feed(dev);
 
 	/* The default value should be 0x200 according to docs, but the two
@@ -4713,10 +4959,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(CACHE_MODE_1,
 		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
-	/* WaMbcDriverBootEnable:hsw */
-	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
-		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
-
 	/* WaSwitchSolVfFArbitrationPriority:hsw */
 	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
@@ -4800,10 +5042,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 
 	g4x_disable_trickle_feed(dev);
 
-	/* WaMbcDriverBootEnable:ivb */
-	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
-		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
-
 	/* WaVSRefCountFullforceMissDisable:ivb */
 	gen7_setup_fixed_func_scheduler(dev_priv);
 
@@ -4863,11 +5101,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
 		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
-	/* WaMbcDriverBootEnable:vlv */
-	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
-		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
-
-
 	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
 	 * gating disable must be set.  Failure to set it results in
 	 * flickering pixels due to Z write ordering failures after
@@ -5035,7 +5268,7 @@ bool intel_display_power_enabled(struct drm_device *dev,
 	case POWER_DOMAIN_TRANSCODER_B:
 	case POWER_DOMAIN_TRANSCODER_C:
 		return I915_READ(HSW_PWR_WELL_DRIVER) ==
-		       (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE);
+		       (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
 	default:
 		BUG();
 	}
@@ -5048,23 +5281,42 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 	uint32_t tmp;
 
 	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
-	is_enabled = tmp & HSW_PWR_WELL_STATE;
-	enable_requested = tmp & HSW_PWR_WELL_ENABLE;
+	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
+	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
 
 	if (enable) {
 		if (!enable_requested)
-			I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);
+			I915_WRITE(HSW_PWR_WELL_DRIVER,
+				   HSW_PWR_WELL_ENABLE_REQUEST);
 
 		if (!is_enabled) {
 			DRM_DEBUG_KMS("Enabling power well\n");
 			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
-				      HSW_PWR_WELL_STATE), 20))
+				      HSW_PWR_WELL_STATE_ENABLED), 20))
 				DRM_ERROR("Timeout enabling power well\n");
 		}
 	} else {
 		if (enable_requested) {
+			unsigned long irqflags;
+			enum pipe p;
+
 			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+			POSTING_READ(HSW_PWR_WELL_DRIVER);
 			DRM_DEBUG_KMS("Requesting to disable the power well\n");
+
+			/*
+			 * After this, the registers on the pipes that are part
+			 * of the power well will become zero, so we have to
+			 * adjust our counters according to that.
+			 *
+			 * FIXME: Should we do this in general in
+			 * drm_vblank_post_modeset?
+			 */
+			spin_lock_irqsave(&dev->vbl_lock, irqflags);
+			for_each_pipe(p)
+				if (p != PIPE_A)
+					dev->last_vblank[p] = 0;
+			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 		}
 	}
 }
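The ENABLE/STATE rename in the hunk above makes the request/acknowledge split in HSW_PWR_WELL_DRIVER explicit: software owns the REQUEST bit, and the hardware reports back through STATE_ENABLED. A hypothetical readback helper, purely to illustrate the pairing (not part of this patch):

/* Hypothetical: true only once the hardware has acknowledged our request. */
static bool hsw_power_well_on(struct drm_i915_private *dev_priv)
{
	uint32_t tmp = I915_READ(HSW_PWR_WELL_DRIVER);

	return (tmp & HSW_PWR_WELL_ENABLE_REQUEST) &&
	       (tmp & HSW_PWR_WELL_STATE_ENABLED);
}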
@@ -5160,10 +5412,21 @@ void intel_init_power_well(struct drm_device *dev)
 
 	/* We're taking over the BIOS, so clear any requests made by it since
 	 * the driver is in charge now. */
-	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE)
+	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
 		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
 }
 
+/* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
+{
+	hsw_disable_package_c8(dev_priv);
+}
+
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
+{
+	hsw_enable_package_c8(dev_priv);
+}
+
 /* Set up chip specific power management-related functions */
 void intel_init_pm(struct drm_device *dev)
 {
@@ -5199,8 +5462,12 @@ void intel_init_pm(struct drm_device *dev)
 
 	/* For FIFO watermark updates */
 	if (HAS_PCH_SPLIT(dev)) {
+		intel_setup_wm_latency(dev);
+
 		if (IS_GEN5(dev)) {
-			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+			if (dev_priv->wm.pri_latency[1] &&
+			    dev_priv->wm.spr_latency[1] &&
+			    dev_priv->wm.cur_latency[1])
 				dev_priv->display.update_wm = ironlake_update_wm;
 			else {
 				DRM_DEBUG_KMS("Failed to get proper latency. "
@@ -5209,7 +5476,9 @@ void intel_init_pm(struct drm_device *dev)
 			}
 			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
 		} else if (IS_GEN6(dev)) {
-			if (SNB_READ_WM0_LATENCY()) {
+			if (dev_priv->wm.pri_latency[0] &&
+			    dev_priv->wm.spr_latency[0] &&
+			    dev_priv->wm.cur_latency[0]) {
 				dev_priv->display.update_wm = sandybridge_update_wm;
 				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
 			} else {
@@ -5219,7 +5488,9 @@ void intel_init_pm(struct drm_device *dev)
 			}
 			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
 		} else if (IS_IVYBRIDGE(dev)) {
-			if (SNB_READ_WM0_LATENCY()) {
+			if (dev_priv->wm.pri_latency[0] &&
+			    dev_priv->wm.spr_latency[0] &&
+			    dev_priv->wm.cur_latency[0]) {
 				dev_priv->display.update_wm = ivybridge_update_wm;
 				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
 			} else {
@@ -5229,7 +5500,9 @@ void intel_init_pm(struct drm_device *dev)
 			}
 			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
 		} else if (IS_HASWELL(dev)) {
-			if (I915_READ64(MCH_SSKPD)) {
+			if (dev_priv->wm.pri_latency[0] &&
+			    dev_priv->wm.spr_latency[0] &&
+			    dev_priv->wm.cur_latency[0]) {
 				dev_priv->display.update_wm = haswell_update_wm;
 				dev_priv->display.update_sprite_wm =
 					haswell_update_sprite_wm;
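All PCH-split generations now gate their watermark vfuncs on the dev_priv->wm.{pri,spr,cur}_latency arrays rather than re-reading MLTR_ILK, the SNB latency fields or MCH_SSKPD inline; intel_setup_wm_latency(), introduced by this series, fills those arrays once at init. As a sketch of what such a setup step decodes on Haswell, where the memory latencies live in the 64-bit SSKPD register, assuming the field layout used by this kernel (illustrative, not quoted from this hunk):

static void hsw_read_wm_latency_sketch(struct drm_i915_private *dev_priv,
				       uint16_t wm[5])
{
	uint64_t sskpd = I915_READ64(MCH_SSKPD);

	wm[0] = (sskpd >> 56) & 0xFF;
	if (wm[0] == 0)
		wm[0] = sskpd & 0xF;	/* fall back to the low WM0 field */
	wm[1] = (sskpd >> 4) & 0xFF;
	wm[2] = (sskpd >> 12) & 0xFF;
	wm[3] = (sskpd >> 20) & 0x1FF;
	wm[4] = (sskpd >> 32) & 0x1FF;
}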
@@ -5292,254 +5565,6 @@ void intel_init_pm(struct drm_device *dev)
 	}
 }
 
5295static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
5296{
5297 u32 gt_thread_status_mask;
5298
5299 if (IS_HASWELL(dev_priv->dev))
5300 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
5301 else
5302 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
5303
5304 /* w/a for a sporadic read returning 0 by waiting for the GT
5305 * thread to wake up.
5306 */
5307 if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
5308 DRM_ERROR("GT thread status wait timed out\n");
5309}
5310
5311static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
5312{
5313 I915_WRITE_NOTRACE(FORCEWAKE, 0);
5314 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
5315}
5316
5317static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
5318{
5319 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
5320 FORCEWAKE_ACK_TIMEOUT_MS))
5321 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
5322
5323 I915_WRITE_NOTRACE(FORCEWAKE, 1);
5324 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
5325
5326 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
5327 FORCEWAKE_ACK_TIMEOUT_MS))
5328 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
5329
5330 /* WaRsForcewakeWaitTC0:snb */
5331 __gen6_gt_wait_for_thread_c0(dev_priv);
5332}
5333
5334static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
5335{
5336 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
5337 /* something from same cacheline, but !FORCEWAKE_MT */
5338 POSTING_READ(ECOBUS);
5339}
5340
5341static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
5342{
5343 u32 forcewake_ack;
5344
5345 if (IS_HASWELL(dev_priv->dev))
5346 forcewake_ack = FORCEWAKE_ACK_HSW;
5347 else
5348 forcewake_ack = FORCEWAKE_MT_ACK;
5349
5350 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
5351 FORCEWAKE_ACK_TIMEOUT_MS))
5352 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
5353
5354 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
5355 /* something from same cacheline, but !FORCEWAKE_MT */
5356 POSTING_READ(ECOBUS);
5357
5358 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
5359 FORCEWAKE_ACK_TIMEOUT_MS))
5360 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
5361
5362 /* WaRsForcewakeWaitTC0:ivb,hsw */
5363 __gen6_gt_wait_for_thread_c0(dev_priv);
5364}
5365
5366/*
5367 * Generally this is called implicitly by the register read function. However,
5368 * if some sequence requires the GT to not power down then this function should
5369 * be called at the beginning of the sequence followed by a call to
5370 * gen6_gt_force_wake_put() at the end of the sequence.
5371 */
5372void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
5373{
5374 unsigned long irqflags;
5375
5376 spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
5377 if (dev_priv->forcewake_count++ == 0)
5378 dev_priv->gt.force_wake_get(dev_priv);
5379 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
5380}
5381
5382void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
5383{
5384 u32 gtfifodbg;
5385 gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
5386 if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
5387 "MMIO read or write has been dropped %x\n", gtfifodbg))
5388 I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
5389}
5390
5391static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
5392{
5393 I915_WRITE_NOTRACE(FORCEWAKE, 0);
5394 /* something from same cacheline, but !FORCEWAKE */
5395 POSTING_READ(ECOBUS);
5396 gen6_gt_check_fifodbg(dev_priv);
5397}
5398
5399static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
5400{
5401 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5402 /* something from same cacheline, but !FORCEWAKE_MT */
5403 POSTING_READ(ECOBUS);
5404 gen6_gt_check_fifodbg(dev_priv);
5405}
5406
5407/*
5408 * see gen6_gt_force_wake_get()
5409 */
5410void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
5411{
5412 unsigned long irqflags;
5413
5414 spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
5415 if (--dev_priv->forcewake_count == 0)
5416 dev_priv->gt.force_wake_put(dev_priv);
5417 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
5418}
5419
5420int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
5421{
5422 int ret = 0;
5423
5424 if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
5425 int loop = 500;
5426 u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
5427 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
5428 udelay(10);
5429 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
5430 }
5431 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
5432 ++ret;
5433 dev_priv->gt_fifo_count = fifo;
5434 }
5435 dev_priv->gt_fifo_count--;
5436
5437 return ret;
5438}
5439
5440static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
5441{
5442 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
5443 /* something from same cacheline, but !FORCEWAKE_VLV */
5444 POSTING_READ(FORCEWAKE_ACK_VLV);
5445}
5446
5447static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
5448{
5449 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
5450 FORCEWAKE_ACK_TIMEOUT_MS))
5451 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
5452
5453 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
5454 I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
5455 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
5456
5457 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
5458 FORCEWAKE_ACK_TIMEOUT_MS))
5459 DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
5460
5461 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
5462 FORCEWAKE_KERNEL),
5463 FORCEWAKE_ACK_TIMEOUT_MS))
5464 DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
5465
5466 /* WaRsForcewakeWaitTC0:vlv */
5467 __gen6_gt_wait_for_thread_c0(dev_priv);
5468}
5469
5470static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
5471{
5472 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5473 I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
5474 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5475 /* The below doubles as a POSTING_READ */
5476 gen6_gt_check_fifodbg(dev_priv);
5477}
5478
5479void intel_gt_reset(struct drm_device *dev)
5480{
5481 struct drm_i915_private *dev_priv = dev->dev_private;
5482
5483 if (IS_VALLEYVIEW(dev)) {
5484 vlv_force_wake_reset(dev_priv);
5485 } else if (INTEL_INFO(dev)->gen >= 6) {
5486 __gen6_gt_force_wake_reset(dev_priv);
5487 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
5488 __gen6_gt_force_wake_mt_reset(dev_priv);
5489 }
5490}
5491
5492void intel_gt_init(struct drm_device *dev)
5493{
5494 struct drm_i915_private *dev_priv = dev->dev_private;
5495
5496 spin_lock_init(&dev_priv->gt_lock);
5497
5498 intel_gt_reset(dev);
5499
5500 if (IS_VALLEYVIEW(dev)) {
5501 dev_priv->gt.force_wake_get = vlv_force_wake_get;
5502 dev_priv->gt.force_wake_put = vlv_force_wake_put;
5503 } else if (IS_HASWELL(dev)) {
5504 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
5505 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
5506 } else if (IS_IVYBRIDGE(dev)) {
5507 u32 ecobus;
5508
5509 /* IVB configs may use multi-threaded forcewake */
5510
5511 /* A small trick here - if the bios hasn't configured
5512 * MT forcewake, and if the device is in RC6, then
5513 * force_wake_mt_get will not wake the device and the
5514 * ECOBUS read will return zero. Which will be
5515 * (correctly) interpreted by the test below as MT
5516 * forcewake being disabled.
5517 */
5518 mutex_lock(&dev->struct_mutex);
5519 __gen6_gt_force_wake_mt_get(dev_priv);
5520 ecobus = I915_READ_NOTRACE(ECOBUS);
5521 __gen6_gt_force_wake_mt_put(dev_priv);
5522 mutex_unlock(&dev->struct_mutex);
5523
5524 if (ecobus & FORCEWAKE_MT_ENABLE) {
5525 dev_priv->gt.force_wake_get =
5526 __gen6_gt_force_wake_mt_get;
5527 dev_priv->gt.force_wake_put =
5528 __gen6_gt_force_wake_mt_put;
5529 } else {
5530 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
5531 DRM_INFO("when using vblank-synced partial screen updates.\n");
5532 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
5533 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
5534 }
5535 } else if (IS_GEN6(dev)) {
5536 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
5537 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
5538 }
5539 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5540 intel_gen6_powersave_work);
5541}
5542
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
 {
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -5642,3 +5667,11 @@ int vlv_freq_opcode(int ddr_freq, int val)
 	return val;
 }
 
+void intel_pm_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
+			  intel_gen6_powersave_work);
+}
+
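With the forcewake plumbing gone from this file (the large removed block above takes intel_gt_init() and friends with it, presumably into the new uncore code this series introduces), intel_pm_init() keeps only the delayed-resume work item. A hedged sketch of how that work item would typically be armed, e.g. from the powersave enable path; the scheduling call and the one-second delay are assumptions, not shown in this diff:

void intel_enable_gt_powersave_sketch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Defer the expensive RC6/RPS bring-up; the work item runs
	 * intel_gen6_powersave_work() once the GPU has settled. */
	schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
			      round_jiffies_up_relative(HZ));
}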
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 664118d8c1d6..f05cceac5a52 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -440,14 +440,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	 * registers with the above sequence (the readback of the HEAD registers
 	 * also enforces ordering), otherwise the hw might lose the new ring
 	 * register values. */
-	I915_WRITE_START(ring, obj->gtt_offset);
+	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
 	I915_WRITE_CTL(ring,
 			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
 	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
-		     I915_READ_START(ring) == obj->gtt_offset &&
+		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
 		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
 		DRM_ERROR("%s initialization failed "
 			  "ctl %08x head %08x tail %08x start %08x\n",
@@ -501,11 +501,11 @@ init_pipe_control(struct intel_ring_buffer *ring)
 
 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-	ret = i915_gem_object_pin(obj, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
 	if (ret)
 		goto err_unref;
 
-	pc->gtt_offset = obj->gtt_offset;
+	pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
 	pc->cpu_page = kmap(sg_page(obj->pages->sgl));
 	if (pc->cpu_page == NULL) {
 		ret = -ENOMEM;
@@ -836,11 +836,8 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
-		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
-	}
+	if (ring->irq_refcount++ == 0)
+		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
@@ -854,11 +851,8 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
-		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
-	}
+	if (--ring->irq_refcount == 0)
+		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
@@ -873,7 +867,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
@@ -891,7 +885,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
+	if (--ring->irq_refcount == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
@@ -910,7 +904,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
@@ -928,7 +922,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
+	if (--ring->irq_refcount == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
@@ -968,6 +962,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 
 	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
 	POSTING_READ(mmio);
+
+	/* Flush the TLB for this page */
+	if (INTEL_INFO(dev)->gen >= 6) {
+		u32 reg = RING_INSTPM(ring->mmio_base);
+		I915_WRITE(reg,
+			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+					      INSTPM_SYNC_FLUSH));
+		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+			     1000))
+			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+				  ring->name);
+	}
 }
 
 static int
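INSTPM, written above, is one of the hardware's "masked" registers: the top 16 bits select which of the low 16 bits the write actually changes, so no read-modify-write cycle is needed. The standard i915 encoding helpers behind _MASKED_BIT_ENABLE() look like this:

#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* select the bit and set it */
#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* select the bit, leave it clear */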
@@ -1021,16 +1027,14 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 	gen6_gt_force_wake_get(dev_priv);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring,
 				       ~(ring->irq_enable_mask |
 					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
 		else
 			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
+		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -1045,15 +1049,13 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
+	if (--ring->irq_refcount == 0) {
 		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring,
 				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
 		else
 			I915_WRITE_IMR(ring, ~0);
-		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
+		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -1070,14 +1072,12 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
 	if (!dev->irq_enabled)
 		return false;
 
-	spin_lock_irqsave(&dev_priv->rps.lock, flags);
-	if (ring->irq_refcount.pm++ == 0) {
-		u32 pm_imr = I915_READ(GEN6_PMIMR);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
 		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-		I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
-		POSTING_READ(GEN6_PMIMR);
+		snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
 	}
-	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
@@ -1092,14 +1092,12 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
 	if (!dev->irq_enabled)
 		return;
 
-	spin_lock_irqsave(&dev_priv->rps.lock, flags);
-	if (--ring->irq_refcount.pm == 0) {
-		u32 pm_imr = I915_READ(GEN6_PMIMR);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
 		I915_WRITE_IMR(ring, ~0);
-		I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
-		POSTING_READ(GEN6_PMIMR);
+		snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
 	}
-	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static int
@@ -1144,7 +1142,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 		intel_ring_advance(ring);
 	} else {
 		struct drm_i915_gem_object *obj = ring->private;
-		u32 cs_offset = obj->gtt_offset;
+		u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
 
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
@@ -1224,12 +1222,12 @@ static int init_status_page(struct intel_ring_buffer *ring)
 
 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-	ret = i915_gem_object_pin(obj, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
 	if (ret != 0) {
 		goto err_unref;
 	}
 
-	ring->status_page.gfx_addr = obj->gtt_offset;
+	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
 	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
 	if (ring->status_page.page_addr == NULL) {
 		ret = -ENOMEM;
@@ -1307,7 +1305,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
 	ring->obj = obj;
 
-	ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
 	if (ret)
 		goto err_unref;
 
@@ -1316,7 +1314,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		goto err_unpin;
 
 	ring->virtual_start =
-		ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
 			   ring->size);
 	if (ring->virtual_start == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
@@ -1594,6 +1592,8 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
 	if (INTEL_INFO(ring->dev)->gen >= 6) {
 		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
 		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
+		if (HAS_VEBOX(ring->dev))
+			I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
 	}
 
 	ring->set_seqno(ring, seqno);
@@ -1828,7 +1828,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 			return -ENOMEM;
 		}
 
-		ret = i915_gem_object_pin(obj, 0, true, false);
+		ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
 		if (ret != 0) {
 			drm_gem_object_unreference(&obj->base);
 			DRM_ERROR("Failed to ping batch bo\n");
@@ -2008,8 +2008,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT |
-		PM_VEBOX_CS_ERROR_INTERRUPT;
+	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
 	ring->irq_get = hsw_vebox_get_irq;
 	ring->irq_put = hsw_vebox_put_irq;
 	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 799f04c9da45..432ad5311ba6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -33,11 +33,12 @@ struct intel_hw_status_page {
 #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
 #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 
-#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
-#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
-#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
-
-enum intel_ring_hangcheck_action { wait, active, kick, hung };
+enum intel_ring_hangcheck_action {
+	HANGCHECK_WAIT,
+	HANGCHECK_ACTIVE,
+	HANGCHECK_KICK,
+	HANGCHECK_HUNG,
+};
 
 struct intel_ring_hangcheck {
 	bool deadlock;
@@ -78,10 +79,7 @@ struct intel_ring_buffer {
 	 */
 	u32 last_retired_head;
 
-	struct {
-		u32 gt; /* protected by dev_priv->irq_lock */
-		u32 pm; /* protected by dev_priv->rps.lock (sucks) */
-	} irq_refcount;
+	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
 	u32 irq_enable_mask;	/* bitmask to enable ring interrupt */
 	u32 trace_irq_seqno;
 	u32 sync_seqno[I915_NUM_RINGS-1];
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2628d5622449..317e058fb3cf 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -202,15 +202,14 @@ struct intel_sdvo_connector {
 	u32	cur_dot_crawl,	max_dot_crawl;
 };
 
-static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
+static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
 {
-	return container_of(encoder, struct intel_sdvo, base.base);
+	return container_of(encoder, struct intel_sdvo, base);
 }
 
 static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
 {
-	return container_of(intel_attached_encoder(connector),
-			    struct intel_sdvo, base);
+	return to_sdvo(intel_attached_encoder(connector));
 }
 
 static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
@@ -539,7 +538,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
 				  &status))
 		goto log_fail;
 
-	while (status == SDVO_CMD_STATUS_PENDING && --retry) {
+	while ((status == SDVO_CMD_STATUS_PENDING ||
+		status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
 		if (retry < 10)
 			msleep(15);
 		else
@@ -964,30 +964,32 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
 static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
 					 const struct drm_display_mode *adjusted_mode)
 {
-	struct dip_infoframe avi_if = {
-		.type = DIP_TYPE_AVI,
-		.ver = DIP_VERSION_AVI,
-		.len = DIP_LEN_AVI,
-	};
-	uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
-	struct intel_crtc *intel_crtc = to_intel_crtc(intel_sdvo->base.base.crtc);
+	uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
+	struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	union hdmi_infoframe frame;
+	int ret;
+	ssize_t len;
+
+	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+						       adjusted_mode);
+	if (ret < 0) {
+		DRM_ERROR("couldn't fill AVI infoframe\n");
+		return false;
+	}
 
 	if (intel_sdvo->rgb_quant_range_selectable) {
 		if (intel_crtc->config.limited_color_range)
-			avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
+			frame.avi.quantization_range =
+				HDMI_QUANTIZATION_RANGE_LIMITED;
 		else
-			avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
+			frame.avi.quantization_range =
+				HDMI_QUANTIZATION_RANGE_FULL;
 	}
 
-	avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
-
-	intel_dip_infoframe_csum(&avi_if);
-
-	/* sdvo spec says that the ecc is handled by the hw, and it looks like
-	 * we must not send the ecc field, either. */
-	memcpy(sdvo_data, &avi_if, 3);
-	sdvo_data[3] = avi_if.checksum;
-	memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
+	len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
+	if (len < 0)
+		return false;
 
 	return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
 					  SDVO_HBUF_TX_VSYNC,
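The SDVO path now reuses the kernel's generic HDMI infoframe helpers instead of hand-rolling a dip_infoframe and its checksum. The shape of that flow, using the same APIs the hunk above calls (<linux/hdmi.h> plus the DRM EDID helper); a usage sketch, not new driver code:

union hdmi_infoframe frame;
uint8_t buf[HDMI_INFOFRAME_SIZE(AVI)];
ssize_t len;

/* derive VIC, aspect ratio etc. from the mode being set */
if (drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, adjusted_mode) < 0)
	return false;

/* optionally tweak fields (e.g. quantization_range above) before packing */
len = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
if (len < 0)
	return false;
/* buf[] now holds header + checksum + payload, ready for the encoder write */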
@@ -1084,7 +1086,7 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
 static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
 				      struct intel_crtc_config *pipe_config)
 {
-	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
 	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 	struct drm_display_mode *mode = &pipe_config->requested_mode;
 
@@ -1154,7 +1156,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
 	struct drm_display_mode *adjusted_mode =
 		&intel_crtc->config.adjusted_mode;
 	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
-	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&intel_encoder->base);
+	struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
 	u32 sdvox;
 	struct intel_sdvo_in_out_map in_out;
 	struct intel_sdvo_dtd input_dtd, output_dtd;
@@ -1292,7 +1294,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
 	u16 active_outputs = 0;
 	u32 tmp;
 
@@ -1315,7 +1317,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
 	struct intel_sdvo_dtd dtd;
 	int encoder_pixel_multiplier = 0;
 	u32 flags = 0, sdvox;
@@ -1357,22 +1359,21 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 	}
 
 	/* Cross check the port pixel multiplier with the sdvo encoder state. */
-	intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, &val, 1);
-	switch (val) {
-	case SDVO_CLOCK_RATE_MULT_1X:
-		encoder_pixel_multiplier = 1;
-		break;
-	case SDVO_CLOCK_RATE_MULT_2X:
-		encoder_pixel_multiplier = 2;
-		break;
-	case SDVO_CLOCK_RATE_MULT_4X:
-		encoder_pixel_multiplier = 4;
-		break;
+	if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
+				 &val, 1)) {
+		switch (val) {
+		case SDVO_CLOCK_RATE_MULT_1X:
+			encoder_pixel_multiplier = 1;
+			break;
+		case SDVO_CLOCK_RATE_MULT_2X:
+			encoder_pixel_multiplier = 2;
+			break;
+		case SDVO_CLOCK_RATE_MULT_4X:
+			encoder_pixel_multiplier = 4;
+			break;
+		}
 	}
 
-	if(HAS_PCH_SPLIT(dev))
-		return; /* no pixel multiplier readout support yet */
-
 	WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
 	     "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
 	     pipe_config->pixel_multiplier, encoder_pixel_multiplier);
@@ -1381,7 +1382,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 static void intel_disable_sdvo(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
-	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
 	u32 temp;
 
 	intel_sdvo_set_active_outputs(intel_sdvo, 0);
@@ -1423,7 +1424,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	u32 temp;
 	bool input1, input2;
@@ -1584,7 +1585,7 @@ static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
 
 static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
 {
-	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
 
 	intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
 			&intel_sdvo->hotplug_active, 2);
@@ -1697,6 +1698,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 	enum drm_connector_status ret;
 
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, drm_get_connector_name(connector));
+
 	if (!intel_sdvo_get_value(intel_sdvo,
 				  SDVO_CMD_GET_ATTACHED_DISPLAYS,
 				  &response, 2))
@@ -2188,7 +2192,7 @@ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs
 
 static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
 {
-	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+	struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder));
 
 	if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
 		drm_mode_destroy(encoder->dev,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 1fa5612a4572..78b621cdd108 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -38,7 +38,8 @@
38#include "i915_drv.h" 38#include "i915_drv.h"
39 39
40static void 40static void
41vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb, 41vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
42 struct drm_framebuffer *fb,
42 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, 43 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
43 unsigned int crtc_w, unsigned int crtc_h, 44 unsigned int crtc_w, unsigned int crtc_h,
44 uint32_t x, uint32_t y, 45 uint32_t x, uint32_t y,
@@ -108,14 +109,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
108 109
109 sprctl |= SP_ENABLE; 110 sprctl |= SP_ENABLE;
110 111
112 intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true,
113 src_w != crtc_w || src_h != crtc_h);
114
111 /* Sizes are 0 based */ 115 /* Sizes are 0 based */
112 src_w--; 116 src_w--;
113 src_h--; 117 src_h--;
114 crtc_w--; 118 crtc_w--;
115 crtc_h--; 119 crtc_h--;
116 120
117 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
118
119 I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); 121 I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
120 I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); 122 I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
121 123
@@ -133,13 +135,13 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
133 135
134 I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); 136 I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
135 I915_WRITE(SPCNTR(pipe, plane), sprctl); 137 I915_WRITE(SPCNTR(pipe, plane), sprctl);
136 I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset + 138 I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
137 sprsurf_offset); 139 sprsurf_offset);
138 POSTING_READ(SPSURF(pipe, plane)); 140 POSTING_READ(SPSURF(pipe, plane));
139} 141}
140 142
141static void 143static void
142vlv_disable_plane(struct drm_plane *dplane) 144vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
143{ 145{
144 struct drm_device *dev = dplane->dev; 146 struct drm_device *dev = dplane->dev;
145 struct drm_i915_private *dev_priv = dev->dev_private; 147 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -152,6 +154,8 @@ vlv_disable_plane(struct drm_plane *dplane)
152 /* Activate double buffered register update */ 154 /* Activate double buffered register update */
153 I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0); 155 I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
154 POSTING_READ(SPSURF(pipe, plane)); 156 POSTING_READ(SPSURF(pipe, plane));
157
158 intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
155} 159}
156 160
157static int 161static int
@@ -206,7 +210,8 @@ vlv_get_colorkey(struct drm_plane *dplane,
206} 210}
207 211
208static void 212static void
209ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, 213ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
214 struct drm_framebuffer *fb,
210 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, 215 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
211 unsigned int crtc_w, unsigned int crtc_h, 216 unsigned int crtc_w, unsigned int crtc_h,
212 uint32_t x, uint32_t y, 217 uint32_t x, uint32_t y,
@@ -262,14 +267,15 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
262 if (IS_HASWELL(dev)) 267 if (IS_HASWELL(dev))
263 sprctl |= SPRITE_PIPE_CSC_ENABLE; 268 sprctl |= SPRITE_PIPE_CSC_ENABLE;
264 269
270 intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
271 src_w != crtc_w || src_h != crtc_h);
272
265 /* Sizes are 0 based */ 273 /* Sizes are 0 based */
266 src_w--; 274 src_w--;
267 src_h--; 275 src_h--;
268 crtc_w--; 276 crtc_w--;
269 crtc_h--; 277 crtc_h--;
270 278
271 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
272
273 /* 279 /*
274 * IVB workaround: must disable low power watermarks for at least 280 * IVB workaround: must disable low power watermarks for at least
275 * one frame before enabling scaling. LP watermarks can be re-enabled 281 * one frame before enabling scaling. LP watermarks can be re-enabled
@@ -308,7 +314,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
308 if (intel_plane->can_scale) 314 if (intel_plane->can_scale)
309 I915_WRITE(SPRSCALE(pipe), sprscale); 315 I915_WRITE(SPRSCALE(pipe), sprscale);
310 I915_WRITE(SPRCTL(pipe), sprctl); 316 I915_WRITE(SPRCTL(pipe), sprctl);
311 I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset); 317 I915_MODIFY_DISPBASE(SPRSURF(pipe),
318 i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
312 POSTING_READ(SPRSURF(pipe)); 319 POSTING_READ(SPRSURF(pipe));
313 320
314 /* potentially re-enable LP watermarks */ 321 /* potentially re-enable LP watermarks */
@@ -317,7 +324,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
317} 324}
318 325
319static void 326static void
320ivb_disable_plane(struct drm_plane *plane) 327ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
321{ 328{
322 struct drm_device *dev = plane->dev; 329 struct drm_device *dev = plane->dev;
323 struct drm_i915_private *dev_priv = dev->dev_private; 330 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -335,7 +342,7 @@ ivb_disable_plane(struct drm_plane *plane)
335 342
336 dev_priv->sprite_scaling_enabled &= ~(1 << pipe); 343 dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
337 344
338 intel_update_sprite_watermarks(dev, pipe, 0, 0, false); 345 intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
339 346
340 /* potentially re-enable LP watermarks */ 347 /* potentially re-enable LP watermarks */
341 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) 348 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
@@ -397,7 +404,8 @@ ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
397} 404}
398 405
399static void 406static void
400ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, 407ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
408 struct drm_framebuffer *fb,
401 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, 409 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
402 unsigned int crtc_w, unsigned int crtc_h, 410 unsigned int crtc_w, unsigned int crtc_h,
403 uint32_t x, uint32_t y, 411 uint32_t x, uint32_t y,
@@ -449,14 +457,15 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
449 dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ 457 dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
450 dvscntr |= DVS_ENABLE; 458 dvscntr |= DVS_ENABLE;
451 459
460 intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
461 src_w != crtc_w || src_h != crtc_h);
462
452 /* Sizes are 0 based */ 463 /* Sizes are 0 based */
453 src_w--; 464 src_w--;
454 src_h--; 465 src_h--;
455 crtc_w--; 466 crtc_w--;
456 crtc_h--; 467 crtc_h--;
457 468
458 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
459
460 dvsscale = 0; 469 dvsscale = 0;
461 if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h) 470 if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
462 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; 471 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
@@ -478,12 +487,13 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
478 I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); 487 I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
479 I915_WRITE(DVSSCALE(pipe), dvsscale); 488 I915_WRITE(DVSSCALE(pipe), dvsscale);
480 I915_WRITE(DVSCNTR(pipe), dvscntr); 489 I915_WRITE(DVSCNTR(pipe), dvscntr);
481 I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset); 490 I915_MODIFY_DISPBASE(DVSSURF(pipe),
491 i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
482 POSTING_READ(DVSSURF(pipe)); 492 POSTING_READ(DVSSURF(pipe));
483} 493}
484 494
485static void 495static void
486ilk_disable_plane(struct drm_plane *plane) 496ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
487{ 497{
488 struct drm_device *dev = plane->dev; 498 struct drm_device *dev = plane->dev;
489 struct drm_i915_private *dev_priv = dev->dev_private; 499 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -496,6 +506,8 @@ ilk_disable_plane(struct drm_plane *plane)
496 /* Flush double buffered register updates */ 506 /* Flush double buffered register updates */
497 I915_MODIFY_DISPBASE(DVSSURF(pipe), 0); 507 I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
498 POSTING_READ(DVSSURF(pipe)); 508 POSTING_READ(DVSSURF(pipe));
509
510 intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
499} 511}
500 512
501static void 513static void
@@ -818,11 +830,11 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
818 intel_enable_primary(crtc); 830 intel_enable_primary(crtc);
819 831
820 if (visible) 832 if (visible)
821 intel_plane->update_plane(plane, fb, obj, 833 intel_plane->update_plane(plane, crtc, fb, obj,
822 crtc_x, crtc_y, crtc_w, crtc_h, 834 crtc_x, crtc_y, crtc_w, crtc_h,
823 src_x, src_y, src_w, src_h); 835 src_x, src_y, src_w, src_h);
824 else 836 else
825 intel_plane->disable_plane(plane); 837 intel_plane->disable_plane(plane, crtc);
826 838
827 if (disable_primary) 839 if (disable_primary)
828 intel_disable_primary(crtc); 840 intel_disable_primary(crtc);
@@ -855,9 +867,14 @@ intel_disable_plane(struct drm_plane *plane)
855 struct intel_plane *intel_plane = to_intel_plane(plane); 867 struct intel_plane *intel_plane = to_intel_plane(plane);
856 int ret = 0; 868 int ret = 0;
857 869
858 if (plane->crtc) 870 if (!plane->fb)
859 intel_enable_primary(plane->crtc); 871 return 0;
860 intel_plane->disable_plane(plane); 872
873 if (WARN_ON(!plane->crtc))
874 return -EINVAL;
875
876 intel_enable_primary(plane->crtc);
877 intel_plane->disable_plane(plane, plane->crtc);
861 878
862 if (!intel_plane->obj) 879 if (!intel_plane->obj)
863 goto out; 880 goto out;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 39debd80d190..f2c6d7909ae2 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -823,16 +823,14 @@ static const struct tv_mode tv_modes[] = {
823 }, 823 },
824}; 824};
825 825
826static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder) 826static struct intel_tv *enc_to_tv(struct intel_encoder *encoder)
827{ 827{
828 return container_of(encoder, struct intel_tv, base.base); 828 return container_of(encoder, struct intel_tv, base);
829} 829}
830 830
831static struct intel_tv *intel_attached_tv(struct drm_connector *connector) 831static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
832{ 832{
833 return container_of(intel_attached_encoder(connector), 833 return enc_to_tv(intel_attached_encoder(connector));
834 struct intel_tv,
835 base);
836} 834}
837 835
838static bool 836static bool
@@ -908,7 +906,7 @@ static bool
908intel_tv_compute_config(struct intel_encoder *encoder, 906intel_tv_compute_config(struct intel_encoder *encoder,
909 struct intel_crtc_config *pipe_config) 907 struct intel_crtc_config *pipe_config)
910{ 908{
911 struct intel_tv *intel_tv = enc_to_intel_tv(&encoder->base); 909 struct intel_tv *intel_tv = enc_to_tv(encoder);
912 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); 910 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
913 911
914 if (!tv_mode) 912 if (!tv_mode)
@@ -921,15 +919,12 @@ intel_tv_compute_config(struct intel_encoder *encoder,
921 return true; 919 return true;
922} 920}
923 921
924static void 922static void intel_tv_mode_set(struct intel_encoder *encoder)
925intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
926 struct drm_display_mode *adjusted_mode)
927{ 923{
928 struct drm_device *dev = encoder->dev; 924 struct drm_device *dev = encoder->base.dev;
929 struct drm_i915_private *dev_priv = dev->dev_private; 925 struct drm_i915_private *dev_priv = dev->dev_private;
930 struct drm_crtc *crtc = encoder->crtc; 926 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
931 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 927 struct intel_tv *intel_tv = enc_to_tv(encoder);
932 struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
933 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); 928 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
934 u32 tv_ctl; 929 u32 tv_ctl;
935 u32 hctl1, hctl2, hctl3; 930 u32 hctl1, hctl2, hctl3;
@@ -1305,6 +1300,10 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1305 struct intel_tv *intel_tv = intel_attached_tv(connector); 1300 struct intel_tv *intel_tv = intel_attached_tv(connector);
1306 int type; 1301 int type;
1307 1302
1303 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
1304 connector->base.id, drm_get_connector_name(connector),
1305 force);
1306
1308 mode = reported_modes[0]; 1307 mode = reported_modes[0];
1309 1308
1310 if (force) { 1309 if (force) {
@@ -1483,10 +1482,6 @@ out:
1483 return ret; 1482 return ret;
1484} 1483}
1485 1484
1486static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
1487 .mode_set = intel_tv_mode_set,
1488};
1489
1490static const struct drm_connector_funcs intel_tv_connector_funcs = { 1485static const struct drm_connector_funcs intel_tv_connector_funcs = {
1491 .dpms = intel_connector_dpms, 1486 .dpms = intel_connector_dpms,
1492 .detect = intel_tv_detect, 1487 .detect = intel_tv_detect,
@@ -1619,6 +1614,7 @@ intel_tv_init(struct drm_device *dev)
1619 DRM_MODE_ENCODER_TVDAC); 1614 DRM_MODE_ENCODER_TVDAC);
1620 1615
1621 intel_encoder->compute_config = intel_tv_compute_config; 1616 intel_encoder->compute_config = intel_tv_compute_config;
1617 intel_encoder->mode_set = intel_tv_mode_set;
1622 intel_encoder->enable = intel_enable_tv; 1618 intel_encoder->enable = intel_enable_tv;
1623 intel_encoder->disable = intel_disable_tv; 1619 intel_encoder->disable = intel_disable_tv;
1624 intel_encoder->get_hw_state = intel_tv_get_hw_state; 1620 intel_encoder->get_hw_state = intel_tv_get_hw_state;
@@ -1640,7 +1636,6 @@ intel_tv_init(struct drm_device *dev)
1640 1636
1641 intel_tv->tv_format = tv_modes[initial_mode].name; 1637 intel_tv->tv_format = tv_modes[initial_mode].name;
1642 1638
1643 drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
1644 drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); 1639 drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
1645 connector->interlace_allowed = false; 1640 connector->interlace_allowed = false;
1646 connector->doublescan_allowed = false; 1641 connector->doublescan_allowed = false;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
new file mode 100644
index 000000000000..8f5bc869c023
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -0,0 +1,595 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include "i915_drv.h"
25#include "intel_drv.h"
26
27#define FORCEWAKE_ACK_TIMEOUT_MS 2
28
29#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
30#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
31
32#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
33#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
34
35#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
36#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
37
38#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
39#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
40
41#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
42
43
44static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
45{
46 u32 gt_thread_status_mask;
47
48 if (IS_HASWELL(dev_priv->dev))
49 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
50 else
51 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
52
53 /* w/a for a sporadic read returning 0 by waiting for the GT
54 * thread to wake up.
55 */
56 if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
57 DRM_ERROR("GT thread status wait timed out\n");
58}
59
60static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
61{
62 __raw_i915_write32(dev_priv, FORCEWAKE, 0);
63 /* something from same cacheline, but !FORCEWAKE */
64 __raw_posting_read(dev_priv, ECOBUS);
65}
66
67static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
68{
69 if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
70 FORCEWAKE_ACK_TIMEOUT_MS))
71 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
72
73 __raw_i915_write32(dev_priv, FORCEWAKE, 1);
74 /* something from same cacheline, but !FORCEWAKE */
75 __raw_posting_read(dev_priv, ECOBUS);
76
77 if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
78 FORCEWAKE_ACK_TIMEOUT_MS))
79 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
80
81 /* WaRsForcewakeWaitTC0:snb */
82 __gen6_gt_wait_for_thread_c0(dev_priv);
83}
84
85static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
86{
87 __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
88 /* something from same cacheline, but !FORCEWAKE_MT */
89 __raw_posting_read(dev_priv, ECOBUS);
90}
91
92static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
93{
94 u32 forcewake_ack;
95
96 if (IS_HASWELL(dev_priv->dev))
97 forcewake_ack = FORCEWAKE_ACK_HSW;
98 else
99 forcewake_ack = FORCEWAKE_MT_ACK;
100
101 if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
102 FORCEWAKE_ACK_TIMEOUT_MS))
103 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
104
105 __raw_i915_write32(dev_priv, FORCEWAKE_MT,
106 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
107 /* something from same cacheline, but !FORCEWAKE_MT */
108 __raw_posting_read(dev_priv, ECOBUS);
109
110 if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
111 FORCEWAKE_ACK_TIMEOUT_MS))
112 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
113
114 /* WaRsForcewakeWaitTC0:ivb,hsw */
115 __gen6_gt_wait_for_thread_c0(dev_priv);
116}
117
118static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
119{
120 u32 gtfifodbg;
121
122 gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
123 if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
124 "MMIO read or write has been dropped %x\n", gtfifodbg))
125 __raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
126}
127
128static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
129{
130 __raw_i915_write32(dev_priv, FORCEWAKE, 0);
131 /* something from same cacheline, but !FORCEWAKE */
132 __raw_posting_read(dev_priv, ECOBUS);
133 gen6_gt_check_fifodbg(dev_priv);
134}
135
136static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
137{
138 __raw_i915_write32(dev_priv, FORCEWAKE_MT,
139 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
140 /* something from same cacheline, but !FORCEWAKE_MT */
141 __raw_posting_read(dev_priv, ECOBUS);
142 gen6_gt_check_fifodbg(dev_priv);
143}
144
145static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
146{
147 int ret = 0;
148
149 if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
150 int loop = 500;
151 u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
152 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
153 udelay(10);
154 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
155 }
156 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
157 ++ret;
158 dev_priv->uncore.fifo_count = fifo;
159 }
160 dev_priv->uncore.fifo_count--;
161
162 return ret;
163}
164
165static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
166{
167 __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
168 _MASKED_BIT_DISABLE(0xffff));
169 /* something from same cacheline, but !FORCEWAKE_VLV */
170 __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
171}
172
173static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
174{
175 if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
176 FORCEWAKE_ACK_TIMEOUT_MS))
177 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
178
179 __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
180 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
181 __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
182 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
183
184 if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
185 FORCEWAKE_ACK_TIMEOUT_MS))
186 DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
187
188 if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
189 FORCEWAKE_KERNEL),
190 FORCEWAKE_ACK_TIMEOUT_MS))
191 DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
192
193 /* WaRsForcewakeWaitTC0:vlv */
194 __gen6_gt_wait_for_thread_c0(dev_priv);
195}
196
197static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
198{
199 __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
200 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
201 __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
202 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
203 /* The below doubles as a POSTING_READ */
204 gen6_gt_check_fifodbg(dev_priv);
205}
206
207void intel_uncore_early_sanitize(struct drm_device *dev)
208{
209 struct drm_i915_private *dev_priv = dev->dev_private;
210
211 if (HAS_FPGA_DBG_UNCLAIMED(dev))
212 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
213}
214
215void intel_uncore_init(struct drm_device *dev)
216{
217 struct drm_i915_private *dev_priv = dev->dev_private;
218
219 if (IS_VALLEYVIEW(dev)) {
220 dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
221 dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
222 } else if (IS_HASWELL(dev)) {
223 dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
224 dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
225 } else if (IS_IVYBRIDGE(dev)) {
226 u32 ecobus;
227
228 /* IVB configs may use multi-threaded forcewake */
229
230 /* A small trick here - if the bios hasn't configured
231 * MT forcewake, and if the device is in RC6, then
232 * force_wake_mt_get will not wake the device and the
 233 * ECOBUS read will return zero, which will be
234 * (correctly) interpreted by the test below as MT
235 * forcewake being disabled.
236 */
237 mutex_lock(&dev->struct_mutex);
238 __gen6_gt_force_wake_mt_get(dev_priv);
239 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
240 __gen6_gt_force_wake_mt_put(dev_priv);
241 mutex_unlock(&dev->struct_mutex);
242
243 if (ecobus & FORCEWAKE_MT_ENABLE) {
244 dev_priv->uncore.funcs.force_wake_get =
245 __gen6_gt_force_wake_mt_get;
246 dev_priv->uncore.funcs.force_wake_put =
247 __gen6_gt_force_wake_mt_put;
248 } else {
249 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
250 DRM_INFO("when using vblank-synced partial screen updates.\n");
251 dev_priv->uncore.funcs.force_wake_get =
252 __gen6_gt_force_wake_get;
253 dev_priv->uncore.funcs.force_wake_put =
254 __gen6_gt_force_wake_put;
255 }
256 } else if (IS_GEN6(dev)) {
257 dev_priv->uncore.funcs.force_wake_get =
258 __gen6_gt_force_wake_get;
259 dev_priv->uncore.funcs.force_wake_put =
260 __gen6_gt_force_wake_put;
261 }
262}
263
264void intel_uncore_sanitize(struct drm_device *dev)
265{
266 struct drm_i915_private *dev_priv = dev->dev_private;
267
268 if (IS_VALLEYVIEW(dev)) {
269 vlv_force_wake_reset(dev_priv);
270 } else if (INTEL_INFO(dev)->gen >= 6) {
271 __gen6_gt_force_wake_reset(dev_priv);
272 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
273 __gen6_gt_force_wake_mt_reset(dev_priv);
274 }
275
276 /* BIOS often leaves RC6 enabled, but disable it for hw init */
277 intel_disable_gt_powersave(dev);
278}
279
280/*
281 * Generally this is called implicitly by the register read function. However,
282 * if some sequence requires the GT to not power down then this function should
283 * be called at the beginning of the sequence followed by a call to
284 * gen6_gt_force_wake_put() at the end of the sequence.
285 */
286void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
287{
288 unsigned long irqflags;
289
290 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
291 if (dev_priv->uncore.forcewake_count++ == 0)
292 dev_priv->uncore.funcs.force_wake_get(dev_priv);
293 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
294}
295
296/*
297 * see gen6_gt_force_wake_get()
298 */
299void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
300{
301 unsigned long irqflags;
302
303 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
304 if (--dev_priv->uncore.forcewake_count == 0)
305 dev_priv->uncore.funcs.force_wake_put(dev_priv);
306 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
307}
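
As the comment above gen6_gt_force_wake_get() describes, a sequence that must keep the GT awake brackets its register accesses with a get/put pair; the spinlocked refcount makes nesting safe. A minimal sketch of such a caller, assuming REG_A and REG_B are placeholder register offsets rather than real i915 registers:

/* Hypothetical caller; REG_A/REG_B are placeholder MMIO offsets. */
static void read_two_regs_awake(struct drm_i915_private *dev_priv,
				u32 *a, u32 *b)
{
	gen6_gt_force_wake_get(dev_priv);	/* count 0 -> 1 wakes the GT */
	*a = I915_READ(REG_A);
	*b = I915_READ(REG_B);
	gen6_gt_force_wake_put(dev_priv);	/* count 1 -> 0 releases it */
}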
308
309/* We give fast paths for the really cool registers */
310#define NEEDS_FORCE_WAKE(dev_priv, reg) \
311 ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
312 ((reg) < 0x40000) && \
313 ((reg) != FORCEWAKE))
314
315static void
316ilk_dummy_write(struct drm_i915_private *dev_priv)
317{
318 /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
319 * the chip from rc6 before touching it for real. MI_MODE is masked,
320 * hence harmless to write 0 into. */
321 __raw_i915_write32(dev_priv, MI_MODE, 0);
322}
323
324static void
325hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
326{
327 if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
328 (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
329 DRM_ERROR("Unknown unclaimed register before writing to %x\n",
330 reg);
331 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
332 }
333}
334
335static void
336hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
337{
338 if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
339 (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
340 DRM_ERROR("Unclaimed write to %x\n", reg);
341 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
342 }
343}
344
345#define __i915_read(x) \
346u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
347 unsigned long irqflags; \
348 u##x val = 0; \
349 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
350 if (dev_priv->info->gen == 5) \
351 ilk_dummy_write(dev_priv); \
352 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
353 if (dev_priv->uncore.forcewake_count == 0) \
354 dev_priv->uncore.funcs.force_wake_get(dev_priv); \
355 val = __raw_i915_read##x(dev_priv, reg); \
356 if (dev_priv->uncore.forcewake_count == 0) \
357 dev_priv->uncore.funcs.force_wake_put(dev_priv); \
358 } else { \
359 val = __raw_i915_read##x(dev_priv, reg); \
360 } \
361 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
362 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
363 return val; \
364}
365
366__i915_read(8)
367__i915_read(16)
368__i915_read(32)
369__i915_read(64)
370#undef __i915_read
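
For anyone untangling the token-pasting above, __i915_read(32) expands (whitespace aside) to the function below; this is nothing beyond the macro body with x = 32 substituted:

u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg, bool trace)
{
	unsigned long irqflags;
	u32 val = 0;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->info->gen == 5)
		ilk_dummy_write(dev_priv);	/* wake ILK from rc6 first */
	if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
		/* take a transient forcewake ref if nobody holds one */
		if (dev_priv->uncore.forcewake_count == 0)
			dev_priv->uncore.funcs.force_wake_get(dev_priv);
		val = __raw_i915_read32(dev_priv, reg);
		if (dev_priv->uncore.forcewake_count == 0)
			dev_priv->uncore.funcs.force_wake_put(dev_priv);
	} else {
		val = __raw_i915_read32(dev_priv, reg);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace);
	return val;
}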
371
372#define __i915_write(x) \
373void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
374 unsigned long irqflags; \
375 u32 __fifo_ret = 0; \
376 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
377 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
378 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
379 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
380 } \
381 if (dev_priv->info->gen == 5) \
382 ilk_dummy_write(dev_priv); \
383 hsw_unclaimed_reg_clear(dev_priv, reg); \
384 __raw_i915_write##x(dev_priv, reg, val); \
385 if (unlikely(__fifo_ret)) { \
386 gen6_gt_check_fifodbg(dev_priv); \
387 } \
388 hsw_unclaimed_reg_check(dev_priv, reg); \
389 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
390}
391__i915_write(8)
392__i915_write(16)
393__i915_write(32)
394__i915_write(64)
395#undef __i915_write
396
397static const struct register_whitelist {
398 uint64_t offset;
399 uint32_t size;
 400 uint32_t gen_bitmask; /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
401} whitelist[] = {
402 { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
403};
404
405int i915_reg_read_ioctl(struct drm_device *dev,
406 void *data, struct drm_file *file)
407{
408 struct drm_i915_private *dev_priv = dev->dev_private;
409 struct drm_i915_reg_read *reg = data;
410 struct register_whitelist const *entry = whitelist;
411 int i;
412
413 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
414 if (entry->offset == reg->offset &&
415 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
416 break;
417 }
418
419 if (i == ARRAY_SIZE(whitelist))
420 return -EINVAL;
421
422 switch (entry->size) {
423 case 8:
424 reg->val = I915_READ64(reg->offset);
425 break;
426 case 4:
427 reg->val = I915_READ(reg->offset);
428 break;
429 case 2:
430 reg->val = I915_READ16(reg->offset);
431 break;
432 case 1:
433 reg->val = I915_READ8(reg->offset);
434 break;
435 default:
436 WARN_ON(1);
437 return -EINVAL;
438 }
439
440 return 0;
441}
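
The whitelist gates which offsets userspace may read: an entry matches only if (1 << gen) intersects gen_bitmask, so the 0xF0 above covers gens 4 through 7. A hedged userspace sketch using libdrm's drmIoctl(); the 0x2358 offset is an assumption for RING_TIMESTAMP(RENDER_RING_BASE), not a value taken from this file:

#include <xf86drm.h>	/* drmIoctl() from libdrm */
#include <i915_drm.h>	/* struct drm_i915_reg_read */

static int read_render_timestamp(int fd, unsigned long long *ts)
{
	struct drm_i915_reg_read req = {
		.offset = 0x2358,	/* assumed RING_TIMESTAMP offset */
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &req))
		return -1;	/* non-whitelisted offsets yield EINVAL */
	*ts = req.val;
	return 0;
}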
442
443static int i8xx_do_reset(struct drm_device *dev)
444{
445 struct drm_i915_private *dev_priv = dev->dev_private;
446
447 if (IS_I85X(dev))
448 return -ENODEV;
449
450 I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
451 POSTING_READ(D_STATE);
452
453 if (IS_I830(dev) || IS_845G(dev)) {
454 I915_WRITE(DEBUG_RESET_I830,
455 DEBUG_RESET_DISPLAY |
456 DEBUG_RESET_RENDER |
457 DEBUG_RESET_FULL);
458 POSTING_READ(DEBUG_RESET_I830);
459 msleep(1);
460
461 I915_WRITE(DEBUG_RESET_I830, 0);
462 POSTING_READ(DEBUG_RESET_I830);
463 }
464
465 msleep(1);
466
467 I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
468 POSTING_READ(D_STATE);
469
470 return 0;
471}
472
473static int i965_reset_complete(struct drm_device *dev)
474{
475 u8 gdrst;
476 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
477 return (gdrst & GRDOM_RESET_ENABLE) == 0;
478}
479
480static int i965_do_reset(struct drm_device *dev)
481{
482 int ret;
483
484 /*
485 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
486 * well as the reset bit (GR/bit 0). Setting the GR bit
487 * triggers the reset; when done, the hardware will clear it.
488 */
489 pci_write_config_byte(dev->pdev, I965_GDRST,
490 GRDOM_RENDER | GRDOM_RESET_ENABLE);
491 ret = wait_for(i965_reset_complete(dev), 500);
492 if (ret)
493 return ret;
494
495 /* We can't reset render&media without also resetting display ... */
496 pci_write_config_byte(dev->pdev, I965_GDRST,
497 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
498
499 ret = wait_for(i965_reset_complete(dev), 500);
500 if (ret)
501 return ret;
502
503 pci_write_config_byte(dev->pdev, I965_GDRST, 0);
504
505 return 0;
506}
507
508static int ironlake_do_reset(struct drm_device *dev)
509{
510 struct drm_i915_private *dev_priv = dev->dev_private;
511 u32 gdrst;
512 int ret;
513
514 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
515 gdrst &= ~GRDOM_MASK;
516 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
517 gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
518 ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
519 if (ret)
520 return ret;
521
522 /* We can't reset render&media without also resetting display ... */
523 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
524 gdrst &= ~GRDOM_MASK;
525 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
526 gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
527 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
528}
529
530static int gen6_do_reset(struct drm_device *dev)
531{
532 struct drm_i915_private *dev_priv = dev->dev_private;
533 int ret;
534 unsigned long irqflags;
535
536 /* Hold uncore.lock across reset to prevent any register access
537 * with forcewake not set correctly
538 */
539 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
540
541 /* Reset the chip */
542
543 /* GEN6_GDRST is not in the gt power well, no need to check
544 * for fifo space for the write or forcewake the chip for
545 * the read
546 */
547 __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
548
549 /* Spin waiting for the device to ack the reset request */
550 ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
551
552 /* If reset with a user forcewake, try to restore, otherwise turn it off */
553 if (dev_priv->uncore.forcewake_count)
554 dev_priv->uncore.funcs.force_wake_get(dev_priv);
555 else
556 dev_priv->uncore.funcs.force_wake_put(dev_priv);
557
558 /* Restore fifo count */
559 dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
560
561 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
562 return ret;
563}
564
565int intel_gpu_reset(struct drm_device *dev)
566{
567 switch (INTEL_INFO(dev)->gen) {
568 case 7:
569 case 6: return gen6_do_reset(dev);
570 case 5: return ironlake_do_reset(dev);
571 case 4: return i965_do_reset(dev);
572 case 2: return i8xx_do_reset(dev);
573 default: return -ENODEV;
574 }
575}
576
577void intel_uncore_clear_errors(struct drm_device *dev)
578{
579 struct drm_i915_private *dev_priv = dev->dev_private;
580
581 /* XXX needs spinlock around caller's grouping */
582 if (HAS_FPGA_DBG_UNCLAIMED(dev))
583 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
584}
585
586void intel_uncore_check_errors(struct drm_device *dev)
587{
588 struct drm_i915_private *dev_priv = dev->dev_private;
589
590 if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
591 (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
592 DRM_ERROR("Unclaimed register before interrupt\n");
593 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
594 }
595}
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 17d0a637e4fb..6b1a87c8aac5 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -50,7 +50,6 @@ static const struct file_operations mga_driver_fops = {
50 .unlocked_ioctl = drm_ioctl, 50 .unlocked_ioctl = drm_ioctl,
51 .mmap = drm_mmap, 51 .mmap = drm_mmap,
52 .poll = drm_poll, 52 .poll = drm_poll,
53 .fasync = drm_fasync,
54#ifdef CONFIG_COMPAT 53#ifdef CONFIG_COMPAT
55 .compat_ioctl = mga_compat_ioctl, 54 .compat_ioctl = mga_compat_ioctl,
56#endif 55#endif
@@ -59,7 +58,7 @@ static const struct file_operations mga_driver_fops = {
59 58
60static struct drm_driver driver = { 59static struct drm_driver driver = {
61 .driver_features = 60 .driver_features =
62 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | 61 DRIVER_USE_AGP | DRIVER_PCI_DMA |
63 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 62 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
64 .dev_priv_size = sizeof(drm_mga_buf_priv_t), 63 .dev_priv_size = sizeof(drm_mga_buf_priv_t),
65 .load = mga_driver_load, 64 .load = mga_driver_load,
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index 54558a01969a..ca4bc54ea214 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -149,7 +149,7 @@ typedef struct drm_mga_private {
149 unsigned int agp_size; 149 unsigned int agp_size;
150} drm_mga_private_t; 150} drm_mga_private_t;
151 151
152extern struct drm_ioctl_desc mga_ioctls[]; 152extern const struct drm_ioctl_desc mga_ioctls[];
153extern int mga_max_ioctl; 153extern int mga_max_ioctl;
154 154
155 /* mga_dma.c */ 155 /* mga_dma.c */
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index 9c145143ad0f..37cc2fb4eadd 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1083,7 +1083,7 @@ file_priv)
1083 return 0; 1083 return 0;
1084} 1084}
1085 1085
1086struct drm_ioctl_desc mga_ioctls[] = { 1086const struct drm_ioctl_desc mga_ioctls[] = {
1087 DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1087 DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1088 DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH), 1088 DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
1089 DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH), 1089 DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 122b571ccc7c..fcce7b2f8011 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -81,7 +81,6 @@ static const struct file_operations mgag200_driver_fops = {
81 .unlocked_ioctl = drm_ioctl, 81 .unlocked_ioctl = drm_ioctl,
82 .mmap = mgag200_mmap, 82 .mmap = mgag200_mmap,
83 .poll = drm_poll, 83 .poll = drm_poll,
84 .fasync = drm_fasync,
85#ifdef CONFIG_COMPAT 84#ifdef CONFIG_COMPAT
86 .compat_ioctl = drm_compat_ioctl, 85 .compat_ioctl = drm_compat_ioctl,
87#endif 86#endif
@@ -89,7 +88,7 @@ static const struct file_operations mgag200_driver_fops = {
89}; 88};
90 89
91static struct drm_driver driver = { 90static struct drm_driver driver = {
92 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_USE_MTRR, 91 .driver_features = DRIVER_GEM | DRIVER_MODESET,
93 .load = mgag200_driver_load, 92 .load = mgag200_driver_load,
94 .unload = mgag200_driver_unload, 93 .unload = mgag200_driver_unload,
95 .fops = &mgag200_driver_fops, 94 .fops = &mgag200_driver_fops,
@@ -104,7 +103,7 @@ static struct drm_driver driver = {
104 .gem_free_object = mgag200_gem_free_object, 103 .gem_free_object = mgag200_gem_free_object,
105 .dumb_create = mgag200_dumb_create, 104 .dumb_create = mgag200_dumb_create,
106 .dumb_map_offset = mgag200_dumb_mmap_offset, 105 .dumb_map_offset = mgag200_dumb_mmap_offset,
107 .dumb_destroy = mgag200_dumb_destroy, 106 .dumb_destroy = drm_gem_dumb_destroy,
108}; 107};
109 108
110static struct pci_driver mgag200_pci_driver = { 109static struct pci_driver mgag200_pci_driver = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 12e2499d9352..baaae19332e2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -264,9 +264,6 @@ int mgag200_gem_init_object(struct drm_gem_object *obj);
264int mgag200_dumb_create(struct drm_file *file, 264int mgag200_dumb_create(struct drm_file *file,
265 struct drm_device *dev, 265 struct drm_device *dev,
266 struct drm_mode_create_dumb *args); 266 struct drm_mode_create_dumb *args);
267int mgag200_dumb_destroy(struct drm_file *file,
268 struct drm_device *dev,
269 uint32_t handle);
270void mgag200_gem_free_object(struct drm_gem_object *obj); 267void mgag200_gem_free_object(struct drm_gem_object *obj);
271int 268int
272mgag200_dumb_mmap_offset(struct drm_file *file, 269mgag200_dumb_mmap_offset(struct drm_file *file,
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 9fa5685baee0..0f8b861b10b3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -310,13 +310,6 @@ int mgag200_dumb_create(struct drm_file *file,
310 return 0; 310 return 0;
311} 311}
312 312
313int mgag200_dumb_destroy(struct drm_file *file,
314 struct drm_device *dev,
315 uint32_t handle)
316{
317 return drm_gem_handle_delete(file, handle);
318}
319
320int mgag200_gem_init_object(struct drm_gem_object *obj) 313int mgag200_gem_init_object(struct drm_gem_object *obj)
321{ 314{
322 BUG(); 315 BUG();
@@ -349,7 +342,7 @@ void mgag200_gem_free_object(struct drm_gem_object *obj)
349 342
350static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo) 343static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
351{ 344{
352 return bo->bo.addr_space_offset; 345 return drm_vma_node_offset_addr(&bo->bo.vma_node);
353} 346}
354 347
355int 348int
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 251784aa2225..503a414cbdad 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -29,6 +29,7 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
29 struct mga_crtc *mga_crtc = to_mga_crtc(crtc); 29 struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
30 struct drm_device *dev = crtc->dev; 30 struct drm_device *dev = crtc->dev;
31 struct mga_device *mdev = dev->dev_private; 31 struct mga_device *mdev = dev->dev_private;
32 struct drm_framebuffer *fb = crtc->fb;
32 int i; 33 int i;
33 34
34 if (!crtc->enabled) 35 if (!crtc->enabled)
@@ -36,6 +37,28 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
36 37
37 WREG8(DAC_INDEX + MGA1064_INDEX, 0); 38 WREG8(DAC_INDEX + MGA1064_INDEX, 0);
38 39
40 if (fb && fb->bits_per_pixel == 16) {
41 int inc = (fb->depth == 15) ? 8 : 4;
42 u8 r, b;
43 for (i = 0; i < MGAG200_LUT_SIZE; i += inc) {
44 if (fb->depth == 16) {
45 if (i > (MGAG200_LUT_SIZE >> 1)) {
46 r = b = 0;
47 } else {
48 r = mga_crtc->lut_r[i << 1];
49 b = mga_crtc->lut_b[i << 1];
50 }
51 } else {
52 r = mga_crtc->lut_r[i];
53 b = mga_crtc->lut_b[i];
54 }
55 /* VGA registers */
56 WREG8(DAC_INDEX + MGA1064_COL_PAL, r);
57 WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
58 WREG8(DAC_INDEX + MGA1064_COL_PAL, b);
59 }
60 return;
61 }
39 for (i = 0; i < MGAG200_LUT_SIZE; i++) { 62 for (i = 0; i < MGAG200_LUT_SIZE; i++) {
40 /* VGA registers */ 63 /* VGA registers */
41 WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]); 64 WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]);
@@ -877,7 +900,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
877 900
878 pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8); 901 pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8);
879 if (crtc->fb->bits_per_pixel == 24) 902 if (crtc->fb->bits_per_pixel == 24)
880 pitch = pitch >> (4 - bppshift); 903 pitch = (pitch * 3) >> (4 - bppshift);
881 else 904 else
882 pitch = pitch >> (4 - bppshift); 905 pitch = pitch >> (4 - bppshift);
883 906
@@ -1251,6 +1274,24 @@ static void mga_crtc_destroy(struct drm_crtc *crtc)
1251 kfree(mga_crtc); 1274 kfree(mga_crtc);
1252} 1275}
1253 1276
1277static void mga_crtc_disable(struct drm_crtc *crtc)
1278{
1279 int ret;
1280 DRM_DEBUG_KMS("\n");
1281 mga_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
1282 if (crtc->fb) {
1283 struct mga_framebuffer *mga_fb = to_mga_framebuffer(crtc->fb);
1284 struct drm_gem_object *obj = mga_fb->obj;
1285 struct mgag200_bo *bo = gem_to_mga_bo(obj);
1286 ret = mgag200_bo_reserve(bo, false);
1287 if (ret)
1288 return;
1289 mgag200_bo_push_sysram(bo);
1290 mgag200_bo_unreserve(bo);
1291 }
1292 crtc->fb = NULL;
1293}
1294
1254/* These provide the minimum set of functions required to handle a CRTC */ 1295/* These provide the minimum set of functions required to handle a CRTC */
1255static const struct drm_crtc_funcs mga_crtc_funcs = { 1296static const struct drm_crtc_funcs mga_crtc_funcs = {
1256 .cursor_set = mga_crtc_cursor_set, 1297 .cursor_set = mga_crtc_cursor_set,
@@ -1261,6 +1302,7 @@ static const struct drm_crtc_funcs mga_crtc_funcs = {
1261}; 1302};
1262 1303
1263static const struct drm_crtc_helper_funcs mga_helper_funcs = { 1304static const struct drm_crtc_helper_funcs mga_helper_funcs = {
1305 .disable = mga_crtc_disable,
1264 .dpms = mga_crtc_dpms, 1306 .dpms = mga_crtc_dpms,
1265 .mode_fixup = mga_crtc_mode_fixup, 1307 .mode_fixup = mga_crtc_mode_fixup,
1266 .mode_set = mga_crtc_mode_set, 1308 .mode_set = mga_crtc_mode_set,
@@ -1581,6 +1623,8 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev)
1581 1623
1582 drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs); 1624 drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
1583 1625
1626 drm_sysfs_connector_add(connector);
1627
1584 mga_connector->i2c = mgag200_i2c_create(dev); 1628 mga_connector->i2c = mgag200_i2c_create(dev);
1585 if (!mga_connector->i2c) 1629 if (!mga_connector->i2c)
1586 DRM_ERROR("failed to add ddc bus\n"); 1630 DRM_ERROR("failed to add ddc bus\n");
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 3acb2b044c7b..07b192fe15c6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -148,7 +148,9 @@ mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
148 148
149static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) 149static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
150{ 150{
151 return 0; 151 struct mgag200_bo *mgabo = mgag200_bo(bo);
152
153 return drm_vma_node_verify_access(&mgabo->gem.vma_node, filp);
152} 154}
153 155
154static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev, 156static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -321,8 +323,8 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
321 return ret; 323 return ret;
322 } 324 }
323 325
324 mgabo->gem.driver_private = NULL;
325 mgabo->bo.bdev = &mdev->ttm.bdev; 326 mgabo->bo.bdev = &mdev->ttm.bdev;
327 mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
326 328
327 mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); 329 mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
328 330
@@ -353,6 +355,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
353 bo->pin_count++; 355 bo->pin_count++;
354 if (gpu_addr) 356 if (gpu_addr)
355 *gpu_addr = mgag200_bo_gpu_offset(bo); 357 *gpu_addr = mgag200_bo_gpu_offset(bo);
358 return 0;
356 } 359 }
357 360
358 mgag200_ttm_placement(bo, pl_flag); 361 mgag200_ttm_placement(bo, pl_flag);
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
new file mode 100644
index 000000000000..a06c19cc56f8
--- /dev/null
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -0,0 +1,34 @@
1
2config DRM_MSM
3 tristate "MSM DRM"
4 depends on DRM
5 depends on ARCH_MSM
6 depends on ARCH_MSM8960
7 select DRM_KMS_HELPER
8 select SHMEM
9 select TMPFS
10 default y
11 help
12 DRM/KMS driver for MSM/snapdragon.
13
14config DRM_MSM_FBDEV
15 bool "Enable legacy fbdev support for MSM modesetting driver"
16 depends on DRM_MSM
17 select FB_SYS_FILLRECT
18 select FB_SYS_COPYAREA
19 select FB_SYS_IMAGEBLIT
20 select FB_SYS_FOPS
21 default y
22 help
23 Choose this option if you have a need for the legacy fbdev
24 support. Note that this support also provides the linux console
25 support on top of the MSM modesetting driver.
26
27config DRM_MSM_REGISTER_LOGGING
28 bool "MSM DRM register logging"
29 depends on DRM_MSM
30 default n
31 help
32 Compile in support for logging register reads/writes in a format
33 that can be parsed by the envytools demsm tool. If enabled, register
34 logging can be switched on via msm.reglog=y module param.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
new file mode 100644
index 000000000000..e17914889e54
--- /dev/null
+++ b/drivers/gpu/drm/msm/Makefile
@@ -0,0 +1,30 @@
1ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
2ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
3 ccflags-y += -Werror
4endif
5
6msm-y := \
7 adreno/adreno_gpu.o \
8 adreno/a3xx_gpu.o \
9 hdmi/hdmi.o \
10 hdmi/hdmi_bridge.o \
11 hdmi/hdmi_connector.o \
12 hdmi/hdmi_i2c.o \
13 hdmi/hdmi_phy_8960.o \
14 hdmi/hdmi_phy_8x60.o \
15 mdp4/mdp4_crtc.o \
16 mdp4/mdp4_dtv_encoder.o \
17 mdp4/mdp4_format.o \
18 mdp4/mdp4_irq.o \
19 mdp4/mdp4_kms.o \
20 mdp4/mdp4_plane.o \
21 msm_drv.o \
22 msm_fb.o \
23 msm_gem.o \
24 msm_gem_submit.o \
25 msm_gpu.o \
26 msm_ringbuffer.o
27
28msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
29
30obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES
new file mode 100644
index 000000000000..e036f6c1db94
--- /dev/null
+++ b/drivers/gpu/drm/msm/NOTES
@@ -0,0 +1,69 @@
1NOTES about msm drm/kms driver:
2
3In the current snapdragon SoCs, we have (at least) 3 different
4display controller blocks at play:
5 + MDP3 - ?? seems to be what is on geeksphone peak device
6 + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410)
7 + MDSS - snapdragon 800
8
9(I don't have a completely clear picture on which display controller
10maps to which part #)
11
12Plus a handful of blocks around them for HDMI/DSI/etc output.
13
14And on gpu side of things:
15 + zero, one, or two 2d cores (z180)
16 + and either a2xx or a3xx 3d core.
17
18But, HDMI/DSI/etc blocks seem like they can be shared across multiple
19display controller blocks. And I for sure don't want to have to deal
20with N different kms devices from xf86-video-freedreno. Plus, it
21seems like we can do some clever tricks like use GPU to trigger
22pageflip after rendering completes (ie. have the kms/crtc code build
23up gpu cmdstream to update scanout and write FLUSH register after).
24
25So, the approach is one drm driver, with some modularity. Different
26'struct msm_kms' implementations, depending on display controller.
27And one or more 'struct msm_gpu' for the various different gpu sub-
28modules. (A hypothetical sketch of this shape follows these notes.)
29
30(The second part is not implemented yet. So far this is just a basic KMS
31driver, not exposing any custom ioctls to userspace.)
32
33The kms module provides the plane, crtc, and encoder objects, and
34loads whatever connectors are appropriate.
35
36For MDP4, the mapping is:
37
38 plane -> PIPE{RGBn,VGn} \
39 crtc -> OVLP{n} + DMA{P,S,E} (??) |-> MDP "device"
40 encoder -> DTV/LCDC/DSI (within MDP4) /
41 connector -> HDMI/DSI/etc --> other device(s)
42
43Since the irqs that drm core mostly cares about are vblank/framedone,
44we'll let msm_mdp4_kms provide the irq install/uninstall/etc functions
45and treat the MDP4 block's irq as "the" irq, even though the connectors
46may have their own irqs which they install themselves. For this reason
47the display controller is the "master" device.
48
49Each connector probably ends up being a separate device, just for the
50logistics of finding/mapping io region, irq, etc. Ideally we would
51have a better way than just stashing the platform device in a global
52(ie. like DT super-node.. but I don't have any snapdragon hw yet that
53is using DT).
54
55Note that so far I've not been able to get any docs on the hw, and it
56seems that access to such docs would prevent me from working on the
57freedreno gallium driver. So there may be some mistakes in register
58names (I had to invent a few, since no sufficient hint was given in
59the downstream android fbdev driver), bitfield sizes, etc. My current
60state of understanding the registers is given in the envytools rnndb
61files at:
62
63 https://github.com/freedreno/envytools/tree/master/rnndb
64 (the mdp4/hdmi/dsi directories)
65
66These files are used both by a parser tool (in the same tree) to
67parse logged register reads/writes (from both the downstream android
68fbdev driver and this driver with register logging enabled) and to
69generate the register-level headers.
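
As a purely hypothetical illustration of the 'struct msm_kms' modularity sketched in the notes above, the shape might look roughly like this; every name below is invented for illustration and is not taken from the driver's headers:

/* Illustrative only -- invented names, not the driver's real msm_kms. */
struct msm_kms;

struct msm_kms_funcs {
	int (*hw_init)(struct msm_kms *kms);
	/* the display controller block owns "the" drm irq */
	void (*irq_preinstall)(struct msm_kms *kms);
	int (*irq_postinstall)(struct msm_kms *kms);
	irqreturn_t (*irq)(struct msm_kms *kms);
	int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
	void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
	void (*destroy)(struct msm_kms *kms);
};

struct msm_kms {
	const struct msm_kms_funcs *funcs;	/* MDP3/MDP4/MDSS impls */
};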
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
new file mode 100644
index 000000000000..35463864b959
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -0,0 +1,1438 @@
1#ifndef A2XX_XML
2#define A2XX_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
17
18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark)
20
21Permission is hereby granted, free of charge, to any person obtaining
22a copy of this software and associated documentation files (the
23"Software"), to deal in the Software without restriction, including
24without limitation the rights to use, copy, modify, merge, publish,
25distribute, sublicense, and/or sell copies of the Software, and to
26permit persons to whom the Software is furnished to do so, subject to
27the following conditions:
28
29The above copyright notice and this permission notice (including the
30next paragraph) shall be included in all copies or substantial
31portions of the Software.
32
33THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
35MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
36IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
37LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
38OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
39WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40*/
41
42
43enum a2xx_rb_dither_type {
44 DITHER_PIXEL = 0,
45 DITHER_SUBPIXEL = 1,
46};
47
48enum a2xx_colorformatx {
49 COLORX_4_4_4_4 = 0,
50 COLORX_1_5_5_5 = 1,
51 COLORX_5_6_5 = 2,
52 COLORX_8 = 3,
53 COLORX_8_8 = 4,
54 COLORX_8_8_8_8 = 5,
55 COLORX_S8_8_8_8 = 6,
56 COLORX_16_FLOAT = 7,
57 COLORX_16_16_FLOAT = 8,
58 COLORX_16_16_16_16_FLOAT = 9,
59 COLORX_32_FLOAT = 10,
60 COLORX_32_32_FLOAT = 11,
61 COLORX_32_32_32_32_FLOAT = 12,
62 COLORX_2_3_3 = 13,
63 COLORX_8_8_8 = 14,
64};
65
66enum a2xx_sq_surfaceformat {
67 FMT_1_REVERSE = 0,
68 FMT_1 = 1,
69 FMT_8 = 2,
70 FMT_1_5_5_5 = 3,
71 FMT_5_6_5 = 4,
72 FMT_6_5_5 = 5,
73 FMT_8_8_8_8 = 6,
74 FMT_2_10_10_10 = 7,
75 FMT_8_A = 8,
76 FMT_8_B = 9,
77 FMT_8_8 = 10,
78 FMT_Cr_Y1_Cb_Y0 = 11,
79 FMT_Y1_Cr_Y0_Cb = 12,
80 FMT_5_5_5_1 = 13,
81 FMT_8_8_8_8_A = 14,
82 FMT_4_4_4_4 = 15,
83 FMT_10_11_11 = 16,
84 FMT_11_11_10 = 17,
85 FMT_DXT1 = 18,
86 FMT_DXT2_3 = 19,
87 FMT_DXT4_5 = 20,
88 FMT_24_8 = 22,
89 FMT_24_8_FLOAT = 23,
90 FMT_16 = 24,
91 FMT_16_16 = 25,
92 FMT_16_16_16_16 = 26,
93 FMT_16_EXPAND = 27,
94 FMT_16_16_EXPAND = 28,
95 FMT_16_16_16_16_EXPAND = 29,
96 FMT_16_FLOAT = 30,
97 FMT_16_16_FLOAT = 31,
98 FMT_16_16_16_16_FLOAT = 32,
99 FMT_32 = 33,
100 FMT_32_32 = 34,
101 FMT_32_32_32_32 = 35,
102 FMT_32_FLOAT = 36,
103 FMT_32_32_FLOAT = 37,
104 FMT_32_32_32_32_FLOAT = 38,
105 FMT_32_AS_8 = 39,
106 FMT_32_AS_8_8 = 40,
107 FMT_16_MPEG = 41,
108 FMT_16_16_MPEG = 42,
109 FMT_8_INTERLACED = 43,
110 FMT_32_AS_8_INTERLACED = 44,
111 FMT_32_AS_8_8_INTERLACED = 45,
112 FMT_16_INTERLACED = 46,
113 FMT_16_MPEG_INTERLACED = 47,
114 FMT_16_16_MPEG_INTERLACED = 48,
115 FMT_DXN = 49,
116 FMT_8_8_8_8_AS_16_16_16_16 = 50,
117 FMT_DXT1_AS_16_16_16_16 = 51,
118 FMT_DXT2_3_AS_16_16_16_16 = 52,
119 FMT_DXT4_5_AS_16_16_16_16 = 53,
120 FMT_2_10_10_10_AS_16_16_16_16 = 54,
121 FMT_10_11_11_AS_16_16_16_16 = 55,
122 FMT_11_11_10_AS_16_16_16_16 = 56,
123 FMT_32_32_32_FLOAT = 57,
124 FMT_DXT3A = 58,
125 FMT_DXT5A = 59,
126 FMT_CTX1 = 60,
127 FMT_DXT3A_AS_1_1_1_1 = 61,
128};
129
130enum a2xx_sq_ps_vtx_mode {
131 POSITION_1_VECTOR = 0,
132 POSITION_2_VECTORS_UNUSED = 1,
133 POSITION_2_VECTORS_SPRITE = 2,
134 POSITION_2_VECTORS_EDGE = 3,
135 POSITION_2_VECTORS_KILL = 4,
136 POSITION_2_VECTORS_SPRITE_KILL = 5,
137 POSITION_2_VECTORS_EDGE_KILL = 6,
138 MULTIPASS = 7,
139};
140
141enum a2xx_sq_sample_cntl {
142 CENTROIDS_ONLY = 0,
143 CENTERS_ONLY = 1,
144 CENTROIDS_AND_CENTERS = 2,
145};
146
147enum a2xx_dx_clip_space {
148 DXCLIP_OPENGL = 0,
149 DXCLIP_DIRECTX = 1,
150};
151
152enum a2xx_pa_su_sc_polymode {
153 POLY_DISABLED = 0,
154 POLY_DUALMODE = 1,
155};
156
157enum a2xx_rb_edram_mode {
158 EDRAM_NOP = 0,
159 COLOR_DEPTH = 4,
160 DEPTH_ONLY = 5,
161 EDRAM_COPY = 6,
162};
163
164enum a2xx_pa_sc_pattern_bit_order {
165 LITTLE = 0,
166 BIG = 1,
167};
168
169enum a2xx_pa_sc_auto_reset_cntl {
170 NEVER = 0,
171 EACH_PRIMITIVE = 1,
172 EACH_PACKET = 2,
173};
174
175enum a2xx_pa_pixcenter {
176 PIXCENTER_D3D = 0,
177 PIXCENTER_OGL = 1,
178};
179
180enum a2xx_pa_roundmode {
181 TRUNCATE = 0,
182 ROUND = 1,
183 ROUNDTOEVEN = 2,
184 ROUNDTOODD = 3,
185};
186
187enum a2xx_pa_quantmode {
188 ONE_SIXTEENTH = 0,
189 ONE_EIGTH = 1,
190 ONE_QUARTER = 2,
191 ONE_HALF = 3,
192 ONE = 4,
193};
194
195enum a2xx_rb_copy_sample_select {
196 SAMPLE_0 = 0,
197 SAMPLE_1 = 1,
198 SAMPLE_2 = 2,
199 SAMPLE_3 = 3,
200 SAMPLE_01 = 4,
201 SAMPLE_23 = 5,
202 SAMPLE_0123 = 6,
203};
204
205enum sq_tex_clamp {
206 SQ_TEX_WRAP = 0,
207 SQ_TEX_MIRROR = 1,
208 SQ_TEX_CLAMP_LAST_TEXEL = 2,
209 SQ_TEX_MIRROR_ONCE_LAST_TEXEL = 3,
210 SQ_TEX_CLAMP_HALF_BORDER = 4,
211 SQ_TEX_MIRROR_ONCE_HALF_BORDER = 5,
212 SQ_TEX_CLAMP_BORDER = 6,
213 SQ_TEX_MIRROR_ONCE_BORDER = 7,
214};
215
216enum sq_tex_swiz {
217 SQ_TEX_X = 0,
218 SQ_TEX_Y = 1,
219 SQ_TEX_Z = 2,
220 SQ_TEX_W = 3,
221 SQ_TEX_ZERO = 4,
222 SQ_TEX_ONE = 5,
223};
224
225enum sq_tex_filter {
226 SQ_TEX_FILTER_POINT = 0,
227 SQ_TEX_FILTER_BILINEAR = 1,
228 SQ_TEX_FILTER_BICUBIC = 2,
229};
230
231#define REG_A2XX_RBBM_PATCH_RELEASE 0x00000001
232
233#define REG_A2XX_RBBM_CNTL 0x0000003b
234
235#define REG_A2XX_RBBM_SOFT_RESET 0x0000003c
236
237#define REG_A2XX_CP_PFP_UCODE_ADDR 0x000000c0
238
239#define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1
240
241#define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000395
242
243#define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000397
244
245#define REG_A2XX_RBBM_PERFCOUNTER1_HI 0x00000398
246
247#define REG_A2XX_RBBM_DEBUG 0x0000039b
248
249#define REG_A2XX_RBBM_PM_OVERRIDE1 0x0000039c
250
251#define REG_A2XX_RBBM_PM_OVERRIDE2 0x0000039d
252
253#define REG_A2XX_RBBM_DEBUG_OUT 0x000003a0
254
255#define REG_A2XX_RBBM_DEBUG_CNTL 0x000003a1
256
257#define REG_A2XX_RBBM_READ_ERROR 0x000003b3
258
259#define REG_A2XX_RBBM_INT_CNTL 0x000003b4
260
261#define REG_A2XX_RBBM_INT_STATUS 0x000003b5
262
263#define REG_A2XX_RBBM_INT_ACK 0x000003b6
264
265#define REG_A2XX_MASTER_INT_SIGNAL 0x000003b7
266
267#define REG_A2XX_RBBM_PERIPHID1 0x000003f9
268
269#define REG_A2XX_RBBM_PERIPHID2 0x000003fa
270
271#define REG_A2XX_CP_PERFMON_CNTL 0x00000444
272
273#define REG_A2XX_CP_PERFCOUNTER_SELECT 0x00000445
274
275#define REG_A2XX_CP_PERFCOUNTER_LO 0x00000446
276
277#define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447
278
279#define REG_A2XX_CP_ST_BASE 0x0000044d
280
281#define REG_A2XX_CP_ST_BUFSZ 0x0000044e
282
283#define REG_A2XX_CP_IB1_BASE 0x00000458
284
285#define REG_A2XX_CP_IB1_BUFSZ 0x00000459
286
287#define REG_A2XX_CP_IB2_BASE 0x0000045a
288
289#define REG_A2XX_CP_IB2_BUFSZ 0x0000045b
290
291#define REG_A2XX_CP_STAT 0x0000047f
292
293#define REG_A2XX_RBBM_STATUS 0x000005d0
294#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f
295#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0
296static inline uint32_t A2XX_RBBM_STATUS_CMDFIFO_AVAIL(uint32_t val)
297{
298 return ((val) << A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT) & A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK;
299}
300#define A2XX_RBBM_STATUS_TC_BUSY 0x00000020
301#define A2XX_RBBM_STATUS_HIRQ_PENDING 0x00000100
302#define A2XX_RBBM_STATUS_CPRQ_PENDING 0x00000200
303#define A2XX_RBBM_STATUS_CFRQ_PENDING 0x00000400
304#define A2XX_RBBM_STATUS_PFRQ_PENDING 0x00000800
305#define A2XX_RBBM_STATUS_VGT_BUSY_NO_DMA 0x00001000
306#define A2XX_RBBM_STATUS_RBBM_WU_BUSY 0x00004000
307#define A2XX_RBBM_STATUS_CP_NRT_BUSY 0x00010000
308#define A2XX_RBBM_STATUS_MH_BUSY 0x00040000
309#define A2XX_RBBM_STATUS_MH_COHERENCY_BUSY 0x00080000
310#define A2XX_RBBM_STATUS_SX_BUSY 0x00200000
311#define A2XX_RBBM_STATUS_TPC_BUSY 0x00400000
312#define A2XX_RBBM_STATUS_SC_CNTX_BUSY 0x01000000
313#define A2XX_RBBM_STATUS_PA_BUSY 0x02000000
314#define A2XX_RBBM_STATUS_VGT_BUSY 0x04000000
315#define A2XX_RBBM_STATUS_SQ_CNTX17_BUSY 0x08000000
316#define A2XX_RBBM_STATUS_SQ_CNTX0_BUSY 0x10000000
317#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000
318#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000
319
320#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
321#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
322#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
323static inline uint32_t A2XX_A220_VSC_BIN_SIZE_WIDTH(uint32_t val)
324{
325 return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT) & A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK;
326}
327#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
328#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT 5
329static inline uint32_t A2XX_A220_VSC_BIN_SIZE_HEIGHT(uint32_t val)
330{
331 return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT) & A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK;
332}
333
334static inline uint32_t REG_A2XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
335
336static inline uint32_t REG_A2XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
337
338static inline uint32_t REG_A2XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
339
340static inline uint32_t REG_A2XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
341
342#define REG_A2XX_PC_DEBUG_CNTL 0x00000c38
343
344#define REG_A2XX_PC_DEBUG_DATA 0x00000c39
345
346#define REG_A2XX_PA_SC_VIZ_QUERY_STATUS 0x00000c44
347
348#define REG_A2XX_GRAS_DEBUG_CNTL 0x00000c80
349
350#define REG_A2XX_PA_SU_DEBUG_CNTL 0x00000c80
351
352#define REG_A2XX_GRAS_DEBUG_DATA 0x00000c81
353
354#define REG_A2XX_PA_SU_DEBUG_DATA 0x00000c81
355
356#define REG_A2XX_PA_SU_FACE_DATA 0x00000c86
357
358#define REG_A2XX_SQ_GPR_MANAGEMENT 0x00000d00
359
360#define REG_A2XX_SQ_FLOW_CONTROL 0x00000d01
361
362#define REG_A2XX_SQ_INST_STORE_MANAGMENT 0x00000d02
363
364#define REG_A2XX_SQ_DEBUG_MISC 0x00000d05
365
366#define REG_A2XX_SQ_INT_CNTL 0x00000d34
367
368#define REG_A2XX_SQ_INT_STATUS 0x00000d35
369
370#define REG_A2XX_SQ_INT_ACK 0x00000d36
371
372#define REG_A2XX_SQ_DEBUG_INPUT_FSM 0x00000dae
373
374#define REG_A2XX_SQ_DEBUG_CONST_MGR_FSM 0x00000daf
375
376#define REG_A2XX_SQ_DEBUG_TP_FSM 0x00000db0
377
378#define REG_A2XX_SQ_DEBUG_FSM_ALU_0 0x00000db1
379
380#define REG_A2XX_SQ_DEBUG_FSM_ALU_1 0x00000db2
381
382#define REG_A2XX_SQ_DEBUG_EXP_ALLOC 0x00000db3
383
384#define REG_A2XX_SQ_DEBUG_PTR_BUFF 0x00000db4
385
386#define REG_A2XX_SQ_DEBUG_GPR_VTX 0x00000db5
387
388#define REG_A2XX_SQ_DEBUG_GPR_PIX 0x00000db6
389
390#define REG_A2XX_SQ_DEBUG_TB_STATUS_SEL 0x00000db7
391
392#define REG_A2XX_SQ_DEBUG_VTX_TB_0 0x00000db8
393
394#define REG_A2XX_SQ_DEBUG_VTX_TB_1 0x00000db9
395
396#define REG_A2XX_SQ_DEBUG_VTX_TB_STATUS_REG 0x00000dba
397
398#define REG_A2XX_SQ_DEBUG_VTX_TB_STATE_MEM 0x00000dbb
399
400#define REG_A2XX_SQ_DEBUG_PIX_TB_0 0x00000dbc
401
402#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_0 0x00000dbd
403
404#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_1 0x00000dbe
405
406#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_2 0x00000dbf
407
408#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_3 0x00000dc0
409
410#define REG_A2XX_SQ_DEBUG_PIX_TB_STATE_MEM 0x00000dc1
411
412#define REG_A2XX_TC_CNTL_STATUS 0x00000e00
413#define A2XX_TC_CNTL_STATUS_L2_INVALIDATE 0x00000001
414
415#define REG_A2XX_TP0_CHICKEN 0x00000e1e
416
417#define REG_A2XX_RB_BC_CONTROL 0x00000f01
418#define A2XX_RB_BC_CONTROL_ACCUM_LINEAR_MODE_ENABLE 0x00000001
419#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK 0x00000006
420#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT 1
421static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT(uint32_t val)
422{
423 return ((val) << A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK;
424}
425#define A2XX_RB_BC_CONTROL_DISABLE_EDRAM_CAM 0x00000008
426#define A2XX_RB_BC_CONTROL_DISABLE_EZ_FAST_CONTEXT_SWITCH 0x00000010
427#define A2XX_RB_BC_CONTROL_DISABLE_EZ_NULL_ZCMD_DROP 0x00000020
428#define A2XX_RB_BC_CONTROL_DISABLE_LZ_NULL_ZCMD_DROP 0x00000040
429#define A2XX_RB_BC_CONTROL_ENABLE_AZ_THROTTLE 0x00000080
430#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK 0x00001f00
431#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT 8
432static inline uint32_t A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT(uint32_t val)
433{
434 return ((val) << A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT) & A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK;
435}
436#define A2XX_RB_BC_CONTROL_ENABLE_CRC_UPDATE 0x00004000
437#define A2XX_RB_BC_CONTROL_CRC_MODE 0x00008000
438#define A2XX_RB_BC_CONTROL_DISABLE_SAMPLE_COUNTERS 0x00010000
439#define A2XX_RB_BC_CONTROL_DISABLE_ACCUM 0x00020000
440#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK 0x003c0000
441#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT 18
442static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK(uint32_t val)
443{
444 return ((val) << A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK;
445}
446#define A2XX_RB_BC_CONTROL_LINEAR_PERFORMANCE_ENABLE 0x00400000
447#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK 0x07800000
448#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT 23
449static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT(uint32_t val)
450{
451 return ((val) << A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK;
452}
453#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK 0x18000000
454#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT 27
455static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val)
456{
457 return ((val) << A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK;
458}
459#define A2XX_RB_BC_CONTROL_MEM_EXPORT_LINEAR_MODE_ENABLE 0x20000000
460#define A2XX_RB_BC_CONTROL_CRC_SYSTEM 0x40000000
461#define A2XX_RB_BC_CONTROL_RESERVED6 0x80000000
462
463#define REG_A2XX_RB_EDRAM_INFO 0x00000f02
464
465#define REG_A2XX_RB_DEBUG_CNTL 0x00000f26
466
467#define REG_A2XX_RB_DEBUG_DATA 0x00000f27
468
469#define REG_A2XX_RB_SURFACE_INFO 0x00002000
470
471#define REG_A2XX_RB_COLOR_INFO 0x00002001
472#define A2XX_RB_COLOR_INFO_FORMAT__MASK 0x0000000f
473#define A2XX_RB_COLOR_INFO_FORMAT__SHIFT 0
474static inline uint32_t A2XX_RB_COLOR_INFO_FORMAT(enum a2xx_colorformatx val)
475{
476 return ((val) << A2XX_RB_COLOR_INFO_FORMAT__SHIFT) & A2XX_RB_COLOR_INFO_FORMAT__MASK;
477}
478#define A2XX_RB_COLOR_INFO_ROUND_MODE__MASK 0x00000030
479#define A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT 4
480static inline uint32_t A2XX_RB_COLOR_INFO_ROUND_MODE(uint32_t val)
481{
482 return ((val) << A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT) & A2XX_RB_COLOR_INFO_ROUND_MODE__MASK;
483}
484#define A2XX_RB_COLOR_INFO_LINEAR 0x00000040
485#define A2XX_RB_COLOR_INFO_ENDIAN__MASK 0x00000180
486#define A2XX_RB_COLOR_INFO_ENDIAN__SHIFT 7
487static inline uint32_t A2XX_RB_COLOR_INFO_ENDIAN(uint32_t val)
488{
489 return ((val) << A2XX_RB_COLOR_INFO_ENDIAN__SHIFT) & A2XX_RB_COLOR_INFO_ENDIAN__MASK;
490}
491#define A2XX_RB_COLOR_INFO_SWAP__MASK 0x00000600
492#define A2XX_RB_COLOR_INFO_SWAP__SHIFT 9
493static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val)
494{
495 return ((val) << A2XX_RB_COLOR_INFO_SWAP__SHIFT) & A2XX_RB_COLOR_INFO_SWAP__MASK;
496}
497#define A2XX_RB_COLOR_INFO_BASE__MASK 0xfffff000
498#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12
499static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
500{
501 return ((val >> 10) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
502}
503
504#define REG_A2XX_RB_DEPTH_INFO 0x00002002
505#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
506#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
507static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
508{
509 return ((val) << A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
510}
511#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000
512#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
513static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
514{
515 return ((val >> 10) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
516}
517
518#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005
519
520#define REG_A2XX_COHER_DEST_BASE_0 0x00002006
521
522#define REG_A2XX_PA_SC_SCREEN_SCISSOR_TL 0x0000200e
523#define A2XX_PA_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
524#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
525#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
526static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
527{
528 return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK;
529}
530#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
531#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
532static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
533{
534 return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK;
535}
536
537#define REG_A2XX_PA_SC_SCREEN_SCISSOR_BR 0x0000200f
538#define A2XX_PA_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
539#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
540#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
541static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
542{
543 return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK;
544}
545#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
546#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
547static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
548{
549 return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK;
550}
551
552#define REG_A2XX_PA_SC_WINDOW_OFFSET 0x00002080
553#define A2XX_PA_SC_WINDOW_OFFSET_X__MASK 0x00007fff
554#define A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
555static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_X(int32_t val)
556{
557 return ((val) << A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_X__MASK;
558}
559#define A2XX_PA_SC_WINDOW_OFFSET_Y__MASK 0x7fff0000
560#define A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16
561static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_Y(int32_t val)
562{
563 return ((val) << A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_Y__MASK;
564}
565#define A2XX_PA_SC_WINDOW_OFFSET_DISABLE 0x80000000
566
567#define REG_A2XX_PA_SC_WINDOW_SCISSOR_TL 0x00002081
568#define A2XX_PA_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
569#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
570#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
571static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
572{
573 return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK;
574}
575#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
576#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
577static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
578{
579 return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK;
580}
581
582#define REG_A2XX_PA_SC_WINDOW_SCISSOR_BR 0x00002082
583#define A2XX_PA_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
584#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
585#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
586static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
587{
588 return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK;
589}
590#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
591#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
592static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
593{
594 return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK;
595}
596
597#define REG_A2XX_UNKNOWN_2010 0x00002010
598
599#define REG_A2XX_VGT_MAX_VTX_INDX 0x00002100
600
601#define REG_A2XX_VGT_MIN_VTX_INDX 0x00002101
602
603#define REG_A2XX_VGT_INDX_OFFSET 0x00002102
604
605#define REG_A2XX_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x00002103
606
607#define REG_A2XX_RB_COLOR_MASK 0x00002104
608#define A2XX_RB_COLOR_MASK_WRITE_RED 0x00000001
609#define A2XX_RB_COLOR_MASK_WRITE_GREEN 0x00000002
610#define A2XX_RB_COLOR_MASK_WRITE_BLUE 0x00000004
611#define A2XX_RB_COLOR_MASK_WRITE_ALPHA 0x00000008
612
613#define REG_A2XX_RB_BLEND_RED 0x00002105
614
615#define REG_A2XX_RB_BLEND_GREEN 0x00002106
616
617#define REG_A2XX_RB_BLEND_BLUE 0x00002107
618
619#define REG_A2XX_RB_BLEND_ALPHA 0x00002108
620
621#define REG_A2XX_RB_FOG_COLOR 0x00002109
622
623#define REG_A2XX_RB_STENCILREFMASK_BF 0x0000210c
624#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
625#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
626static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
627{
628 return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
629}
630#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
631#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
632static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
633{
634 return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
635}
636#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
637#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
638static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
639{
640 return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
641}
642
643#define REG_A2XX_RB_STENCILREFMASK 0x0000210d
644#define A2XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
645#define A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
646static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
647{
648 return ((val) << A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILREF__MASK;
649}
650#define A2XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
651#define A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
652static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
653{
654 return ((val) << A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILMASK__MASK;
655}
656#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
657#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
658static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
659{
660 return ((val) << A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
661}
662
663#define REG_A2XX_RB_ALPHA_REF 0x0000210e
664
665#define REG_A2XX_PA_CL_VPORT_XSCALE 0x0000210f
666#define A2XX_PA_CL_VPORT_XSCALE__MASK 0xffffffff
667#define A2XX_PA_CL_VPORT_XSCALE__SHIFT 0
668static inline uint32_t A2XX_PA_CL_VPORT_XSCALE(float val)
669{
670 return ((fui(val)) << A2XX_PA_CL_VPORT_XSCALE__SHIFT) & A2XX_PA_CL_VPORT_XSCALE__MASK;
671}
672
673#define REG_A2XX_PA_CL_VPORT_XOFFSET 0x00002110
674#define A2XX_PA_CL_VPORT_XOFFSET__MASK 0xffffffff
675#define A2XX_PA_CL_VPORT_XOFFSET__SHIFT 0
676static inline uint32_t A2XX_PA_CL_VPORT_XOFFSET(float val)
677{
678 return ((fui(val)) << A2XX_PA_CL_VPORT_XOFFSET__SHIFT) & A2XX_PA_CL_VPORT_XOFFSET__MASK;
679}
680
681#define REG_A2XX_PA_CL_VPORT_YSCALE 0x00002111
682#define A2XX_PA_CL_VPORT_YSCALE__MASK 0xffffffff
683#define A2XX_PA_CL_VPORT_YSCALE__SHIFT 0
684static inline uint32_t A2XX_PA_CL_VPORT_YSCALE(float val)
685{
686 return ((fui(val)) << A2XX_PA_CL_VPORT_YSCALE__SHIFT) & A2XX_PA_CL_VPORT_YSCALE__MASK;
687}
688
689#define REG_A2XX_PA_CL_VPORT_YOFFSET 0x00002112
690#define A2XX_PA_CL_VPORT_YOFFSET__MASK 0xffffffff
691#define A2XX_PA_CL_VPORT_YOFFSET__SHIFT 0
692static inline uint32_t A2XX_PA_CL_VPORT_YOFFSET(float val)
693{
694 return ((fui(val)) << A2XX_PA_CL_VPORT_YOFFSET__SHIFT) & A2XX_PA_CL_VPORT_YOFFSET__MASK;
695}
696
697#define REG_A2XX_PA_CL_VPORT_ZSCALE 0x00002113
698#define A2XX_PA_CL_VPORT_ZSCALE__MASK 0xffffffff
699#define A2XX_PA_CL_VPORT_ZSCALE__SHIFT 0
700static inline uint32_t A2XX_PA_CL_VPORT_ZSCALE(float val)
701{
702 return ((fui(val)) << A2XX_PA_CL_VPORT_ZSCALE__SHIFT) & A2XX_PA_CL_VPORT_ZSCALE__MASK;
703}
704
705#define REG_A2XX_PA_CL_VPORT_ZOFFSET 0x00002114
706#define A2XX_PA_CL_VPORT_ZOFFSET__MASK 0xffffffff
707#define A2XX_PA_CL_VPORT_ZOFFSET__SHIFT 0
708static inline uint32_t A2XX_PA_CL_VPORT_ZOFFSET(float val)
709{
710 return ((fui(val)) << A2XX_PA_CL_VPORT_ZOFFSET__SHIFT) & A2XX_PA_CL_VPORT_ZOFFSET__MASK;
711}
712
713#define REG_A2XX_SQ_PROGRAM_CNTL 0x00002180
714#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK 0x000000ff
715#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT 0
716static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_REGS(uint32_t val)
717{
718 return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK;
719}
720#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK 0x0000ff00
721#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT 8
722static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_REGS(uint32_t val)
723{
724 return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK;
725}
726#define A2XX_SQ_PROGRAM_CNTL_VS_RESOURCE 0x00010000
727#define A2XX_SQ_PROGRAM_CNTL_PS_RESOURCE 0x00020000
728#define A2XX_SQ_PROGRAM_CNTL_PARAM_GEN 0x00040000
729#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_PIX 0x00080000
730#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK 0x00f00000
731#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT 20
732static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT(uint32_t val)
733{
734 return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK;
735}
736#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK 0x07000000
737#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT 24
738static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE(enum a2xx_sq_ps_vtx_mode val)
739{
740 return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK;
741}
742#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK 0x78000000
743#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT 27
744static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE(uint32_t val)
745{
746 return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK;
747}
748#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_VTX 0x80000000
749
750#define REG_A2XX_SQ_CONTEXT_MISC 0x00002181
751#define A2XX_SQ_CONTEXT_MISC_INST_PRED_OPTIMIZE 0x00000001
752#define A2XX_SQ_CONTEXT_MISC_SC_OUTPUT_SCREEN_XY 0x00000002
753#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK 0x0000000c
754#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT 2
755static inline uint32_t A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL(enum a2xx_sq_sample_cntl val)
756{
757 return ((val) << A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT) & A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK;
758}
759#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK 0x0000ff00
760#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT 8
761static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
762{
763 return ((val) << A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT) & A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK;
764}
765#define A2XX_SQ_CONTEXT_MISC_PERFCOUNTER_REF 0x00010000
766#define A2XX_SQ_CONTEXT_MISC_YEILD_OPTIMIZE 0x00020000
767#define A2XX_SQ_CONTEXT_MISC_TX_CACHE_SEL 0x00040000
768
769#define REG_A2XX_SQ_INTERPOLATOR_CNTL 0x00002182
770
771#define REG_A2XX_SQ_WRAPPING_0 0x00002183
772
773#define REG_A2XX_SQ_WRAPPING_1 0x00002184
774
775#define REG_A2XX_SQ_PS_PROGRAM 0x000021f6
776
777#define REG_A2XX_SQ_VS_PROGRAM 0x000021f7
778
779#define REG_A2XX_RB_DEPTHCONTROL 0x00002200
780#define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001
781#define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002
782#define A2XX_RB_DEPTHCONTROL_Z_WRITE_ENABLE 0x00000004
783#define A2XX_RB_DEPTHCONTROL_EARLY_Z_ENABLE 0x00000008
784#define A2XX_RB_DEPTHCONTROL_ZFUNC__MASK 0x00000070
785#define A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT 4
786static inline uint32_t A2XX_RB_DEPTHCONTROL_ZFUNC(enum adreno_compare_func val)
787{
788 return ((val) << A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_ZFUNC__MASK;
789}
790#define A2XX_RB_DEPTHCONTROL_BACKFACE_ENABLE 0x00000080
791#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK 0x00000700
792#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT 8
793static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC(enum adreno_compare_func val)
794{
795 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK;
796}
797#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK 0x00003800
798#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT 11
799static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL(enum adreno_stencil_op val)
800{
801 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK;
802}
803#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK 0x0001c000
804#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT 14
805static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS(enum adreno_stencil_op val)
806{
807 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK;
808}
809#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK 0x000e0000
810#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT 17
811static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL(enum adreno_stencil_op val)
812{
813 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK;
814}
815#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK 0x00700000
816#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT 20
817static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF(enum adreno_compare_func val)
818{
819 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK;
820}
821#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK 0x03800000
822#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT 23
823static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF(enum adreno_stencil_op val)
824{
825 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK;
826}
827#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK 0x1c000000
828#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT 26
829static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF(enum adreno_stencil_op val)
830{
831 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK;
832}
833#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK 0xe0000000
834#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT 29
835static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF(enum adreno_stencil_op val)
836{
837 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK;
838}
839
840#define REG_A2XX_RB_BLEND_CONTROL 0x00002201
841#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK 0x0000001f
842#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT 0
843static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(enum adreno_rb_blend_factor val)
844{
845 return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK;
846}
847#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0
848#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5
849static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum adreno_rb_blend_opcode val)
850{
851 return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK;
852}
853#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK 0x00001f00
854#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT 8
855static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND(enum adreno_rb_blend_factor val)
856{
857 return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK;
858}
859#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK 0x001f0000
860#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT 16
861static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(enum adreno_rb_blend_factor val)
862{
863 return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK;
864}
865#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000
866#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21
867static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum adreno_rb_blend_opcode val)
868{
869 return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK;
870}
871#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK 0x1f000000
872#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT 24
873static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND(enum adreno_rb_blend_factor val)
874{
875 return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK;
876}
877#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE_ENABLE 0x20000000
878#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE 0x40000000
879
880#define REG_A2XX_RB_COLORCONTROL 0x00002202
881#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK 0x00000007
882#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT 0
883static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_FUNC(enum adreno_compare_func val)
884{
885 return ((val) << A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK;
886}
887#define A2XX_RB_COLORCONTROL_ALPHA_TEST_ENABLE 0x00000008
888#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_ENABLE 0x00000010
889#define A2XX_RB_COLORCONTROL_BLEND_DISABLE 0x00000020
890#define A2XX_RB_COLORCONTROL_VOB_ENABLE 0x00000040
891#define A2XX_RB_COLORCONTROL_VS_EXPORTS_FOG 0x00000080
892#define A2XX_RB_COLORCONTROL_ROP_CODE__MASK 0x00000f00
893#define A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT 8
894static inline uint32_t A2XX_RB_COLORCONTROL_ROP_CODE(uint32_t val)
895{
896 return ((val) << A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT) & A2XX_RB_COLORCONTROL_ROP_CODE__MASK;
897}
898#define A2XX_RB_COLORCONTROL_DITHER_MODE__MASK 0x00003000
899#define A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT 12
900static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
901{
902 return ((val) << A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_MODE__MASK;
903}
904#define A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK 0x0000c000
905#define A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT 14
906static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_TYPE(enum a2xx_rb_dither_type val)
907{
908 return ((val) << A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK;
909}
910#define A2XX_RB_COLORCONTROL_PIXEL_FOG 0x00010000
911#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK 0x03000000
912#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT 24
913static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0(uint32_t val)
914{
915 return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK;
916}
917#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK 0x0c000000
918#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT 26
919static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1(uint32_t val)
920{
921 return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK;
922}
923#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK 0x30000000
924#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT 28
925static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2(uint32_t val)
926{
927 return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK;
928}
929#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK 0xc0000000
930#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT 30
931static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3(uint32_t val)
932{
933 return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK;
934}
935
936#define REG_A2XX_VGT_CURRENT_BIN_ID_MAX 0x00002203
937#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK 0x00000007
938#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT 0
939static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN(uint32_t val)
940{
941 return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK;
942}
943#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK 0x00000038
944#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT 3
945static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_ROW(uint32_t val)
946{
947 return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK;
948}
949#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK 0x000001c0
950#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT 6
951static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK(uint32_t val)
952{
953 return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK;
954}
955
956#define REG_A2XX_PA_CL_CLIP_CNTL 0x00002204
957#define A2XX_PA_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
958#define A2XX_PA_CL_CLIP_CNTL_BOUNDARY_EDGE_FLAG_ENA 0x00040000
959#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK 0x00080000
960#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT 19
961static inline uint32_t A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF(enum a2xx_dx_clip_space val)
962{
963 return ((val) << A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT) & A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK;
964}
965#define A2XX_PA_CL_CLIP_CNTL_DIS_CLIP_ERR_DETECT 0x00100000
966#define A2XX_PA_CL_CLIP_CNTL_VTX_KILL_OR 0x00200000
967#define A2XX_PA_CL_CLIP_CNTL_XY_NAN_RETAIN 0x00400000
968#define A2XX_PA_CL_CLIP_CNTL_Z_NAN_RETAIN 0x00800000
969#define A2XX_PA_CL_CLIP_CNTL_W_NAN_RETAIN 0x01000000
970
971#define REG_A2XX_PA_SU_SC_MODE_CNTL 0x00002205
972#define A2XX_PA_SU_SC_MODE_CNTL_CULL_FRONT 0x00000001
973#define A2XX_PA_SU_SC_MODE_CNTL_CULL_BACK 0x00000002
974#define A2XX_PA_SU_SC_MODE_CNTL_FACE 0x00000004
975#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK 0x00000018
976#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT 3
977static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_POLYMODE(enum a2xx_pa_su_sc_polymode val)
978{
979 return ((val) << A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK;
980}
981#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK 0x000000e0
982#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT 5
983static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
984{
985 return ((val) << A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK;
986}
987#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK 0x00000700
988#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT 8
989static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
990{
991 return ((val) << A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK;
992}
993#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_FRONT_ENABLE 0x00000800
994#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_BACK_ENABLE 0x00001000
995#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_PARA_ENABLE 0x00002000
996#define A2XX_PA_SU_SC_MODE_CNTL_MSAA_ENABLE 0x00008000
997#define A2XX_PA_SU_SC_MODE_CNTL_VTX_WINDOW_OFFSET_ENABLE 0x00010000
998#define A2XX_PA_SU_SC_MODE_CNTL_LINE_STIPPLE_ENABLE 0x00040000
999#define A2XX_PA_SU_SC_MODE_CNTL_PROVOKING_VTX_LAST 0x00080000
1000#define A2XX_PA_SU_SC_MODE_CNTL_PERSP_CORR_DIS 0x00100000
1001#define A2XX_PA_SU_SC_MODE_CNTL_MULTI_PRIM_IB_ENA 0x00200000
1002#define A2XX_PA_SU_SC_MODE_CNTL_QUAD_ORDER_ENABLE 0x00800000
1003#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_ALL_TRI 0x02000000
1004#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_FIRST_TRI_NEW_STATE 0x04000000
1005#define A2XX_PA_SU_SC_MODE_CNTL_CLAMPED_FACENESS 0x10000000
1006#define A2XX_PA_SU_SC_MODE_CNTL_ZERO_AREA_FACENESS 0x20000000
1007#define A2XX_PA_SU_SC_MODE_CNTL_FACE_KILL_ENABLE 0x40000000
1008#define A2XX_PA_SU_SC_MODE_CNTL_FACE_WRITE_ENABLE 0x80000000
1009
1010#define REG_A2XX_PA_CL_VTE_CNTL 0x00002206
1011#define A2XX_PA_CL_VTE_CNTL_VPORT_X_SCALE_ENA 0x00000001
1012#define A2XX_PA_CL_VTE_CNTL_VPORT_X_OFFSET_ENA 0x00000002
1013#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_SCALE_ENA 0x00000004
1014#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_OFFSET_ENA 0x00000008
1015#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_SCALE_ENA 0x00000010
1016#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_OFFSET_ENA 0x00000020
1017#define A2XX_PA_CL_VTE_CNTL_VTX_XY_FMT 0x00000100
1018#define A2XX_PA_CL_VTE_CNTL_VTX_Z_FMT 0x00000200
1019#define A2XX_PA_CL_VTE_CNTL_VTX_W0_FMT 0x00000400
1020#define A2XX_PA_CL_VTE_CNTL_PERFCOUNTER_REF 0x00000800
1021
1022#define REG_A2XX_VGT_CURRENT_BIN_ID_MIN 0x00002207
1023#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK 0x00000007
1024#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT 0
1025static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN(uint32_t val)
1026{
1027 return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK;
1028}
1029#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK 0x00000038
1030#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT 3
1031static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_ROW(uint32_t val)
1032{
1033 return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK;
1034}
1035#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK 0x000001c0
1036#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT 6
1037static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK(uint32_t val)
1038{
1039 return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK;
1040}
1041
1042#define REG_A2XX_RB_MODECONTROL 0x00002208
1043#define A2XX_RB_MODECONTROL_EDRAM_MODE__MASK 0x00000007
1044#define A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT 0
1045static inline uint32_t A2XX_RB_MODECONTROL_EDRAM_MODE(enum a2xx_rb_edram_mode val)
1046{
1047 return ((val) << A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT) & A2XX_RB_MODECONTROL_EDRAM_MODE__MASK;
1048}
1049
1050#define REG_A2XX_A220_RB_LRZ_VSC_CONTROL 0x00002209
1051
1052#define REG_A2XX_RB_SAMPLE_POS 0x0000220a
1053
1054#define REG_A2XX_CLEAR_COLOR 0x0000220b
1055#define A2XX_CLEAR_COLOR_RED__MASK 0x000000ff
1056#define A2XX_CLEAR_COLOR_RED__SHIFT 0
1057static inline uint32_t A2XX_CLEAR_COLOR_RED(uint32_t val)
1058{
1059 return ((val) << A2XX_CLEAR_COLOR_RED__SHIFT) & A2XX_CLEAR_COLOR_RED__MASK;
1060}
1061#define A2XX_CLEAR_COLOR_GREEN__MASK 0x0000ff00
1062#define A2XX_CLEAR_COLOR_GREEN__SHIFT 8
1063static inline uint32_t A2XX_CLEAR_COLOR_GREEN(uint32_t val)
1064{
1065 return ((val) << A2XX_CLEAR_COLOR_GREEN__SHIFT) & A2XX_CLEAR_COLOR_GREEN__MASK;
1066}
1067#define A2XX_CLEAR_COLOR_BLUE__MASK 0x00ff0000
1068#define A2XX_CLEAR_COLOR_BLUE__SHIFT 16
1069static inline uint32_t A2XX_CLEAR_COLOR_BLUE(uint32_t val)
1070{
1071 return ((val) << A2XX_CLEAR_COLOR_BLUE__SHIFT) & A2XX_CLEAR_COLOR_BLUE__MASK;
1072}
1073#define A2XX_CLEAR_COLOR_ALPHA__MASK 0xff000000
1074#define A2XX_CLEAR_COLOR_ALPHA__SHIFT 24
1075static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val)
1076{
1077 return ((val) << A2XX_CLEAR_COLOR_ALPHA__SHIFT) & A2XX_CLEAR_COLOR_ALPHA__MASK;
1078}
1079
1080#define REG_A2XX_A220_GRAS_CONTROL 0x00002210
1081
1082#define REG_A2XX_PA_SU_POINT_SIZE 0x00002280
1083#define A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK 0x0000ffff
1084#define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT 0
1085static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val)
1086{
1087 return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
1088}
1089#define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK 0xffff0000
1090#define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT 16
1091static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
1092{
1093 return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
1094}
1095
1096#define REG_A2XX_PA_SU_POINT_MINMAX 0x00002281
1097#define A2XX_PA_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
1098#define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT 0
1099static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val)
1100{
1101 return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
1102}
1103#define A2XX_PA_SU_POINT_MINMAX_MAX__MASK 0xffff0000
1104#define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT 16
1105static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
1106{
1107 return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
1108}
1109
1110#define REG_A2XX_PA_SU_LINE_CNTL 0x00002282
1111#define A2XX_PA_SU_LINE_CNTL_WIDTH__MASK 0x0000ffff
1112#define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT 0
1113static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val)
1114{
1115 return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
1116}
1117
1118#define REG_A2XX_PA_SC_LINE_STIPPLE 0x00002283
1119#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK 0x0000ffff
1120#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT 0
1121static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN(uint32_t val)
1122{
1123 return ((val) << A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK;
1124}
1125#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK 0x00ff0000
1126#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT 16
1127static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT(uint32_t val)
1128{
1129 return ((val) << A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK;
1130}
1131#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK 0x10000000
1132#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT 28
1133static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER(enum a2xx_pa_sc_pattern_bit_order val)
1134{
1135 return ((val) << A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK;
1136}
1137#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK 0x60000000
1138#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT 29
1139static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL(enum a2xx_pa_sc_auto_reset_cntl val)
1140{
1141 return ((val) << A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK;
1142}
1143
1144#define REG_A2XX_PA_SC_VIZ_QUERY 0x00002293
1145
1146#define REG_A2XX_VGT_ENHANCE 0x00002294
1147
1148#define REG_A2XX_PA_SC_LINE_CNTL 0x00002300
1149#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK 0x0000ffff
1150#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT 0
1151static inline uint32_t A2XX_PA_SC_LINE_CNTL_BRES_CNTL(uint32_t val)
1152{
1153 return ((val) << A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT) & A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK;
1154}
1155#define A2XX_PA_SC_LINE_CNTL_USE_BRES_CNTL 0x00000100
1156#define A2XX_PA_SC_LINE_CNTL_EXPAND_LINE_WIDTH 0x00000200
1157#define A2XX_PA_SC_LINE_CNTL_LAST_PIXEL 0x00000400
1158
1159#define REG_A2XX_PA_SC_AA_CONFIG 0x00002301
1160
1161#define REG_A2XX_PA_SU_VTX_CNTL 0x00002302
1162#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK 0x00000001
1163#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT 0
1164static inline uint32_t A2XX_PA_SU_VTX_CNTL_PIX_CENTER(enum a2xx_pa_pixcenter val)
1165{
1166 return ((val) << A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT) & A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK;
1167}
1168#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK 0x00000006
1169#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT 1
1170static inline uint32_t A2XX_PA_SU_VTX_CNTL_ROUND_MODE(enum a2xx_pa_roundmode val)
1171{
1172 return ((val) << A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK;
1173}
1174#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK 0x00000380
1175#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT 7
1176static inline uint32_t A2XX_PA_SU_VTX_CNTL_QUANT_MODE(enum a2xx_pa_quantmode val)
1177{
1178 return ((val) << A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK;
1179}
1180
1181#define REG_A2XX_PA_CL_GB_VERT_CLIP_ADJ 0x00002303
1182#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK 0xffffffff
1183#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT 0
1184static inline uint32_t A2XX_PA_CL_GB_VERT_CLIP_ADJ(float val)
1185{
1186 return ((fui(val)) << A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK;
1187}
1188
1189#define REG_A2XX_PA_CL_GB_VERT_DISC_ADJ 0x00002304
1190#define A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK 0xffffffff
1191#define A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT 0
1192static inline uint32_t A2XX_PA_CL_GB_VERT_DISC_ADJ(float val)
1193{
1194 return ((fui(val)) << A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK;
1195}
1196
1197#define REG_A2XX_PA_CL_GB_HORZ_CLIP_ADJ 0x00002305
1198#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK 0xffffffff
1199#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT 0
1200static inline uint32_t A2XX_PA_CL_GB_HORZ_CLIP_ADJ(float val)
1201{
1202 return ((fui(val)) << A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK;
1203}
1204
1205#define REG_A2XX_PA_CL_GB_HORZ_DISC_ADJ 0x00002306
1206#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK 0xffffffff
1207#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT 0
1208static inline uint32_t A2XX_PA_CL_GB_HORZ_DISC_ADJ(float val)
1209{
1210 return ((fui(val)) << A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK;
1211}
1212
1213#define REG_A2XX_SQ_VS_CONST 0x00002307
1214#define A2XX_SQ_VS_CONST_BASE__MASK 0x000001ff
1215#define A2XX_SQ_VS_CONST_BASE__SHIFT 0
1216static inline uint32_t A2XX_SQ_VS_CONST_BASE(uint32_t val)
1217{
1218 return ((val) << A2XX_SQ_VS_CONST_BASE__SHIFT) & A2XX_SQ_VS_CONST_BASE__MASK;
1219}
1220#define A2XX_SQ_VS_CONST_SIZE__MASK 0x001ff000
1221#define A2XX_SQ_VS_CONST_SIZE__SHIFT 12
1222static inline uint32_t A2XX_SQ_VS_CONST_SIZE(uint32_t val)
1223{
1224 return ((val) << A2XX_SQ_VS_CONST_SIZE__SHIFT) & A2XX_SQ_VS_CONST_SIZE__MASK;
1225}
1226
1227#define REG_A2XX_SQ_PS_CONST 0x00002308
1228#define A2XX_SQ_PS_CONST_BASE__MASK 0x000001ff
1229#define A2XX_SQ_PS_CONST_BASE__SHIFT 0
1230static inline uint32_t A2XX_SQ_PS_CONST_BASE(uint32_t val)
1231{
1232 return ((val) << A2XX_SQ_PS_CONST_BASE__SHIFT) & A2XX_SQ_PS_CONST_BASE__MASK;
1233}
1234#define A2XX_SQ_PS_CONST_SIZE__MASK 0x001ff000
1235#define A2XX_SQ_PS_CONST_SIZE__SHIFT 12
1236static inline uint32_t A2XX_SQ_PS_CONST_SIZE(uint32_t val)
1237{
1238 return ((val) << A2XX_SQ_PS_CONST_SIZE__SHIFT) & A2XX_SQ_PS_CONST_SIZE__MASK;
1239}
1240
1241#define REG_A2XX_SQ_DEBUG_MISC_0 0x00002309
1242
1243#define REG_A2XX_SQ_DEBUG_MISC_1 0x0000230a
1244
1245#define REG_A2XX_PA_SC_AA_MASK 0x00002312
1246
1247#define REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL 0x00002316
1248
1249#define REG_A2XX_VGT_OUT_DEALLOC_CNTL 0x00002317
1250
1251#define REG_A2XX_RB_COPY_CONTROL 0x00002318
1252#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK 0x00000007
1253#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT 0
1254static inline uint32_t A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT(enum a2xx_rb_copy_sample_select val)
1255{
1256 return ((val) << A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT) & A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK;
1257}
1258#define A2XX_RB_COPY_CONTROL_DEPTH_CLEAR_ENABLE 0x00000008
1259#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK 0x000000f0
1260#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT 4
1261static inline uint32_t A2XX_RB_COPY_CONTROL_CLEAR_MASK(uint32_t val)
1262{
1263 return ((val) << A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT) & A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK;
1264}
1265
1266#define REG_A2XX_RB_COPY_DEST_BASE 0x00002319
1267
1268#define REG_A2XX_RB_COPY_DEST_PITCH 0x0000231a
1269#define A2XX_RB_COPY_DEST_PITCH__MASK 0xffffffff
1270#define A2XX_RB_COPY_DEST_PITCH__SHIFT 0
1271static inline uint32_t A2XX_RB_COPY_DEST_PITCH(uint32_t val)
1272{
1273 return ((val >> 5) << A2XX_RB_COPY_DEST_PITCH__SHIFT) & A2XX_RB_COPY_DEST_PITCH__MASK;
1274}
1275
1276#define REG_A2XX_RB_COPY_DEST_INFO 0x0000231b
1277#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK 0x00000007
1278#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT 0
1279static inline uint32_t A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN(enum adreno_rb_surface_endian val)
1280{
1281 return ((val) << A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT) & A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK;
1282}
1283#define A2XX_RB_COPY_DEST_INFO_LINEAR 0x00000008
1284#define A2XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000f0
1285#define A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 4
1286static inline uint32_t A2XX_RB_COPY_DEST_INFO_FORMAT(enum a2xx_colorformatx val)
1287{
1288 return ((val) << A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A2XX_RB_COPY_DEST_INFO_FORMAT__MASK;
1289}
1290#define A2XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
1291#define A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
1292static inline uint32_t A2XX_RB_COPY_DEST_INFO_SWAP(uint32_t val)
1293{
1294 return ((val) << A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A2XX_RB_COPY_DEST_INFO_SWAP__MASK;
1295}
1296#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
1297#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
1298static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
1299{
1300 return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
1301}
1302#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK 0x00003000
1303#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT 12
1304static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_TYPE(enum a2xx_rb_dither_type val)
1305{
1306 return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK;
1307}
1308#define A2XX_RB_COPY_DEST_INFO_WRITE_RED 0x00004000
1309#define A2XX_RB_COPY_DEST_INFO_WRITE_GREEN 0x00008000
1310#define A2XX_RB_COPY_DEST_INFO_WRITE_BLUE 0x00010000
1311#define A2XX_RB_COPY_DEST_INFO_WRITE_ALPHA 0x00020000
1312
1313#define REG_A2XX_RB_COPY_DEST_OFFSET 0x0000231c
1314#define A2XX_RB_COPY_DEST_OFFSET_X__MASK 0x00001fff
1315#define A2XX_RB_COPY_DEST_OFFSET_X__SHIFT 0
1316static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_X(uint32_t val)
1317{
1318 return ((val) << A2XX_RB_COPY_DEST_OFFSET_X__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_X__MASK;
1319}
1320#define A2XX_RB_COPY_DEST_OFFSET_Y__MASK 0x03ffe000
1321#define A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT 13
1322static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
1323{
1324 return ((val) << A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_Y__MASK;
1325}
1326
1327#define REG_A2XX_RB_DEPTH_CLEAR 0x0000231d
1328
1329#define REG_A2XX_RB_SAMPLE_COUNT_CTL 0x00002324
1330
1331#define REG_A2XX_RB_COLOR_DEST_MASK 0x00002326
1332
1333#define REG_A2XX_A225_GRAS_UCP0X 0x00002340
1334
1335#define REG_A2XX_A225_GRAS_UCP5W 0x00002357
1336
1337#define REG_A2XX_A225_GRAS_UCP_ENABLED 0x00002360
1338
1339#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE 0x00002380
1340
1341#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_OFFSET 0x00002383
1342
1343#define REG_A2XX_SQ_CONSTANT_0 0x00004000
1344
1345#define REG_A2XX_SQ_FETCH_0 0x00004800
1346
1347#define REG_A2XX_SQ_CF_BOOLEANS 0x00004900
1348
1349#define REG_A2XX_SQ_CF_LOOP 0x00004908
1350
1351#define REG_A2XX_COHER_SIZE_PM4 0x00000a29
1352
1353#define REG_A2XX_COHER_BASE_PM4 0x00000a2a
1354
1355#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b
1356
1357#define REG_A2XX_SQ_TEX_0 0x00000000
1358#define A2XX_SQ_TEX_0_CLAMP_X__MASK 0x00001c00
1359#define A2XX_SQ_TEX_0_CLAMP_X__SHIFT 10
1360static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val)
1361{
1362 return ((val) << A2XX_SQ_TEX_0_CLAMP_X__SHIFT) & A2XX_SQ_TEX_0_CLAMP_X__MASK;
1363}
1364#define A2XX_SQ_TEX_0_CLAMP_Y__MASK 0x0000e000
1365#define A2XX_SQ_TEX_0_CLAMP_Y__SHIFT 13
1366static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Y(enum sq_tex_clamp val)
1367{
1368 return ((val) << A2XX_SQ_TEX_0_CLAMP_Y__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Y__MASK;
1369}
1370#define A2XX_SQ_TEX_0_CLAMP_Z__MASK 0x00070000
1371#define A2XX_SQ_TEX_0_CLAMP_Z__SHIFT 16
1372static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val)
1373{
1374 return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK;
1375}
1376#define A2XX_SQ_TEX_0_PITCH__MASK 0xffc00000
1377#define A2XX_SQ_TEX_0_PITCH__SHIFT 22
1378static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
1379{
1380 return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
1381}
1382
1383#define REG_A2XX_SQ_TEX_1 0x00000001
1384
1385#define REG_A2XX_SQ_TEX_2 0x00000002
1386#define A2XX_SQ_TEX_2_WIDTH__MASK 0x00001fff
1387#define A2XX_SQ_TEX_2_WIDTH__SHIFT 0
1388static inline uint32_t A2XX_SQ_TEX_2_WIDTH(uint32_t val)
1389{
1390 return ((val) << A2XX_SQ_TEX_2_WIDTH__SHIFT) & A2XX_SQ_TEX_2_WIDTH__MASK;
1391}
1392#define A2XX_SQ_TEX_2_HEIGHT__MASK 0x03ffe000
1393#define A2XX_SQ_TEX_2_HEIGHT__SHIFT 13
1394static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val)
1395{
1396 return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK;
1397}
1398
1399#define REG_A2XX_SQ_TEX_3 0x00000003
1400#define A2XX_SQ_TEX_3_SWIZ_X__MASK 0x0000000e
1401#define A2XX_SQ_TEX_3_SWIZ_X__SHIFT 1
1402static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val)
1403{
1404 return ((val) << A2XX_SQ_TEX_3_SWIZ_X__SHIFT) & A2XX_SQ_TEX_3_SWIZ_X__MASK;
1405}
1406#define A2XX_SQ_TEX_3_SWIZ_Y__MASK 0x00000070
1407#define A2XX_SQ_TEX_3_SWIZ_Y__SHIFT 4
1408static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Y(enum sq_tex_swiz val)
1409{
1410 return ((val) << A2XX_SQ_TEX_3_SWIZ_Y__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Y__MASK;
1411}
1412#define A2XX_SQ_TEX_3_SWIZ_Z__MASK 0x00000380
1413#define A2XX_SQ_TEX_3_SWIZ_Z__SHIFT 7
1414static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Z(enum sq_tex_swiz val)
1415{
1416 return ((val) << A2XX_SQ_TEX_3_SWIZ_Z__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Z__MASK;
1417}
1418#define A2XX_SQ_TEX_3_SWIZ_W__MASK 0x00001c00
1419#define A2XX_SQ_TEX_3_SWIZ_W__SHIFT 10
1420static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val)
1421{
1422 return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK;
1423}
1424#define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK 0x00180000
1425#define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT 19
1426static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val)
1427{
1428 return ((val) << A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK;
1429}
1430#define A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK 0x00600000
1431#define A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT 21
1432static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val)
1433{
1434 return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK;
1435}
1436
1437
1438#endif /* A2XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
new file mode 100644
index 000000000000..d183516067b4
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -0,0 +1,2193 @@
1#ifndef A3XX_XML
2#define A3XX_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
17
18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark)
20
21Permission is hereby granted, free of charge, to any person obtaining
22a copy of this software and associated documentation files (the
23"Software"), to deal in the Software without restriction, including
24without limitation the rights to use, copy, modify, merge, publish,
25distribute, sublicense, and/or sell copies of the Software, and to
26permit persons to whom the Software is furnished to do so, subject to
27the following conditions:
28
29The above copyright notice and this permission notice (including the
30next paragraph) shall be included in all copies or substantial
31portions of the Software.
32
33THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
35MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
36IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
37LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
38OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
39WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40*/
41
42
43enum a3xx_render_mode {
44 RB_RENDERING_PASS = 0,
45 RB_TILING_PASS = 1,
46 RB_RESOLVE_PASS = 2,
47};
48
49enum a3xx_tile_mode {
50 LINEAR = 0,
51 TILE_32X32 = 2,
52};
53
54enum a3xx_threadmode {
55 MULTI = 0,
56 SINGLE = 1,
57};
58
59enum a3xx_instrbuffermode {
60 BUFFER = 1,
61};
62
63enum a3xx_threadsize {
64 TWO_QUADS = 0,
65 FOUR_QUADS = 1,
66};
67
68enum a3xx_state_block_id {
69 HLSQ_BLOCK_ID_TP_TEX = 2,
70 HLSQ_BLOCK_ID_TP_MIPMAP = 3,
71 HLSQ_BLOCK_ID_SP_VS = 4,
72 HLSQ_BLOCK_ID_SP_FS = 6,
73};
74
75enum a3xx_cache_opcode {
76 INVALIDATE = 1,
77};
78
79enum a3xx_vtx_fmt {
80 VFMT_FLOAT_32 = 0,
81 VFMT_FLOAT_32_32 = 1,
82 VFMT_FLOAT_32_32_32 = 2,
83 VFMT_FLOAT_32_32_32_32 = 3,
84 VFMT_FLOAT_16 = 4,
85 VFMT_FLOAT_16_16 = 5,
86 VFMT_FLOAT_16_16_16 = 6,
87 VFMT_FLOAT_16_16_16_16 = 7,
88 VFMT_FIXED_32 = 8,
89 VFMT_FIXED_32_32 = 9,
90 VFMT_FIXED_32_32_32 = 10,
91 VFMT_FIXED_32_32_32_32 = 11,
92 VFMT_SHORT_16 = 16,
93 VFMT_SHORT_16_16 = 17,
94 VFMT_SHORT_16_16_16 = 18,
95 VFMT_SHORT_16_16_16_16 = 19,
96 VFMT_USHORT_16 = 20,
97 VFMT_USHORT_16_16 = 21,
98 VFMT_USHORT_16_16_16 = 22,
99 VFMT_USHORT_16_16_16_16 = 23,
100 VFMT_NORM_SHORT_16 = 24,
101 VFMT_NORM_SHORT_16_16 = 25,
102 VFMT_NORM_SHORT_16_16_16 = 26,
103 VFMT_NORM_SHORT_16_16_16_16 = 27,
104 VFMT_NORM_USHORT_16 = 28,
105 VFMT_NORM_USHORT_16_16 = 29,
106 VFMT_NORM_USHORT_16_16_16 = 30,
107 VFMT_NORM_USHORT_16_16_16_16 = 31,
108 VFMT_UBYTE_8 = 40,
109 VFMT_UBYTE_8_8 = 41,
110 VFMT_UBYTE_8_8_8 = 42,
111 VFMT_UBYTE_8_8_8_8 = 43,
112 VFMT_NORM_UBYTE_8 = 44,
113 VFMT_NORM_UBYTE_8_8 = 45,
114 VFMT_NORM_UBYTE_8_8_8 = 46,
115 VFMT_NORM_UBYTE_8_8_8_8 = 47,
116 VFMT_BYTE_8 = 48,
117 VFMT_BYTE_8_8 = 49,
118 VFMT_BYTE_8_8_8 = 50,
119 VFMT_BYTE_8_8_8_8 = 51,
120 VFMT_NORM_BYTE_8 = 52,
121 VFMT_NORM_BYTE_8_8 = 53,
122 VFMT_NORM_BYTE_8_8_8 = 54,
123 VFMT_NORM_BYTE_8_8_8_8 = 55,
124 VFMT_UINT_10_10_10_2 = 60,
125 VFMT_NORM_UINT_10_10_10_2 = 61,
126 VFMT_INT_10_10_10_2 = 62,
127 VFMT_NORM_INT_10_10_10_2 = 63,
128};
129
130enum a3xx_tex_fmt {
131 TFMT_NORM_USHORT_565 = 4,
132 TFMT_NORM_USHORT_5551 = 6,
133 TFMT_NORM_USHORT_4444 = 7,
134 TFMT_NORM_UINT_X8Z24 = 10,
135 TFMT_NORM_UINT_NV12_UV_TILED = 17,
136 TFMT_NORM_UINT_NV12_Y_TILED = 19,
137 TFMT_NORM_UINT_NV12_UV = 21,
138 TFMT_NORM_UINT_NV12_Y = 23,
139 TFMT_NORM_UINT_I420_Y = 24,
140 TFMT_NORM_UINT_I420_U = 26,
141 TFMT_NORM_UINT_I420_V = 27,
142 TFMT_NORM_UINT_2_10_10_10 = 41,
143 TFMT_NORM_UINT_A8 = 44,
144 TFMT_NORM_UINT_L8_A8 = 47,
145 TFMT_NORM_UINT_8 = 48,
146 TFMT_NORM_UINT_8_8 = 49,
147 TFMT_NORM_UINT_8_8_8 = 50,
148 TFMT_NORM_UINT_8_8_8_8 = 51,
149 TFMT_FLOAT_16 = 64,
150 TFMT_FLOAT_16_16 = 65,
151 TFMT_FLOAT_16_16_16_16 = 67,
152 TFMT_FLOAT_32 = 84,
153 TFMT_FLOAT_32_32 = 85,
154 TFMT_FLOAT_32_32_32_32 = 87,
155};
156
157enum a3xx_tex_fetchsize {
158 TFETCH_DISABLE = 0,
159 TFETCH_1_BYTE = 1,
160 TFETCH_2_BYTE = 2,
161 TFETCH_4_BYTE = 3,
162 TFETCH_8_BYTE = 4,
163 TFETCH_16_BYTE = 5,
164};
165
166enum a3xx_color_fmt {
167 RB_R8G8B8_UNORM = 4,
168 RB_R8G8B8A8_UNORM = 8,
169 RB_Z16_UNORM = 12,
170 RB_A8_UNORM = 20,
171};
172
173enum a3xx_color_swap {
174 WZYX = 0,
175 WXYZ = 1,
176 ZYXW = 2,
177 XYZW = 3,
178};
179
180enum a3xx_msaa_samples {
181 MSAA_ONE = 0,
182 MSAA_TWO = 1,
183 MSAA_FOUR = 2,
184};
185
186enum a3xx_sp_perfcounter_select {
187 SP_FS_CFLOW_INSTRUCTIONS = 12,
188 SP_FS_FULL_ALU_INSTRUCTIONS = 14,
189 SP0_ICL1_MISSES = 26,
190 SP_ALU_ACTIVE_CYCLES = 29,
191};
192
193enum adreno_rb_copy_control_mode {
194 RB_COPY_RESOLVE = 1,
195 RB_COPY_DEPTH_STENCIL = 5,
196};
197
198enum a3xx_tex_filter {
199 A3XX_TEX_NEAREST = 0,
200 A3XX_TEX_LINEAR = 1,
201};
202
203enum a3xx_tex_clamp {
204 A3XX_TEX_REPEAT = 0,
205 A3XX_TEX_CLAMP_TO_EDGE = 1,
206 A3XX_TEX_MIRROR_REPEAT = 2,
207 A3XX_TEX_CLAMP_NONE = 3,
208};
209
210enum a3xx_tex_swiz {
211 A3XX_TEX_X = 0,
212 A3XX_TEX_Y = 1,
213 A3XX_TEX_Z = 2,
214 A3XX_TEX_W = 3,
215 A3XX_TEX_ZERO = 4,
216 A3XX_TEX_ONE = 5,
217};
218
219enum a3xx_tex_type {
220 A3XX_TEX_1D = 0,
221 A3XX_TEX_2D = 1,
222 A3XX_TEX_CUBE = 2,
223 A3XX_TEX_3D = 3,
224};
225
226#define A3XX_INT0_RBBM_GPU_IDLE 0x00000001
227#define A3XX_INT0_RBBM_AHB_ERROR 0x00000002
228#define A3XX_INT0_RBBM_REG_TIMEOUT 0x00000004
229#define A3XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
230#define A3XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
231#define A3XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020
232#define A3XX_INT0_VFD_ERROR 0x00000040
233#define A3XX_INT0_CP_SW_INT 0x00000080
234#define A3XX_INT0_CP_T0_PACKET_IN_IB 0x00000100
235#define A3XX_INT0_CP_OPCODE_ERROR 0x00000200
236#define A3XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400
237#define A3XX_INT0_CP_HW_FAULT 0x00000800
238#define A3XX_INT0_CP_DMA 0x00001000
239#define A3XX_INT0_CP_IB2_INT 0x00002000
240#define A3XX_INT0_CP_IB1_INT 0x00004000
241#define A3XX_INT0_CP_RB_INT 0x00008000
242#define A3XX_INT0_CP_REG_PROTECT_FAULT 0x00010000
243#define A3XX_INT0_CP_RB_DONE_TS 0x00020000
244#define A3XX_INT0_CP_VS_DONE_TS 0x00040000
245#define A3XX_INT0_CP_PS_DONE_TS 0x00080000
246#define A3XX_INT0_CACHE_FLUSH_TS 0x00100000
247#define A3XX_INT0_CP_AHB_ERROR_HALT 0x00200000
248#define A3XX_INT0_MISC_HANG_DETECT 0x01000000
249#define A3XX_INT0_UCHE_OOB_ACCESS 0x02000000
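
/* Illustrative sketch, not part of the generated header: the A3XX_INT0_*
 * values above are single-bit flags, so an interrupt mask is simply their
 * bitwise OR; a driver would write such a mask to REG_A3XX_RBBM_INT_0_MASK
 * (defined below) to choose which sources may raise an IRQ.  The selection
 * of bits here is only an example.
 */
#define EXAMPLE_A3XX_INT0_MASK \
	(A3XX_INT0_RBBM_AHB_ERROR | \
	 A3XX_INT0_CP_HW_FAULT | \
	 A3XX_INT0_CP_RB_INT | \
	 A3XX_INT0_CACHE_FLUSH_TS | \
	 A3XX_INT0_UCHE_OOB_ACCESS)
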
250#define REG_A3XX_RBBM_HW_VERSION 0x00000000
251
252#define REG_A3XX_RBBM_HW_RELEASE 0x00000001
253
254#define REG_A3XX_RBBM_HW_CONFIGURATION 0x00000002
255
256#define REG_A3XX_RBBM_CLOCK_CTL 0x00000010
257
258#define REG_A3XX_RBBM_SP_HYST_CNT 0x00000012
259
260#define REG_A3XX_RBBM_SW_RESET_CMD 0x00000018
261
262#define REG_A3XX_RBBM_AHB_CTL0 0x00000020
263
264#define REG_A3XX_RBBM_AHB_CTL1 0x00000021
265
266#define REG_A3XX_RBBM_AHB_CMD 0x00000022
267
268#define REG_A3XX_RBBM_AHB_ERROR_STATUS 0x00000027
269
270#define REG_A3XX_RBBM_GPR0_CTL 0x0000002e
271
272#define REG_A3XX_RBBM_STATUS 0x00000030
273#define A3XX_RBBM_STATUS_HI_BUSY 0x00000001
274#define A3XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
275#define A3XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
276#define A3XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000
277#define A3XX_RBBM_STATUS_VBIF_BUSY 0x00008000
278#define A3XX_RBBM_STATUS_TSE_BUSY 0x00010000
279#define A3XX_RBBM_STATUS_RAS_BUSY 0x00020000
280#define A3XX_RBBM_STATUS_RB_BUSY 0x00040000
281#define A3XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
282#define A3XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
283#define A3XX_RBBM_STATUS_VFD_BUSY 0x00200000
284#define A3XX_RBBM_STATUS_VPC_BUSY 0x00400000
285#define A3XX_RBBM_STATUS_UCHE_BUSY 0x00800000
286#define A3XX_RBBM_STATUS_SP_BUSY 0x01000000
287#define A3XX_RBBM_STATUS_TPL1_BUSY 0x02000000
288#define A3XX_RBBM_STATUS_MARB_BUSY 0x04000000
289#define A3XX_RBBM_STATUS_VSC_BUSY 0x08000000
290#define A3XX_RBBM_STATUS_ARB_BUSY 0x10000000
291#define A3XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
292#define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
293#define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000
294
295#define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033
296
297#define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050
298
299#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL0 0x00000051
300
301#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL1 0x00000054
302
303#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL2 0x00000057
304
305#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a
306
307#define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061
308
309#define REG_A3XX_RBBM_INT_0_MASK 0x00000063
310
311#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064
312
313#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080
314
315#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081
316
317#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD1 0x00000082
318
319#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000084
320
321#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000085
322
323#define REG_A3XX_RBBM_PERFCOUNTER0_SELECT 0x00000086
324
325#define REG_A3XX_RBBM_PERFCOUNTER1_SELECT 0x00000087
326
327#define REG_A3XX_RBBM_GPU_BUSY_MASKED 0x00000088
328
329#define REG_A3XX_RBBM_PERFCTR_CP_0_LO 0x00000090
330
331#define REG_A3XX_RBBM_PERFCTR_CP_0_HI 0x00000091
332
333#define REG_A3XX_RBBM_PERFCTR_RBBM_0_LO 0x00000092
334
335#define REG_A3XX_RBBM_PERFCTR_RBBM_0_HI 0x00000093
336
337#define REG_A3XX_RBBM_PERFCTR_RBBM_1_LO 0x00000094
338
339#define REG_A3XX_RBBM_PERFCTR_RBBM_1_HI 0x00000095
340
341#define REG_A3XX_RBBM_PERFCTR_PC_0_LO 0x00000096
342
343#define REG_A3XX_RBBM_PERFCTR_PC_0_HI 0x00000097
344
345#define REG_A3XX_RBBM_PERFCTR_PC_1_LO 0x00000098
346
347#define REG_A3XX_RBBM_PERFCTR_PC_1_HI 0x00000099
348
349#define REG_A3XX_RBBM_PERFCTR_PC_2_LO 0x0000009a
350
351#define REG_A3XX_RBBM_PERFCTR_PC_2_HI 0x0000009b
352
353#define REG_A3XX_RBBM_PERFCTR_PC_3_LO 0x0000009c
354
355#define REG_A3XX_RBBM_PERFCTR_PC_3_HI 0x0000009d
356
357#define REG_A3XX_RBBM_PERFCTR_VFD_0_LO 0x0000009e
358
359#define REG_A3XX_RBBM_PERFCTR_VFD_0_HI 0x0000009f
360
361#define REG_A3XX_RBBM_PERFCTR_VFD_1_LO 0x000000a0
362
363#define REG_A3XX_RBBM_PERFCTR_VFD_1_HI 0x000000a1
364
365#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000a2
366
367#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000a3
368
369#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000a4
370
371#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000a5
372
373#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000a6
374
375#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000a7
376
377#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000a8
378
379#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000a9
380
381#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000aa
382
383#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000ab
384
385#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000ac
386
387#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000ad
388
389#define REG_A3XX_RBBM_PERFCTR_VPC_0_LO 0x000000ae
390
391#define REG_A3XX_RBBM_PERFCTR_VPC_0_HI 0x000000af
392
393#define REG_A3XX_RBBM_PERFCTR_VPC_1_LO 0x000000b0
394
395#define REG_A3XX_RBBM_PERFCTR_VPC_1_HI 0x000000b1
396
397#define REG_A3XX_RBBM_PERFCTR_TSE_0_LO 0x000000b2
398
399#define REG_A3XX_RBBM_PERFCTR_TSE_0_HI 0x000000b3
400
401#define REG_A3XX_RBBM_PERFCTR_TSE_1_LO 0x000000b4
402
403#define REG_A3XX_RBBM_PERFCTR_TSE_1_HI 0x000000b5
404
405#define REG_A3XX_RBBM_PERFCTR_RAS_0_LO 0x000000b6
406
407#define REG_A3XX_RBBM_PERFCTR_RAS_0_HI 0x000000b7
408
409#define REG_A3XX_RBBM_PERFCTR_RAS_1_LO 0x000000b8
410
411#define REG_A3XX_RBBM_PERFCTR_RAS_1_HI 0x000000b9
412
413#define REG_A3XX_RBBM_PERFCTR_UCHE_0_LO 0x000000ba
414
415#define REG_A3XX_RBBM_PERFCTR_UCHE_0_HI 0x000000bb
416
417#define REG_A3XX_RBBM_PERFCTR_UCHE_1_LO 0x000000bc
418
419#define REG_A3XX_RBBM_PERFCTR_UCHE_1_HI 0x000000bd
420
421#define REG_A3XX_RBBM_PERFCTR_UCHE_2_LO 0x000000be
422
423#define REG_A3XX_RBBM_PERFCTR_UCHE_2_HI 0x000000bf
424
425#define REG_A3XX_RBBM_PERFCTR_UCHE_3_LO 0x000000c0
426
427#define REG_A3XX_RBBM_PERFCTR_UCHE_3_HI 0x000000c1
428
429#define REG_A3XX_RBBM_PERFCTR_UCHE_4_LO 0x000000c2
430
431#define REG_A3XX_RBBM_PERFCTR_UCHE_4_HI 0x000000c3
432
433#define REG_A3XX_RBBM_PERFCTR_UCHE_5_LO 0x000000c4
434
435#define REG_A3XX_RBBM_PERFCTR_UCHE_5_HI 0x000000c5
436
437#define REG_A3XX_RBBM_PERFCTR_TP_0_LO 0x000000c6
438
439#define REG_A3XX_RBBM_PERFCTR_TP_0_HI 0x000000c7
440
441#define REG_A3XX_RBBM_PERFCTR_TP_1_LO 0x000000c8
442
443#define REG_A3XX_RBBM_PERFCTR_TP_1_HI 0x000000c9
444
445#define REG_A3XX_RBBM_PERFCTR_TP_2_LO 0x000000ca
446
447#define REG_A3XX_RBBM_PERFCTR_TP_2_HI 0x000000cb
448
449#define REG_A3XX_RBBM_PERFCTR_TP_3_LO 0x000000cc
450
451#define REG_A3XX_RBBM_PERFCTR_TP_3_HI 0x000000cd
452
453#define REG_A3XX_RBBM_PERFCTR_TP_4_LO 0x000000ce
454
455#define REG_A3XX_RBBM_PERFCTR_TP_4_HI 0x000000cf
456
457#define REG_A3XX_RBBM_PERFCTR_TP_5_LO 0x000000d0
458
459#define REG_A3XX_RBBM_PERFCTR_TP_5_HI 0x000000d1
460
461#define REG_A3XX_RBBM_PERFCTR_SP_0_LO 0x000000d2
462
463#define REG_A3XX_RBBM_PERFCTR_SP_0_HI 0x000000d3
464
465#define REG_A3XX_RBBM_PERFCTR_SP_1_LO 0x000000d4
466
467#define REG_A3XX_RBBM_PERFCTR_SP_1_HI 0x000000d5
468
469#define REG_A3XX_RBBM_PERFCTR_SP_2_LO 0x000000d6
470
471#define REG_A3XX_RBBM_PERFCTR_SP_2_HI 0x000000d7
472
473#define REG_A3XX_RBBM_PERFCTR_SP_3_LO 0x000000d8
474
475#define REG_A3XX_RBBM_PERFCTR_SP_3_HI 0x000000d9
476
477#define REG_A3XX_RBBM_PERFCTR_SP_4_LO 0x000000da
478
479#define REG_A3XX_RBBM_PERFCTR_SP_4_HI 0x000000db
480
481#define REG_A3XX_RBBM_PERFCTR_SP_5_LO 0x000000dc
482
483#define REG_A3XX_RBBM_PERFCTR_SP_5_HI 0x000000dd
484
485#define REG_A3XX_RBBM_PERFCTR_SP_6_LO 0x000000de
486
487#define REG_A3XX_RBBM_PERFCTR_SP_6_HI 0x000000df
488
489#define REG_A3XX_RBBM_PERFCTR_SP_7_LO 0x000000e0
490
491#define REG_A3XX_RBBM_PERFCTR_SP_7_HI 0x000000e1
492
493#define REG_A3XX_RBBM_PERFCTR_RB_0_LO 0x000000e2
494
495#define REG_A3XX_RBBM_PERFCTR_RB_0_HI 0x000000e3
496
497#define REG_A3XX_RBBM_PERFCTR_RB_1_LO 0x000000e4
498
499#define REG_A3XX_RBBM_PERFCTR_RB_1_HI 0x000000e5
500
501#define REG_A3XX_RBBM_PERFCTR_PWR_0_LO 0x000000ea
502
503#define REG_A3XX_RBBM_PERFCTR_PWR_0_HI 0x000000eb
504
505#define REG_A3XX_RBBM_PERFCTR_PWR_1_LO 0x000000ec
506
507#define REG_A3XX_RBBM_PERFCTR_PWR_1_HI 0x000000ed
508
509#define REG_A3XX_RBBM_RBBM_CTL 0x00000100
510
511#define REG_A3XX_RBBM_DEBUG_BUS_CTL 0x00000111
512
513#define REG_A3XX_RBBM_DEBUG_BUS_DATA_STATUS 0x00000112
514
515#define REG_A3XX_CP_PFP_UCODE_ADDR 0x000001c9
516
517#define REG_A3XX_CP_PFP_UCODE_DATA 0x000001ca
518
519#define REG_A3XX_CP_ROQ_ADDR 0x000001cc
520
521#define REG_A3XX_CP_ROQ_DATA 0x000001cd
522
523#define REG_A3XX_CP_MERCIU_ADDR 0x000001d1
524
525#define REG_A3XX_CP_MERCIU_DATA 0x000001d2
526
527#define REG_A3XX_CP_MERCIU_DATA2 0x000001d3
528
529#define REG_A3XX_CP_MEQ_ADDR 0x000001da
530
531#define REG_A3XX_CP_MEQ_DATA 0x000001db
532
533#define REG_A3XX_CP_PERFCOUNTER_SELECT 0x00000445
534
535#define REG_A3XX_CP_HW_FAULT 0x0000045c
536
537#define REG_A3XX_CP_PROTECT_CTRL 0x0000045e
538
539#define REG_A3XX_CP_PROTECT_STATUS 0x0000045f
540
541static inline uint32_t REG_A3XX_CP_PROTECT(uint32_t i0) { return 0x00000460 + 0x1*i0; }
542
543static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460 + 0x1*i0; }
544
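/* Illustrative sketch, not part of the generated header: rules-ng-ng emits
 * register arrays as inline address calculators of the form
 * base + stride * index, so element 3 of the CP_PROTECT array lives at
 * 0x00000460 + 0x1*3 = 0x00000463:
 */
static inline uint32_t example_a3xx_cp_protect_3(void)
{
	return REG_A3XX_CP_PROTECT_REG(3);	/* == 0x00000463 */
}
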
545#define REG_A3XX_CP_AHB_FAULT 0x0000054d
546
547#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040
548#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000
549#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
550#define A3XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
551#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000
552#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000
553#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000
554
555#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044
556#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
557#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0
558static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
559{
560 return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
561}
562#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00
563#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10
564static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
565{
566 return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
567}
568
569#define REG_A3XX_GRAS_CL_VPORT_XOFFSET 0x00002048
570#define A3XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff
571#define A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0
572static inline uint32_t A3XX_GRAS_CL_VPORT_XOFFSET(float val)
573{
574 return ((fui(val)) << A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_XOFFSET__MASK;
575}
576
577#define REG_A3XX_GRAS_CL_VPORT_XSCALE 0x00002049
578#define A3XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff
579#define A3XX_GRAS_CL_VPORT_XSCALE__SHIFT 0
580static inline uint32_t A3XX_GRAS_CL_VPORT_XSCALE(float val)
581{
582 return ((fui(val)) << A3XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_XSCALE__MASK;
583}
584
585#define REG_A3XX_GRAS_CL_VPORT_YOFFSET 0x0000204a
586#define A3XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff
587#define A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0
588static inline uint32_t A3XX_GRAS_CL_VPORT_YOFFSET(float val)
589{
590 return ((fui(val)) << A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_YOFFSET__MASK;
591}
592
593#define REG_A3XX_GRAS_CL_VPORT_YSCALE 0x0000204b
594#define A3XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff
595#define A3XX_GRAS_CL_VPORT_YSCALE__SHIFT 0
596static inline uint32_t A3XX_GRAS_CL_VPORT_YSCALE(float val)
597{
598 return ((fui(val)) << A3XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_YSCALE__MASK;
599}
600
601#define REG_A3XX_GRAS_CL_VPORT_ZOFFSET 0x0000204c
602#define A3XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff
603#define A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0
604static inline uint32_t A3XX_GRAS_CL_VPORT_ZOFFSET(float val)
605{
606 return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_ZOFFSET__MASK;
607}
608
609#define REG_A3XX_GRAS_CL_VPORT_ZSCALE 0x0000204d
610#define A3XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff
611#define A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0
612static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
613{
614 return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_ZSCALE__MASK;
615}
616
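/* Illustrative sketch, not part of the generated header: the viewport
 * helpers above depend on a fui() ("float to uint") helper that the driver
 * supplies elsewhere; it reinterprets the IEEE-754 bit pattern rather than
 * converting the value.  A minimal stand-in (the name example_fui and this
 * implementation are assumptions, not the driver's own code) could be:
 */
static inline uint32_t example_fui(float f)
{
	union { float f; uint32_t u; } x;
	x.f = f;
	return x.u;	/* raw bit pattern, e.g. 1.0f -> 0x3f800000 */
}
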
617#define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068
618
619#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069
620
621#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c
622#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff
623#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0
624static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
625{
626 return ((((uint32_t)(val * 40.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
627}
628
629#define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d
630#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
631#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
632static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
633{
634 return ((((uint32_t)(val * 44.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
635}
636
637#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
638#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
639#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
640#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007fc
641#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 2
642static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(uint32_t val)
643{
644 return ((val) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
645}
646#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
647
648#define REG_A3XX_GRAS_SC_CONTROL 0x00002072
649#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x000000f0
650#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 4
651static inline uint32_t A3XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
652{
653 return ((val) << A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
654}
655#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000f00
656#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 8
657static inline uint32_t A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES(enum a3xx_msaa_samples val)
658{
659 return ((val) << A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
660}
661#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
662#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
663static inline uint32_t A3XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
664{
665 return ((val) << A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
666}
667
668#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_TL 0x00002074
669#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
670#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
671#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
672static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
673{
674 return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
675}
676#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
677#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
678static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
679{
680 return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
681}
682
683#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_BR 0x00002075
684#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
685#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
686#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
687static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
688{
689 return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
690}
691#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
692#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
693static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
694{
695 return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
696}
697
698#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_TL 0x00002079
699#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
700#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
701#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
702static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
703{
704 return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
705}
706#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
707#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
708static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
709{
710 return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
711}
712
713#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000207a
714#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
715#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
716#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
717static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
718{
719 return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
720}
721#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
722#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
723static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
724{
725 return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
726}
727
728#define REG_A3XX_RB_MODE_CONTROL 0x000020c0
729#define A3XX_RB_MODE_CONTROL_GMEM_BYPASS 0x00000080
730#define A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK 0x00000700
731#define A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT 8
732static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
733{
734 return ((val) << A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT) & A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK;
735}
736#define A3XX_RB_MODE_CONTROL_MARB_CACHE_SPLIT_MODE 0x00008000
737#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000
738
739#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1
740#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0
741#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4
742static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
743{
744 return ((val >> 5) << A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT) & A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK;
745}
746#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
747#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
748#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
749#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
750static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
751{
752 return ((val) << A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK;
753}
754
755#define REG_A3XX_RB_MSAA_CONTROL 0x000020c2
756#define A3XX_RB_MSAA_CONTROL_DISABLE 0x00000400
757#define A3XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000f000
758#define A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 12
759static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLES(enum a3xx_msaa_samples val)
760{
761 return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLES__MASK;
762}
763#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK 0xffff0000
764#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT 16
765static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(uint32_t val)
766{
767 return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK;
768}
769
770#define REG_A3XX_UNKNOWN_20C3 0x000020c3
771
772static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
773
774static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
775#define A3XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
776#define A3XX_RB_MRT_CONTROL_BLEND 0x00000010
777#define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020
778#define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
779#define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
780static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(uint32_t val)
781{
782 return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK;
783}
784#define A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK 0x00003000
785#define A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT 12
786static inline uint32_t A3XX_RB_MRT_CONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
787{
788 return ((val) << A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT) & A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK;
789}
790#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
791#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
792static inline uint32_t A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
793{
794 return ((val) << A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
795}
796
797static inline uint32_t REG_A3XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020c5 + 0x4*i0; }
798#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f
799#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
800static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a3xx_color_fmt val)
801{
802 return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
803}
804#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0
805#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6
806static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a3xx_tile_mode val)
807{
808 return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
809}
810#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00000c00
811#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 10
812static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
813{
814 return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
815}
816#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xfffe0000
817#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17
818static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
819{
820 return ((val >> 5) << A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
821}
822
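/* Illustrative sketch, not part of the generated header: a full
 * RB_MRT_BUF_INFO value is the OR of the field helpers above.  Every name
 * used here (RB_R8G8B8A8_UNORM, LINEAR, WZYX) is defined earlier in this
 * file; the pitch helper drops the low five bits, so the byte pitch must
 * be a multiple of 32.
 */
static inline uint32_t example_a3xx_rb_mrt_buf_info(uint32_t pitch)
{
	return A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT(RB_R8G8B8A8_UNORM) |
		A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(LINEAR) |
		A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(WZYX) |
		A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(pitch);
}
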
823static inline uint32_t REG_A3XX_RB_MRT_BUF_BASE(uint32_t i0) { return 0x000020c6 + 0x4*i0; }
824#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK 0xfffffff0
825#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT 4
826static inline uint32_t A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE(uint32_t val)
827{
828 return ((val >> 5) << A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT) & A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK;
829}
830
831static inline uint32_t REG_A3XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020c7 + 0x4*i0; }
832#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
833#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
834static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
835{
836 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
837}
838#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
839#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
840static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
841{
842 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
843}
844#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
845#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
846static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
847{
848 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
849}
850#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
851#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
852static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
853{
854 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
855}
856#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
857#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
858static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
859{
860 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
861}
862#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
863#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
864static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
865{
866 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
867}
868#define A3XX_RB_MRT_BLEND_CONTROL_CLAMP_ENABLE 0x20000000
869
870#define REG_A3XX_RB_BLEND_RED 0x000020e4
871#define A3XX_RB_BLEND_RED_UINT__MASK 0x000000ff
872#define A3XX_RB_BLEND_RED_UINT__SHIFT 0
873static inline uint32_t A3XX_RB_BLEND_RED_UINT(uint32_t val)
874{
875 return ((val) << A3XX_RB_BLEND_RED_UINT__SHIFT) & A3XX_RB_BLEND_RED_UINT__MASK;
876}
877#define A3XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
878#define A3XX_RB_BLEND_RED_FLOAT__SHIFT 16
879static inline uint32_t A3XX_RB_BLEND_RED_FLOAT(float val)
880{
881 return ((util_float_to_half(val)) << A3XX_RB_BLEND_RED_FLOAT__SHIFT) & A3XX_RB_BLEND_RED_FLOAT__MASK;
882}
883
884#define REG_A3XX_RB_BLEND_GREEN 0x000020e5
885#define A3XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
886#define A3XX_RB_BLEND_GREEN_UINT__SHIFT 0
887static inline uint32_t A3XX_RB_BLEND_GREEN_UINT(uint32_t val)
888{
889 return ((val) << A3XX_RB_BLEND_GREEN_UINT__SHIFT) & A3XX_RB_BLEND_GREEN_UINT__MASK;
890}
891#define A3XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
892#define A3XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
893static inline uint32_t A3XX_RB_BLEND_GREEN_FLOAT(float val)
894{
895 return ((util_float_to_half(val)) << A3XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A3XX_RB_BLEND_GREEN_FLOAT__MASK;
896}
897
898#define REG_A3XX_RB_BLEND_BLUE 0x000020e6
899#define A3XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
900#define A3XX_RB_BLEND_BLUE_UINT__SHIFT 0
901static inline uint32_t A3XX_RB_BLEND_BLUE_UINT(uint32_t val)
902{
903 return ((val) << A3XX_RB_BLEND_BLUE_UINT__SHIFT) & A3XX_RB_BLEND_BLUE_UINT__MASK;
904}
905#define A3XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
906#define A3XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
907static inline uint32_t A3XX_RB_BLEND_BLUE_FLOAT(float val)
908{
909 return ((util_float_to_half(val)) << A3XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A3XX_RB_BLEND_BLUE_FLOAT__MASK;
910}
911
912#define REG_A3XX_RB_BLEND_ALPHA 0x000020e7
913#define A3XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
914#define A3XX_RB_BLEND_ALPHA_UINT__SHIFT 0
915static inline uint32_t A3XX_RB_BLEND_ALPHA_UINT(uint32_t val)
916{
917 return ((val) << A3XX_RB_BLEND_ALPHA_UINT__SHIFT) & A3XX_RB_BLEND_ALPHA_UINT__MASK;
918}
919#define A3XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
920#define A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
921static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val)
922{
923 return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK;
924}
925
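/* Illustrative note, not part of the generated header: each RB_BLEND_*
 * register carries its blend-colour channel twice, as an 8-bit UNORM value
 * in the low byte and as a half-float in the top 16 bits.
 * util_float_to_half() is a driver-provided fp32-to-fp16 conversion, not
 * defined in this file.  A caller would typically set both encodings from
 * the same value, e.g.:
 *
 *	A3XX_RB_BLEND_RED_UINT((uint32_t)(red * 255.0f)) |
 *	A3XX_RB_BLEND_RED_FLOAT(red);
 */
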
926#define REG_A3XX_UNKNOWN_20E8 0x000020e8
927
928#define REG_A3XX_UNKNOWN_20E9 0x000020e9
929
930#define REG_A3XX_UNKNOWN_20EA 0x000020ea
931
932#define REG_A3XX_UNKNOWN_20EB 0x000020eb
933
934#define REG_A3XX_RB_COPY_CONTROL 0x000020ec
935#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
936#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0
937static inline uint32_t A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val)
938{
939 return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
940}
941#define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
942#define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4
943static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
944{
945 return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
946}
947#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xfffffc00
948#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 10
949static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
950{
951 return ((val >> 10) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
952}
953
954#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed
955#define A3XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0
956#define A3XX_RB_COPY_DEST_BASE_BASE__SHIFT 4
957static inline uint32_t A3XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
958{
959 return ((val >> 5) << A3XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A3XX_RB_COPY_DEST_BASE_BASE__MASK;
960}
961
962#define REG_A3XX_RB_COPY_DEST_PITCH 0x000020ee
963#define A3XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff
964#define A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
965static inline uint32_t A3XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
966{
967 return ((val >> 5) << A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A3XX_RB_COPY_DEST_PITCH_PITCH__MASK;
968}
969
970#define REG_A3XX_RB_COPY_DEST_INFO 0x000020ef
971#define A3XX_RB_COPY_DEST_INFO_TILE__MASK 0x00000003
972#define A3XX_RB_COPY_DEST_INFO_TILE__SHIFT 0
973static inline uint32_t A3XX_RB_COPY_DEST_INFO_TILE(enum a3xx_tile_mode val)
974{
975 return ((val) << A3XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A3XX_RB_COPY_DEST_INFO_TILE__MASK;
976}
977#define A3XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc
978#define A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2
979static inline uint32_t A3XX_RB_COPY_DEST_INFO_FORMAT(enum a3xx_color_fmt val)
980{
981 return ((val) << A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A3XX_RB_COPY_DEST_INFO_FORMAT__MASK;
982}
983#define A3XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
984#define A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
985static inline uint32_t A3XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
986{
987 return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK;
988}
989#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
990#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
991static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
992{
993 return ((val) << A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK;
994}
995#define A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000
996#define A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18
997static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val)
998{
999 return ((val) << A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK;
1000}
1001
1002#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100
1003#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
1004#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
1005#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_ENABLE 0x00000008
1006#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
1007#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
1008static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
1009{
1010 return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
1011}
1012#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
1013#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
1014
1015#define REG_A3XX_UNKNOWN_2101 0x00002101
1016
1017#define REG_A3XX_RB_DEPTH_INFO 0x00002102
1018#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
1019#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
1020static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
1021{
1022 return ((val) << A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
1023}
1024#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff800
1025#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11
1026static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
1027{
1028 return ((val >> 10) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
1029}
1030
1031#define REG_A3XX_RB_DEPTH_PITCH 0x00002103
1032#define A3XX_RB_DEPTH_PITCH__MASK 0xffffffff
1033#define A3XX_RB_DEPTH_PITCH__SHIFT 0
1034static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val)
1035{
1036 return ((val >> 3) << A3XX_RB_DEPTH_PITCH__SHIFT) & A3XX_RB_DEPTH_PITCH__MASK;
1037}
1038
1039#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104
1040#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
1041#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000004
1042#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
1043#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
1044static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
1045{
1046 return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC__MASK;
1047}
1048#define A3XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
1049#define A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
1050static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
1051{
1052 return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL__MASK;
1053}
1054#define A3XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
1055#define A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
1056static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
1057{
1058 return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS__MASK;
1059}
1060#define A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
1061#define A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
1062static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
1063{
1064 return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
1065}
1066#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
1067#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
1068static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
1069{
1070 return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
1071}
1072#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
1073#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
1074static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
1075{
1076 return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
1077}
1078#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
1079#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
1080static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
1081{
1082 return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
1083}
1084#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
1085#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
1086static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
1087{
1088 return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
1089}
1090
1091#define REG_A3XX_UNKNOWN_2105 0x00002105
1092
1093#define REG_A3XX_UNKNOWN_2106 0x00002106
1094
1095#define REG_A3XX_UNKNOWN_2107 0x00002107
1096
1097#define REG_A3XX_RB_STENCILREFMASK 0x00002108
1098#define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
1099#define A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
1100static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
1101{
1102 return ((val) << A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILREF__MASK;
1103}
1104#define A3XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
1105#define A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
1106static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
1107{
1108 return ((val) << A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILMASK__MASK;
1109}
1110#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
1111#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
1112static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
1113{
1114 return ((val) << A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
1115}
1116
1117#define REG_A3XX_RB_STENCILREFMASK_BF 0x00002109
1118#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
1119#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
1120static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
1121{
1122 return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
1123}
1124#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
1125#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
1126static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
1127{
1128 return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
1129}
1130#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
1131#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
1132static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
1133{
1134 return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
1135}
1136
1137#define REG_A3XX_PA_SC_WINDOW_OFFSET 0x0000210e
1138#define A3XX_PA_SC_WINDOW_OFFSET_X__MASK 0x0000ffff
1139#define A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
1140static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_X(uint32_t val)
1141{
1142 return ((val) << A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_X__MASK;
1143}
1144#define A3XX_PA_SC_WINDOW_OFFSET_Y__MASK 0xffff0000
1145#define A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16
1146static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_Y(uint32_t val)
1147{
1148 return ((val) << A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_Y__MASK;
1149}
1150
1151#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4
1152
1153#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea
1154
1155#define REG_A3XX_PC_PRIM_VTX_CNTL 0x000021ec
1156#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK 0x0000001f
1157#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT 0
1158static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(uint32_t val)
1159{
1160 return ((val) << A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK;
1161}
1162#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x000000e0
1163#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 5
1164static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
1165{
1166 return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK;
1167}
1168#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000700
1169#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT 8
1170static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
1171{
1172 return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
1173}
1174#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
1175
1176#define REG_A3XX_PC_RESTART_INDEX 0x000021ed
1177
1178#define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200
1179#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
1180#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
1181static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
1182{
1183 return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
1184}
1185#define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
1186#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
1187#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
1188#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
1189#define A3XX_HLSQ_CONTROL_0_REG_CONSTSWITCHMODE 0x08000000
1190#define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
1191#define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
1192#define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
1193#define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
1194
1195#define REG_A3XX_HLSQ_CONTROL_1_REG 0x00002201
1196#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
1197#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
1198static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
1199{
1200 return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
1201}
1202#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
1203#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
1204
1205#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
1206#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
1207#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
1208static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
1209{
1210 return ((val) << A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
1211}
1212
1213#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203
1214
1215#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
1216#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
1217#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
1218static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
1219{
1220 return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
1221}
1222#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
1223#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
1224static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
1225{
1226 return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
1227}
1228#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
1229#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
1230static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
1231{
1232 return ((val) << A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
1233}
1234
1235#define REG_A3XX_HLSQ_FS_CONTROL_REG 0x00002205
1236#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
1237#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
1238static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
1239{
1240 return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
1241}
1242#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
1243#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
1244static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
1245{
1246 return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
1247}
1248#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
1249#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
1250static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
1251{
1252 return ((val) << A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
1253}
1254
1255#define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x00002206
1256#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
1257#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
1258static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
1259{
1260 return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK;
1261}
1262#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
1263#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
1264static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
1265{
1266 return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK;
1267}
1268
1269#define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x00002207
1270#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
1271#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
1272static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
1273{
1274 return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK;
1275}
1276#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
1277#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
1278static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
1279{
1280 return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK;
1281}
1282
1283#define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a
1284
1285#define REG_A3XX_HLSQ_CL_NDRANGE_1_REG 0x0000220b
1286
1287#define REG_A3XX_HLSQ_CL_NDRANGE_2_REG 0x0000220c
1288
1289#define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211
1290
1291#define REG_A3XX_HLSQ_CL_CONTROL_1_REG 0x00002212
1292
1293#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214
1294
1295#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215
1296
1297#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217
1298
1299#define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a
1300
1301#define REG_A3XX_VFD_CONTROL_0 0x00002240
1302#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x0003ffff
1303#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
1304static inline uint32_t A3XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
1305{
1306 return ((val) << A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
1307}
1308#define A3XX_VFD_CONTROL_0_PACKETSIZE__MASK 0x003c0000
1309#define A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT 18
1310static inline uint32_t A3XX_VFD_CONTROL_0_PACKETSIZE(uint32_t val)
1311{
1312 return ((val) << A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT) & A3XX_VFD_CONTROL_0_PACKETSIZE__MASK;
1313}
1314#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x07c00000
1315#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 22
1316static inline uint32_t A3XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
1317{
1318 return ((val) << A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
1319}
1320#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xf8000000
1321#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 27
1322static inline uint32_t A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
1323{
1324 return ((val) << A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
1325}
1326
1327#define REG_A3XX_VFD_CONTROL_1 0x00002241
1328#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
1329#define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
1330static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
1331{
1332 return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
1333}
1334#define A3XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
1335#define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
1336static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
1337{
1338 return ((val) << A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A3XX_VFD_CONTROL_1_REGID4VTX__MASK;
1339}
1340#define A3XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
1341#define A3XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
1342static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
1343{
1344 return ((val) << A3XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A3XX_VFD_CONTROL_1_REGID4INST__MASK;
1345}
1346
1347#define REG_A3XX_VFD_INDEX_MIN 0x00002242
1348
1349#define REG_A3XX_VFD_INDEX_MAX 0x00002243
1350
1351#define REG_A3XX_VFD_INSTANCEID_OFFSET 0x00002244
1352
1353#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
1354
1355static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
1356
1357static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
1358#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
1359#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
1360static inline uint32_t A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
1361{
1362 return ((val) << A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
1363}
1364#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80
1365#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
1366static inline uint32_t A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
1367{
1368 return ((val) << A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
1369}
1370#define A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00020000
1371#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK 0x00fc0000
1372#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT 18
1373static inline uint32_t A3XX_VFD_FETCH_INSTR_0_INDEXCODE(uint32_t val)
1374{
1375 return ((val) << A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK;
1376}
1377#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000
1378#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24
1379static inline uint32_t A3XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val)
1380{
1381 return ((val) << A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK;
1382}
1383
1384static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x00002247 + 0x2*i0; }
1385
1386static inline uint32_t REG_A3XX_VFD_DECODE(uint32_t i0) { return 0x00002266 + 0x1*i0; }
1387
1388static inline uint32_t REG_A3XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x00002266 + 0x1*i0; }
1389#define A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
1390#define A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
1391static inline uint32_t A3XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
1392{
1393 return ((val) << A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
1394}
1395#define A3XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
1396#define A3XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
1397#define A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
1398static inline uint32_t A3XX_VFD_DECODE_INSTR_FORMAT(enum a3xx_vtx_fmt val)
1399{
1400 return ((val) << A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A3XX_VFD_DECODE_INSTR_FORMAT__MASK;
1401}
1402#define A3XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
1403#define A3XX_VFD_DECODE_INSTR_REGID__SHIFT 12
1404static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
1405{
1406 return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
1407}
1408#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
1409#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
1410static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
1411{
1412 return ((val) << A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
1413}
1414#define A3XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
1415#define A3XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
1416
1417#define REG_A3XX_VFD_VS_THREADING_THRESHOLD 0x0000227e
1418#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK 0x0000000f
1419#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT 0
1420static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD(uint32_t val)
1421{
1422 return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK;
1423}
1424#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK 0x0000ff00
1425#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT 8
1426static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(uint32_t val)
1427{
1428 return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK;
1429}
1430
1431#define REG_A3XX_VPC_ATTR 0x00002280
1432#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x00000fff
1433#define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0
1434static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t val)
1435{
1436 return ((val) << A3XX_VPC_ATTR_TOTALATTR__SHIFT) & A3XX_VPC_ATTR_TOTALATTR__MASK;
1437}
1438#define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000
1439#define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12
1440static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t val)
1441{
1442 return ((val) << A3XX_VPC_ATTR_THRDASSIGN__SHIFT) & A3XX_VPC_ATTR_THRDASSIGN__MASK;
1443}
1444#define A3XX_VPC_ATTR_LMSIZE__MASK 0xf0000000
1445#define A3XX_VPC_ATTR_LMSIZE__SHIFT 28
1446static inline uint32_t A3XX_VPC_ATTR_LMSIZE(uint32_t val)
1447{
1448 return ((val) << A3XX_VPC_ATTR_LMSIZE__SHIFT) & A3XX_VPC_ATTR_LMSIZE__MASK;
1449}
1450
1451#define REG_A3XX_VPC_PACK 0x00002281
1452#define A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
1453#define A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
1454static inline uint32_t A3XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
1455{
1456 return ((val) << A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
1457}
1458#define A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
1459#define A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
1460static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
1461{
1462 return ((val) << A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
1463}
1464
1465static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + 0x1*i0; }
1466
1467static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; }
1468
1469static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; }
1470
1471static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00002286 + 0x1*i0; }
1472
1473#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0 0x0000228a
1474
1475#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_1 0x0000228b
1476
1477#define REG_A3XX_SP_SP_CTRL_REG 0x000022c0
1478#define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000
1479#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x000c0000
1480#define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18
1481static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val)
1482{
1483 return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK;
1484}
1485#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000
1486#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20
1487static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
1488{
1489 return ((val) << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK;
1490}
1491#define A3XX_SP_SP_CTRL_REG_LOMODE__MASK 0x00c00000
1492#define A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT 22
1493static inline uint32_t A3XX_SP_SP_CTRL_REG_LOMODE(uint32_t val)
1494{
1495 return ((val) << A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_LOMODE__MASK;
1496}
1497
1498#define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4
1499#define A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
1500#define A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
1501static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
1502{
1503 return ((val) << A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
1504}
1505#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
1506#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
1507static inline uint32_t A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
1508{
1509 return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK;
1510}
1511#define A3XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
1512#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
1513#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
1514static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
1515{
1516 return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
1517}
1518#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
1519#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
1520static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
1521{
1522 return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
1523}
1524#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
1525#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
1526static inline uint32_t A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
1527{
1528 return ((val) << A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
1529}
1530#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
1531#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
1532static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
1533{
1534 return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
1535}
1536#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
1537#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
1538#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
1539#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
1540static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
1541{
1542 return ((val) << A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG0_LENGTH__MASK;
1543}
1544
1545#define REG_A3XX_SP_VS_CTRL_REG1 0x000022c5
1546#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
1547#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0
1548static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
1549{
1550 return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
1551}
1552#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
1553#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
1554static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
1555{
1556 return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK;
1557}
1558#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x3f000000
1559#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
1560static inline uint32_t A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
1561{
1562 return ((val) << A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
1563}
1564
1565#define REG_A3XX_SP_VS_PARAM_REG 0x000022c6
1566#define A3XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff
1567#define A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0
1568static inline uint32_t A3XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
1569{
1570 return ((val) << A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_POSREGID__MASK;
1571}
1572#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00
1573#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8
1574static inline uint32_t A3XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
1575{
1576 return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
1577}
1578#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000
1579#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
1580static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
1581{
1582 return ((val) << A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
1583}
1584
1585static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
1586
1587static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
1588#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
1589#define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
1590static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
1591{
1592 return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK;
1593}
1594#define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
1595#define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
1596static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
1597{
1598 return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
1599}
1600#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
1601#define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
1602static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
1603{
1604 return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK;
1605}
1606#define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
1607#define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
1608static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
1609{
1610 return ((val) << A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
1611}
1612
1613static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
1614
1615static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
1616#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
1617#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
1618static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
1619{
1620 return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
1621}
1622#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
1623#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
1624static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
1625{
1626 return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
1627}
1628#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
1629#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
1630static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
1631{
1632 return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
1633}
1634#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
1635#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
1636static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
1637{
1638 return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
1639}
1640
1641#define REG_A3XX_SP_VS_OBJ_OFFSET_REG 0x000022d4
1642#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1643#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
1644static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
1645{
1646 return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
1647}
1648#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
1649#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
1650static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1651{
1652 return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1653}
1654
1655#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5
1656
1657#define REG_A3XX_SP_VS_PVT_MEM_CTRL_REG 0x000022d6
1658
1659#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7
1660
1661#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8
1662
1663#define REG_A3XX_SP_VS_LENGTH_REG 0x000022df
1664#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
1665#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT 0
1666static inline uint32_t A3XX_SP_VS_LENGTH_REG_SHADERLENGTH(uint32_t val)
1667{
1668 return ((val) << A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK;
1669}
1670
1671#define REG_A3XX_SP_FS_CTRL_REG0 0x000022e0
1672#define A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
1673#define A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
1674static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
1675{
1676 return ((val) << A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
1677}
1678#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
1679#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
1680static inline uint32_t A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
1681{
1682 return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK;
1683}
1684#define A3XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
1685#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
1686#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
1687static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
1688{
1689 return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
1690}
1691#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
1692#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
1693static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
1694{
1695 return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
1696}
1697#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
1698#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
1699static inline uint32_t A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
1700{
1701 return ((val) << A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
1702}
1703#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
1704#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
1705static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
1706{
1707 return ((val) << A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
1708}
1709#define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
1710#define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
1711#define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000
1712#define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24
1713static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val)
1714{
1715 return ((val) << A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG0_LENGTH__MASK;
1716}
1717
1718#define REG_A3XX_SP_FS_CTRL_REG1 0x000022e1
1719#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
1720#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
1721static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
1722{
1723 return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
1724}
1725#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
1726#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
1727static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
1728{
1729 return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK;
1730}
1731#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x00f00000
1732#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 20
1733static inline uint32_t A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
1734{
1735 return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK;
1736}
1737#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x3f000000
1738#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT 24
1739static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
1740{
1741 return ((val) << A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT) & A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK;
1742}
1743
1744#define REG_A3XX_SP_FS_OBJ_OFFSET_REG 0x000022e2
1745#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1746#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
1747static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
1748{
1749 return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
1750}
1751#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
1752#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
1753static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1754{
1755 return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1756}
1757
1758#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3
1759
1760#define REG_A3XX_SP_FS_PVT_MEM_CTRL_REG 0x000022e4
1761
1762#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5
1763
1764#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6
1765
1766#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x000022e8
1767
1768#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9
1769
1770#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec
1771
1772static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
1773
1774static inline uint32_t REG_A3XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
1775#define A3XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
1776#define A3XX_SP_FS_MRT_REG_REGID__SHIFT 0
1777static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
1778{
1779 return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK;
1780}
1781#define A3XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
1782
1783static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
1784
1785static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT_REG(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
1786#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK 0x0000003f
1787#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT 0
1788static inline uint32_t A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT(enum a3xx_color_fmt val)
1789{
1790 return ((val) << A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT) & A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK;
1791}
1792
1793#define REG_A3XX_SP_FS_LENGTH_REG 0x000022ff
1794#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
1795#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT 0
1796static inline uint32_t A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val)
1797{
1798 return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK;
1799}
1800
1801#define REG_A3XX_TPL1_TP_VS_TEX_OFFSET 0x00002340
1802#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
1803#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
1804static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
1805{
1806 return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK;
1807}
1808#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
1809#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
1810static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
1811{
1812 return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK;
1813}
1814#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
1815#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
1816static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
1817{
1818 return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK;
1819}
1820
1821#define REG_A3XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002341
1822
1823#define REG_A3XX_TPL1_TP_FS_TEX_OFFSET 0x00002342
1824#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
1825#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
1826static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
1827{
1828 return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK;
1829}
1830#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
1831#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
1832static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
1833{
1834 return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK;
1835}
1836#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
1837#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
1838static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
1839{
1840 return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK;
1841}
1842
1843#define REG_A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x00002343
1844
1845#define REG_A3XX_VBIF_CLKON 0x00003001
1846
1847#define REG_A3XX_VBIF_FIXED_SORT_EN 0x0000300c
1848
1849#define REG_A3XX_VBIF_FIXED_SORT_SEL0 0x0000300d
1850
1851#define REG_A3XX_VBIF_FIXED_SORT_SEL1 0x0000300e
1852
1853#define REG_A3XX_VBIF_ABIT_SORT 0x0000301c
1854
1855#define REG_A3XX_VBIF_ABIT_SORT_CONF 0x0000301d
1856
1857#define REG_A3XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
1858
1859#define REG_A3XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
1860
1861#define REG_A3XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
1862
1863#define REG_A3XX_VBIF_IN_WR_LIM_CONF0 0x00003030
1864
1865#define REG_A3XX_VBIF_IN_WR_LIM_CONF1 0x00003031
1866
1867#define REG_A3XX_VBIF_OUT_RD_LIM_CONF0 0x00003034
1868
1869#define REG_A3XX_VBIF_OUT_WR_LIM_CONF0 0x00003035
1870
1871#define REG_A3XX_VBIF_DDR_OUT_MAX_BURST 0x00003036
1872
1873#define REG_A3XX_VBIF_ARB_CTL 0x0000303c
1874
1875#define REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
1876
1877#define REG_A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x00003058
1878
1879#define REG_A3XX_VBIF_OUT_AXI_AOOO_EN 0x0000305e
1880
1881#define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f
1882
1883#define REG_A3XX_VSC_BIN_SIZE 0x00000c01
1884#define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
1885#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
1886static inline uint32_t A3XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
1887{
1888 return ((val >> 5) << A3XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A3XX_VSC_BIN_SIZE_WIDTH__MASK;
1889}
1890#define A3XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
1891#define A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
1892static inline uint32_t A3XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
1893{
1894 return ((val >> 5) << A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A3XX_VSC_BIN_SIZE_HEIGHT__MASK;
1895}
1896
1897#define REG_A3XX_VSC_SIZE_ADDRESS 0x00000c02
1898
1899static inline uint32_t REG_A3XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
1900
1901static inline uint32_t REG_A3XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
1902#define A3XX_VSC_PIPE_CONFIG_X__MASK 0x000003ff
1903#define A3XX_VSC_PIPE_CONFIG_X__SHIFT 0
1904static inline uint32_t A3XX_VSC_PIPE_CONFIG_X(uint32_t val)
1905{
1906 return ((val) << A3XX_VSC_PIPE_CONFIG_X__SHIFT) & A3XX_VSC_PIPE_CONFIG_X__MASK;
1907}
1908#define A3XX_VSC_PIPE_CONFIG_Y__MASK 0x000ffc00
1909#define A3XX_VSC_PIPE_CONFIG_Y__SHIFT 10
1910static inline uint32_t A3XX_VSC_PIPE_CONFIG_Y(uint32_t val)
1911{
1912 return ((val) << A3XX_VSC_PIPE_CONFIG_Y__SHIFT) & A3XX_VSC_PIPE_CONFIG_Y__MASK;
1913}
1914#define A3XX_VSC_PIPE_CONFIG_W__MASK 0x00f00000
1915#define A3XX_VSC_PIPE_CONFIG_W__SHIFT 20
1916static inline uint32_t A3XX_VSC_PIPE_CONFIG_W(uint32_t val)
1917{
1918 return ((val) << A3XX_VSC_PIPE_CONFIG_W__SHIFT) & A3XX_VSC_PIPE_CONFIG_W__MASK;
1919}
1920#define A3XX_VSC_PIPE_CONFIG_H__MASK 0x0f000000
1921#define A3XX_VSC_PIPE_CONFIG_H__SHIFT 24
1922static inline uint32_t A3XX_VSC_PIPE_CONFIG_H(uint32_t val)
1923{
1924 return ((val) << A3XX_VSC_PIPE_CONFIG_H__SHIFT) & A3XX_VSC_PIPE_CONFIG_H__MASK;
1925}
1926
1927static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
1928
1929static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
1930
1931#define REG_A3XX_UNKNOWN_0C3D 0x00000c3d
1932
1933#define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48
1934
1935#define REG_A3XX_PC_PERFCOUNTER1_SELECT 0x00000c49
1936
1937#define REG_A3XX_PC_PERFCOUNTER2_SELECT 0x00000c4a
1938
1939#define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b
1940
1941#define REG_A3XX_UNKNOWN_0C81 0x00000c81
1942
1943#define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88
1944
1945#define REG_A3XX_GRAS_PERFCOUNTER1_SELECT 0x00000c89
1946
1947#define REG_A3XX_GRAS_PERFCOUNTER2_SELECT 0x00000c8a
1948
1949#define REG_A3XX_GRAS_PERFCOUNTER3_SELECT 0x00000c8b
1950
1951static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
1952
1953static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_X(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
1954
1955static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Y(uint32_t i0) { return 0x00000ca1 + 0x4*i0; }
1956
1957static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Z(uint32_t i0) { return 0x00000ca2 + 0x4*i0; }
1958
1959static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x00000ca3 + 0x4*i0; }
1960
1961#define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0
1962
1963#define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6
1964
1965#define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7
1966
1967#define REG_A3XX_RB_WINDOW_SIZE 0x00000ce0
1968#define A3XX_RB_WINDOW_SIZE_WIDTH__MASK 0x00003fff
1969#define A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT 0
1970static inline uint32_t A3XX_RB_WINDOW_SIZE_WIDTH(uint32_t val)
1971{
1972 return ((val) << A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT) & A3XX_RB_WINDOW_SIZE_WIDTH__MASK;
1973}
1974#define A3XX_RB_WINDOW_SIZE_HEIGHT__MASK 0x0fffc000
1975#define A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT 14
1976static inline uint32_t A3XX_RB_WINDOW_SIZE_HEIGHT(uint32_t val)
1977{
1978 return ((val) << A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT) & A3XX_RB_WINDOW_SIZE_HEIGHT__MASK;
1979}
1980
1981#define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00
1982
1983#define REG_A3XX_HLSQ_PERFCOUNTER1_SELECT 0x00000e01
1984
1985#define REG_A3XX_HLSQ_PERFCOUNTER2_SELECT 0x00000e02
1986
1987#define REG_A3XX_HLSQ_PERFCOUNTER3_SELECT 0x00000e03
1988
1989#define REG_A3XX_HLSQ_PERFCOUNTER4_SELECT 0x00000e04
1990
1991#define REG_A3XX_HLSQ_PERFCOUNTER5_SELECT 0x00000e05
1992
1993#define REG_A3XX_UNKNOWN_0E43 0x00000e43
1994
1995#define REG_A3XX_VFD_PERFCOUNTER0_SELECT 0x00000e44
1996
1997#define REG_A3XX_VFD_PERFCOUNTER1_SELECT 0x00000e45
1998
1999#define REG_A3XX_VPC_VPC_DEBUG_RAM_SEL 0x00000e61
2000
2001#define REG_A3XX_VPC_VPC_DEBUG_RAM_READ 0x00000e62
2002
2003#define REG_A3XX_VPC_PERFCOUNTER0_SELECT 0x00000e64
2004
2005#define REG_A3XX_VPC_PERFCOUNTER1_SELECT 0x00000e65
2006
2007#define REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG 0x00000e82
2008
2009#define REG_A3XX_UCHE_PERFCOUNTER0_SELECT 0x00000e84
2010
2011#define REG_A3XX_UCHE_PERFCOUNTER1_SELECT 0x00000e85
2012
2013#define REG_A3XX_UCHE_PERFCOUNTER2_SELECT 0x00000e86
2014
2015#define REG_A3XX_UCHE_PERFCOUNTER3_SELECT 0x00000e87
2016
2017#define REG_A3XX_UCHE_PERFCOUNTER4_SELECT 0x00000e88
2018
2019#define REG_A3XX_UCHE_PERFCOUNTER5_SELECT 0x00000e89
2020
2021#define REG_A3XX_UCHE_CACHE_INVALIDATE0_REG 0x00000ea0
2022#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK 0x0fffffff
2023#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT 0
2024static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(uint32_t val)
2025{
2026 return ((val) << A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK;
2027}
2028
2029#define REG_A3XX_UCHE_CACHE_INVALIDATE1_REG 0x00000ea1
2030#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK 0x0fffffff
2031#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT 0
2032static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR(uint32_t val)
2033{
2034 return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK;
2035}
2036#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK 0x30000000
2037#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT 28
2038static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_opcode val)
2039{
2040 return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK;
2041}
2042#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000
2043
2044#define REG_A3XX_SP_PERFCOUNTER0_SELECT 0x00000ec4
2045
2046#define REG_A3XX_SP_PERFCOUNTER1_SELECT 0x00000ec5
2047
2048#define REG_A3XX_SP_PERFCOUNTER2_SELECT 0x00000ec6
2049
2050#define REG_A3XX_SP_PERFCOUNTER3_SELECT 0x00000ec7
2051
2052#define REG_A3XX_SP_PERFCOUNTER4_SELECT 0x00000ec8
2053
2054#define REG_A3XX_SP_PERFCOUNTER5_SELECT 0x00000ec9
2055
2056#define REG_A3XX_SP_PERFCOUNTER6_SELECT 0x00000eca
2057
2058#define REG_A3XX_SP_PERFCOUNTER7_SELECT 0x00000ecb
2059
2060#define REG_A3XX_UNKNOWN_0EE0 0x00000ee0
2061
2062#define REG_A3XX_UNKNOWN_0F03 0x00000f03
2063
2064#define REG_A3XX_TP_PERFCOUNTER0_SELECT 0x00000f04
2065
2066#define REG_A3XX_TP_PERFCOUNTER1_SELECT 0x00000f05
2067
2068#define REG_A3XX_TP_PERFCOUNTER2_SELECT 0x00000f06
2069
2070#define REG_A3XX_TP_PERFCOUNTER3_SELECT 0x00000f07
2071
2072#define REG_A3XX_TP_PERFCOUNTER4_SELECT 0x00000f08
2073
2074#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09
2075
2076#define REG_A3XX_TEX_SAMP_0 0x00000000
2077#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
2078#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2
2079static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val)
2080{
2081 return ((val) << A3XX_TEX_SAMP_0_XY_MAG__SHIFT) & A3XX_TEX_SAMP_0_XY_MAG__MASK;
2082}
2083#define A3XX_TEX_SAMP_0_XY_MIN__MASK 0x00000030
2084#define A3XX_TEX_SAMP_0_XY_MIN__SHIFT 4
2085static inline uint32_t A3XX_TEX_SAMP_0_XY_MIN(enum a3xx_tex_filter val)
2086{
2087 return ((val) << A3XX_TEX_SAMP_0_XY_MIN__SHIFT) & A3XX_TEX_SAMP_0_XY_MIN__MASK;
2088}
2089#define A3XX_TEX_SAMP_0_WRAP_S__MASK 0x000001c0
2090#define A3XX_TEX_SAMP_0_WRAP_S__SHIFT 6
2091static inline uint32_t A3XX_TEX_SAMP_0_WRAP_S(enum a3xx_tex_clamp val)
2092{
2093 return ((val) << A3XX_TEX_SAMP_0_WRAP_S__SHIFT) & A3XX_TEX_SAMP_0_WRAP_S__MASK;
2094}
2095#define A3XX_TEX_SAMP_0_WRAP_T__MASK 0x00000e00
2096#define A3XX_TEX_SAMP_0_WRAP_T__SHIFT 9
2097static inline uint32_t A3XX_TEX_SAMP_0_WRAP_T(enum a3xx_tex_clamp val)
2098{
2099 return ((val) << A3XX_TEX_SAMP_0_WRAP_T__SHIFT) & A3XX_TEX_SAMP_0_WRAP_T__MASK;
2100}
2101#define A3XX_TEX_SAMP_0_WRAP_R__MASK 0x00007000
2102#define A3XX_TEX_SAMP_0_WRAP_R__SHIFT 12
2103static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
2104{
2105 return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK;
2106}
2107#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
2108
2109#define REG_A3XX_TEX_SAMP_1 0x00000001
2110
2111#define REG_A3XX_TEX_CONST_0 0x00000000
2112#define A3XX_TEX_CONST_0_TILED 0x00000001
2113#define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
2114#define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4
2115static inline uint32_t A3XX_TEX_CONST_0_SWIZ_X(enum a3xx_tex_swiz val)
2116{
2117 return ((val) << A3XX_TEX_CONST_0_SWIZ_X__SHIFT) & A3XX_TEX_CONST_0_SWIZ_X__MASK;
2118}
2119#define A3XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
2120#define A3XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
2121static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Y(enum a3xx_tex_swiz val)
2122{
2123 return ((val) << A3XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Y__MASK;
2124}
2125#define A3XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
2126#define A3XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
2127static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Z(enum a3xx_tex_swiz val)
2128{
2129 return ((val) << A3XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Z__MASK;
2130}
2131#define A3XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
2132#define A3XX_TEX_CONST_0_SWIZ_W__SHIFT 13
2133static inline uint32_t A3XX_TEX_CONST_0_SWIZ_W(enum a3xx_tex_swiz val)
2134{
2135 return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK;
2136}
2137#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000
2138#define A3XX_TEX_CONST_0_FMT__SHIFT 22
2139static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
2140{
2141 return ((val) << A3XX_TEX_CONST_0_FMT__SHIFT) & A3XX_TEX_CONST_0_FMT__MASK;
2142}
2143#define A3XX_TEX_CONST_0_TYPE__MASK 0xc0000000
2144#define A3XX_TEX_CONST_0_TYPE__SHIFT 30
2145static inline uint32_t A3XX_TEX_CONST_0_TYPE(enum a3xx_tex_type val)
2146{
2147 return ((val) << A3XX_TEX_CONST_0_TYPE__SHIFT) & A3XX_TEX_CONST_0_TYPE__MASK;
2148}
2149
2150#define REG_A3XX_TEX_CONST_1 0x00000001
2151#define A3XX_TEX_CONST_1_HEIGHT__MASK 0x00003fff
2152#define A3XX_TEX_CONST_1_HEIGHT__SHIFT 0
2153static inline uint32_t A3XX_TEX_CONST_1_HEIGHT(uint32_t val)
2154{
2155 return ((val) << A3XX_TEX_CONST_1_HEIGHT__SHIFT) & A3XX_TEX_CONST_1_HEIGHT__MASK;
2156}
2157#define A3XX_TEX_CONST_1_WIDTH__MASK 0x0fffc000
2158#define A3XX_TEX_CONST_1_WIDTH__SHIFT 14
2159static inline uint32_t A3XX_TEX_CONST_1_WIDTH(uint32_t val)
2160{
2161 return ((val) << A3XX_TEX_CONST_1_WIDTH__SHIFT) & A3XX_TEX_CONST_1_WIDTH__MASK;
2162}
2163#define A3XX_TEX_CONST_1_FETCHSIZE__MASK 0xf0000000
2164#define A3XX_TEX_CONST_1_FETCHSIZE__SHIFT 28
2165static inline uint32_t A3XX_TEX_CONST_1_FETCHSIZE(enum a3xx_tex_fetchsize val)
2166{
2167 return ((val) << A3XX_TEX_CONST_1_FETCHSIZE__SHIFT) & A3XX_TEX_CONST_1_FETCHSIZE__MASK;
2168}
2169
2170#define REG_A3XX_TEX_CONST_2 0x00000002
2171#define A3XX_TEX_CONST_2_INDX__MASK 0x000000ff
2172#define A3XX_TEX_CONST_2_INDX__SHIFT 0
2173static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val)
2174{
2175 return ((val) << A3XX_TEX_CONST_2_INDX__SHIFT) & A3XX_TEX_CONST_2_INDX__MASK;
2176}
2177#define A3XX_TEX_CONST_2_PITCH__MASK 0x3ffff000
2178#define A3XX_TEX_CONST_2_PITCH__SHIFT 12
2179static inline uint32_t A3XX_TEX_CONST_2_PITCH(uint32_t val)
2180{
2181 return ((val) << A3XX_TEX_CONST_2_PITCH__SHIFT) & A3XX_TEX_CONST_2_PITCH__MASK;
2182}
2183#define A3XX_TEX_CONST_2_SWAP__MASK 0xc0000000
2184#define A3XX_TEX_CONST_2_SWAP__SHIFT 30
2185static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
2186{
2187 return ((val) << A3XX_TEX_CONST_2_SWAP__SHIFT) & A3XX_TEX_CONST_2_SWAP__MASK;
2188}
2189
2190#define REG_A3XX_TEX_CONST_3 0x00000003
2191
2192
2193#endif /* A3XX_XML */
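
The helpers above follow a single pattern: each multi-bit field gets a __MASK/__SHIFT pair plus an inline packer that shifts and then masks, so a complete register word is built by OR-ing packers together (single-bit flags are bare #defines). One wrinkle: the A3XX_VSC_BIN_SIZE helpers shift the incoming value right by 5 first, which suggests bin width/height are programmed in 32-pixel units. A minimal sketch of composing a word, assuming this header is included (the function name and field values are illustrative only):

static uint32_t pack_vsc_pipe_config(uint32_t x, uint32_t y,
		uint32_t w, uint32_t h)
{
	/* Fields occupy disjoint bit ranges, so a plain OR composes them. */
	return A3XX_VSC_PIPE_CONFIG_X(x) |
		A3XX_VSC_PIPE_CONFIG_Y(y) |
		A3XX_VSC_PIPE_CONFIG_W(w) |
		A3XX_VSC_PIPE_CONFIG_H(h);
}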
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
new file mode 100644
index 000000000000..035bd13dc8bd
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -0,0 +1,502 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "a3xx_gpu.h"
19
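/* IRQ sources unmasked at init: fault/error conditions plus the CP
 * ring/IB completion interrupts; a3xx_irq() below clears whatever is
 * latched and retires completed submits.
 */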
20#define A3XX_INT0_MASK \
21 (A3XX_INT0_RBBM_AHB_ERROR | \
22 A3XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
23 A3XX_INT0_CP_T0_PACKET_IN_IB | \
24 A3XX_INT0_CP_OPCODE_ERROR | \
25 A3XX_INT0_CP_RESERVED_BIT_ERROR | \
26 A3XX_INT0_CP_HW_FAULT | \
27 A3XX_INT0_CP_IB1_INT | \
28 A3XX_INT0_CP_IB2_INT | \
29 A3XX_INT0_CP_RB_INT | \
30 A3XX_INT0_CP_REG_PROTECT_FAULT | \
31 A3XX_INT0_CP_AHB_ERROR_HALT | \
32 A3XX_INT0_UCHE_OOB_ACCESS)
33
34static struct platform_device *a3xx_pdev;
35
36static void a3xx_me_init(struct msm_gpu *gpu)
37{
38 struct msm_ringbuffer *ring = gpu->rb;
39
40 OUT_PKT3(ring, CP_ME_INIT, 17);
41 OUT_RING(ring, 0x000003f7);
42 OUT_RING(ring, 0x00000000);
43 OUT_RING(ring, 0x00000000);
44 OUT_RING(ring, 0x00000000);
45 OUT_RING(ring, 0x00000080);
46 OUT_RING(ring, 0x00000100);
47 OUT_RING(ring, 0x00000180);
48 OUT_RING(ring, 0x00006600);
49 OUT_RING(ring, 0x00000150);
50 OUT_RING(ring, 0x0000014e);
51 OUT_RING(ring, 0x00000154);
52 OUT_RING(ring, 0x00000001);
53 OUT_RING(ring, 0x00000000);
54 OUT_RING(ring, 0x00000000);
55 OUT_RING(ring, 0x00000000);
56 OUT_RING(ring, 0x00000000);
57 OUT_RING(ring, 0x00000000);
58
59 gpu->funcs->flush(gpu);
60 gpu->funcs->idle(gpu);
61}
62
63static int a3xx_hw_init(struct msm_gpu *gpu)
64{
65 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
66 uint32_t *ptr, len;
67 int i, ret;
68
69 DBG("%s", gpu->name);
70
71 if (adreno_is_a305(adreno_gpu)) {
72 /* Set up 16 deep read/write request queues: */
73 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
74 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
75 gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
76 gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
77 gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
78 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
79 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
80 /* Enable WR-REQ: */
81 gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
82 /* Set up round robin arbitration between both AXI ports: */
83 gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
84 /* Set up AOOO: */
85 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
86 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
87
88 } else if (adreno_is_a320(adreno_gpu)) {
89 /* Set up 16 deep read/write request queues: */
90 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
91 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
92 gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
93 gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
94 gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
95 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
96 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
97 /* Enable WR-REQ: */
98 gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
99 /* Set up round robin arbitration between both AXI ports: */
100 gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
101 /* Set up AOOO: */
102 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
103 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
104 /* Enable 1K sort: */
105 gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
106 gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
107
108 } else if (adreno_is_a330(adreno_gpu)) {
109 /* Set up 24 deep read/write request queues: */
110 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
111 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818);
112 gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818);
113 gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818);
114 gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
115 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
116 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818);
117 /* Enable WR-REQ: */
118 gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
119 /* Set up round robin arbitration between both AXI ports: */
120 gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
121 /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
122 gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
123 /* Set up AOOO: */
124 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000ffff);
125 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0xffffffff);
126 /* Enable 1K sort: */
127 gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001ffff);
128 gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
129 /* Disable VBIF clock gating. This allows AXI to run at a
130 * higher frequency than the GPU:
131 */
132 gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001);
133
134 } else {
135 BUG();
136 }
137
138 /* Make all blocks contribute to the GPU BUSY perf counter: */
139 gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
140
141 /* Tune the hysteresis counters for SP and CP idle detection: */
142 gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10);
143 gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
144
145 /* Enable the RBBM error reporting bits. This lets us get
146 * useful information on failure:
147 */
148 gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001);
149
150 /* Enable AHB error reporting: */
151 gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff);
152
153 /* Turn on the power counters: */
154 gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000);
155
156 /* Turn on hang detection - this spews a lot of useful information
157 * into the RBBM registers on a hang:
158 */
159 gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff);
160
161 /* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0): */
162 gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
163
164 /* Enable Clock gating: */
165 gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
166
167 /* Set the OCMEM base address for A330 */
168//TODO:
169// if (adreno_is_a330(adreno_gpu)) {
170// gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
171// (unsigned int)(a3xx_gpu->ocmem_base >> 14));
172// }
173
174 /* Turn on performance counters: */
175 gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);
176
177 /* Set SP perfcounter 7 to count SP_FS_FULL_ALU_INSTRUCTIONS;
178 * we will use this to augment our hang detection:
179 */
180 gpu_write(gpu, REG_A3XX_SP_PERFCOUNTER7_SELECT,
181 SP_FS_FULL_ALU_INSTRUCTIONS);
182
183 gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK);
184
185 ret = adreno_hw_init(gpu);
186 if (ret)
187 return ret;
188
189 /* setup access protection: */
190 gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);
191
192 /* RBBM registers */
193 gpu_write(gpu, REG_A3XX_CP_PROTECT(0), 0x63000040);
194 gpu_write(gpu, REG_A3XX_CP_PROTECT(1), 0x62000080);
195 gpu_write(gpu, REG_A3XX_CP_PROTECT(2), 0x600000cc);
196 gpu_write(gpu, REG_A3XX_CP_PROTECT(3), 0x60000108);
197 gpu_write(gpu, REG_A3XX_CP_PROTECT(4), 0x64000140);
198 gpu_write(gpu, REG_A3XX_CP_PROTECT(5), 0x66000400);
199
200 /* CP registers */
201 gpu_write(gpu, REG_A3XX_CP_PROTECT(6), 0x65000700);
202 gpu_write(gpu, REG_A3XX_CP_PROTECT(7), 0x610007d8);
203 gpu_write(gpu, REG_A3XX_CP_PROTECT(8), 0x620007e0);
204 gpu_write(gpu, REG_A3XX_CP_PROTECT(9), 0x61001178);
205 gpu_write(gpu, REG_A3XX_CP_PROTECT(10), 0x64001180);
206
207 /* RB registers */
208 gpu_write(gpu, REG_A3XX_CP_PROTECT(11), 0x60003300);
209
210 /* VBIF registers */
211 gpu_write(gpu, REG_A3XX_CP_PROTECT(12), 0x6b00c000);
212
213 /* NOTE: PM4/micro-engine firmware registers look to be the same
214 * for a2xx and a3xx.. we could possibly push that part down to
215 * adreno_gpu base class. Or push both PM4 and PFP but
216 * parameterize the pfp ucode addr/data registers..
217 */
218
219 /* Load PM4: */
220 ptr = (uint32_t *)(adreno_gpu->pm4->data);
221 len = adreno_gpu->pm4->size / 4;
222 DBG("loading PM4 ucode version: %u", ptr[0]);
223
224 gpu_write(gpu, REG_AXXX_CP_DEBUG,
225 AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
226 AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
227 gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
228 for (i = 1; i < len; i++)
229 gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
230
231 /* Load PFP: */
232 ptr = (uint32_t *)(adreno_gpu->pfp->data);
233 len = adreno_gpu->pfp->size / 4;
234 DBG("loading PFP ucode version: %u", ptr[0]);
235
236 gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
237 for (i = 1; i < len; i++)
238 gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
239
240 /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
241 if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu))
242 gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
243 AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
244 AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
245 AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
246
248 /* clear ME_HALT to start micro engine */
249 gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
250
251 a3xx_me_init(gpu);
252
253 return 0;
254}
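
Everything in a3xx_hw_init() is programmed through gpu_write(), which is expected to be a thin MMIO accessor over the ioremapped register window; the REG_A3XX_* offsets are dword indices, so they scale by 4 to a byte address. A sketch of the assumed helpers (the real ones live in the shared msm_gpu code, and gpu->mmio holding the mapped base is an assumption here):

static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
	/* reg is a dword offset into the register window */
	writel(data, gpu->mmio + (reg << 2));
}

static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
	return readl(gpu->mmio + (reg << 2));
}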
255
256static void a3xx_destroy(struct msm_gpu *gpu)
257{
258 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
259 struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
260
261 DBG("%s", gpu->name);
262
263 adreno_gpu_cleanup(adreno_gpu);
264 put_device(&a3xx_gpu->pdev->dev);
265 kfree(a3xx_gpu);
266}
267
268static void a3xx_idle(struct msm_gpu *gpu)
269{
270 unsigned long t;
271
272 /* wait for ringbuffer to drain: */
273 adreno_idle(gpu);
274
275 t = jiffies + ADRENO_IDLE_TIMEOUT;
276
277 /* then wait for GPU to finish: */
278 do {
279 uint32_t rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
280 if (!(rbbm_status & A3XX_RBBM_STATUS_GPU_BUSY))
281 return;
282 } while (time_before(jiffies, t));
283
284 DRM_ERROR("timeout waiting for %s to idle!\n", gpu->name);
285
286 /* TODO maybe we need to reset GPU here to recover from hang? */
287}
288
289static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
290{
291 uint32_t status;
292
293 status = gpu_read(gpu, REG_A3XX_RBBM_INT_0_STATUS);
294 DBG("%s: %08x", gpu->name, status);
295
296 // TODO
297
298 gpu_write(gpu, REG_A3XX_RBBM_INT_CLEAR_CMD, status);
299
300 msm_gpu_retire(gpu);
301
302 return IRQ_HANDLED;
303}
304
305#ifdef CONFIG_DEBUG_FS
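/* Inclusive { start, end } pairs of register offsets that are safe to
 * read; a3xx_show() below dumps each range two entries at a time.
 */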
306static const unsigned int a3xx_registers[] = {
307 0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
308 0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
309 0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5,
310 0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1,
311 0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd,
312 0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff,
313 0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f,
314 0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f,
315 0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
316 0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
317 0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
318 0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05,
319 0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65,
320 0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
321 0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
322 0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
323 0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075,
324 0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
325 0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
326 0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
327 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
328 0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
329 0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
330 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
331 0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
332 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
333 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
334 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
335 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
336 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
337 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
338 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
339 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
340 0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d,
341 0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036,
342 0x303c, 0x303c, 0x305e, 0x305f,
343};
344
345static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
346{
347 int i;
348
349 adreno_show(gpu, m);
350 seq_printf(m, "status: %08x\n",
351 gpu_read(gpu, REG_A3XX_RBBM_STATUS));
352
353 /* dump these out in a form that can be parsed by demsm: */
354 seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
355 for (i = 0; i < ARRAY_SIZE(a3xx_registers); i += 2) {
356 uint32_t start = a3xx_registers[i];
357 uint32_t end = a3xx_registers[i+1];
358 uint32_t addr;
359
360 for (addr = start; addr <= end; addr++) {
361 uint32_t val = gpu_read(gpu, addr);
362 seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
363 }
364 }
365}
366#endif
367
368static const struct adreno_gpu_funcs funcs = {
369 .base = {
370 .get_param = adreno_get_param,
371 .hw_init = a3xx_hw_init,
372 .pm_suspend = msm_gpu_pm_suspend,
373 .pm_resume = msm_gpu_pm_resume,
374 .recover = adreno_recover,
375 .last_fence = adreno_last_fence,
376 .submit = adreno_submit,
377 .flush = adreno_flush,
378 .idle = a3xx_idle,
379 .irq = a3xx_irq,
380 .destroy = a3xx_destroy,
381#ifdef CONFIG_DEBUG_FS
382 .show = a3xx_show,
383#endif
384 },
385};
386
387struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
388{
389 struct a3xx_gpu *a3xx_gpu = NULL;
390 struct msm_gpu *gpu;
391 struct platform_device *pdev = a3xx_pdev;
392 struct adreno_platform_config *config;
393 int ret;
394
395 if (!pdev) {
396 dev_err(dev->dev, "no a3xx device\n");
397 ret = -ENXIO;
398 goto fail;
399 }
400
401 config = pdev->dev.platform_data;
402
403 a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL);
404 if (!a3xx_gpu) {
405 ret = -ENOMEM;
406 goto fail;
407 }
408
409 gpu = &a3xx_gpu->base.base;
410
411 get_device(&pdev->dev);
412 a3xx_gpu->pdev = pdev;
413
414 gpu->fast_rate = config->fast_rate;
415 gpu->slow_rate = config->slow_rate;
416 gpu->bus_freq = config->bus_freq;
417
418 DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
419 gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
420
421 ret = adreno_gpu_init(dev, pdev, &a3xx_gpu->base,
422 &funcs, config->rev);
423 if (ret)
424 goto fail;
425
426 return &a3xx_gpu->base.base;
427
428fail:
429 if (a3xx_gpu)
430 a3xx_destroy(&a3xx_gpu->base.base);
431
432 return ERR_PTR(ret);
433}
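
a3xx_gpu_init() returns an ERR_PTR-encoded result, so callers should test it with IS_ERR() rather than against NULL. A hedged sketch of a call site (the warning text and the choice to continue without a GPU are assumptions; the real caller sits elsewhere in the msm driver):

	struct msm_gpu *gpu = a3xx_gpu_init(dev);

	if (IS_ERR(gpu)) {
		dev_warn(dev->dev, "failed to init a3xx gpu: %ld\n",
				PTR_ERR(gpu));
		gpu = NULL;	/* not fatal; the display side can still come up */
	}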
434
435/*
436 * The a3xx device:
437 */
438
439static int a3xx_probe(struct platform_device *pdev)
440{
441 static struct adreno_platform_config config = {};
442#ifdef CONFIG_OF
443 /* TODO */
444#else
445 uint32_t version = socinfo_get_version();
446 if (cpu_is_apq8064ab()) {
447 config.fast_rate = 450000000;
448 config.slow_rate = 27000000;
449 config.bus_freq = 4;
450 config.rev = ADRENO_REV(3, 2, 1, 0);
451 } else if (cpu_is_apq8064() || cpu_is_msm8960ab()) {
452 config.fast_rate = 400000000;
453 config.slow_rate = 27000000;
454 config.bus_freq = 4;
455
456 if (SOCINFO_VERSION_MAJOR(version) == 2)
457 config.rev = ADRENO_REV(3, 2, 0, 2);
458 else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
459 (SOCINFO_VERSION_MINOR(version) == 1))
460 config.rev = ADRENO_REV(3, 2, 0, 1);
461 else
462 config.rev = ADRENO_REV(3, 2, 0, 0);
463
464 } else if (cpu_is_msm8930()) {
465 config.fast_rate = 400000000;
466 config.slow_rate = 27000000;
467 config.bus_freq = 3;
468
469 if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
470 (SOCINFO_VERSION_MINOR(version) == 2))
471 config.rev = ADRENO_REV(3, 0, 5, 2);
472 else
473 config.rev = ADRENO_REV(3, 0, 5, 0);
474
475 }
476#endif
477 pdev->dev.platform_data = &config;
478 a3xx_pdev = pdev;
479 return 0;
480}
481
482static int a3xx_remove(struct platform_device *pdev)
483{
484 a3xx_pdev = NULL;
485 return 0;
486}
487
488static struct platform_driver a3xx_driver = {
489 .probe = a3xx_probe,
490 .remove = a3xx_remove,
491 .driver.name = "kgsl-3d0",
492};
493
494void __init a3xx_register(void)
495{
496 platform_driver_register(&a3xx_driver);
497}
498
499void __exit a3xx_unregister(void)
500{
501 platform_driver_unregister(&a3xx_driver);
502}
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
new file mode 100644
index 000000000000..32c398c2d00a
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __A3XX_GPU_H__
19#define __A3XX_GPU_H__
20
21#include "adreno_gpu.h"
22#include "a3xx.xml.h"
23
24struct a3xx_gpu {
25 struct adreno_gpu base;
26 struct platform_device *pdev;
27};
28#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
29
30#endif /* __A3XX_GPU_H__ */
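
to_a3xx_gpu() is the usual container_of layering: struct msm_gpu is embedded as adreno_gpu.base, which is itself embedded as a3xx_gpu.base, so two hops recover the wrapper from the core object, exactly as a3xx_destroy() does above. A minimal sketch (the helper name here is hypothetical, and to_adreno_gpu() is assumed from adreno_gpu.h):

static inline struct a3xx_gpu *msm_gpu_to_a3xx(struct msm_gpu *gpu)
{
	/* msm_gpu -> adreno_gpu -> a3xx_gpu, one container_of per level */
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	return to_a3xx_gpu(adreno_gpu);
}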
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
new file mode 100644
index 000000000000..61979d458ac0
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -0,0 +1,432 @@
1#ifndef ADRENO_COMMON_XML
2#define ADRENO_COMMON_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
17
18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark)
20
21Permission is hereby granted, free of charge, to any person obtaining
22a copy of this software and associated documentation files (the
23"Software"), to deal in the Software without restriction, including
24without limitation the rights to use, copy, modify, merge, publish,
25distribute, sublicense, and/or sell copies of the Software, and to
26permit persons to whom the Software is furnished to do so, subject to
27the following conditions:
28
29The above copyright notice and this permission notice (including the
30next paragraph) shall be included in all copies or substantial
31portions of the Software.
32
33THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
35MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
36IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
37LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
38OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
39WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40*/
41
42
43enum adreno_pa_su_sc_draw {
44 PC_DRAW_POINTS = 0,
45 PC_DRAW_LINES = 1,
46 PC_DRAW_TRIANGLES = 2,
47};
48
49enum adreno_compare_func {
50 FUNC_NEVER = 0,
51 FUNC_LESS = 1,
52 FUNC_EQUAL = 2,
53 FUNC_LEQUAL = 3,
54 FUNC_GREATER = 4,
55 FUNC_NOTEQUAL = 5,
56 FUNC_GEQUAL = 6,
57 FUNC_ALWAYS = 7,
58};
59
60enum adreno_stencil_op {
61 STENCIL_KEEP = 0,
62 STENCIL_ZERO = 1,
63 STENCIL_REPLACE = 2,
64 STENCIL_INCR_CLAMP = 3,
65 STENCIL_DECR_CLAMP = 4,
66 STENCIL_INVERT = 5,
67 STENCIL_INCR_WRAP = 6,
68 STENCIL_DECR_WRAP = 7,
69};
70
71enum adreno_rb_blend_factor {
72 FACTOR_ZERO = 0,
73 FACTOR_ONE = 1,
74 FACTOR_SRC_COLOR = 4,
75 FACTOR_ONE_MINUS_SRC_COLOR = 5,
76 FACTOR_SRC_ALPHA = 6,
77 FACTOR_ONE_MINUS_SRC_ALPHA = 7,
78 FACTOR_DST_COLOR = 8,
79 FACTOR_ONE_MINUS_DST_COLOR = 9,
80 FACTOR_DST_ALPHA = 10,
81 FACTOR_ONE_MINUS_DST_ALPHA = 11,
82 FACTOR_CONSTANT_COLOR = 12,
83 FACTOR_ONE_MINUS_CONSTANT_COLOR = 13,
84 FACTOR_CONSTANT_ALPHA = 14,
85 FACTOR_ONE_MINUS_CONSTANT_ALPHA = 15,
86 FACTOR_SRC_ALPHA_SATURATE = 16,
87};
88
89enum adreno_rb_blend_opcode {
90 BLEND_DST_PLUS_SRC = 0,
91 BLEND_SRC_MINUS_DST = 1,
92 BLEND_MIN_DST_SRC = 2,
93 BLEND_MAX_DST_SRC = 3,
94 BLEND_DST_MINUS_SRC = 4,
95 BLEND_DST_PLUS_SRC_BIAS = 5,
96};
97
98enum adreno_rb_surface_endian {
99 ENDIAN_NONE = 0,
100 ENDIAN_8IN16 = 1,
101 ENDIAN_8IN32 = 2,
102 ENDIAN_16IN32 = 3,
103 ENDIAN_8IN64 = 4,
104 ENDIAN_8IN128 = 5,
105};
106
107enum adreno_rb_dither_mode {
108 DITHER_DISABLE = 0,
109 DITHER_ALWAYS = 1,
110 DITHER_IF_ALPHA_OFF = 2,
111};
112
113enum adreno_rb_depth_format {
114 DEPTHX_16 = 0,
115 DEPTHX_24_8 = 1,
116};
117
118enum adreno_mmu_clnt_beh {
119 BEH_NEVR = 0,
120 BEH_TRAN_RNG = 1,
121 BEH_TRAN_FLT = 2,
122};
123
124#define REG_AXXX_MH_MMU_CONFIG 0x00000040
125#define AXXX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001
126#define AXXX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002
127#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030
128#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4
129static inline uint32_t AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
130{
131 return ((val) << AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
132}
133#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0
134#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6
135static inline uint32_t AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
136{
137 return ((val) << AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
138}
139#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300
140#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8
141static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
142{
143 return ((val) << AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
144}
145#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00
146#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10
147static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
148{
149 return ((val) << AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
150}
151#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000
152#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12
153static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
154{
155 return ((val) << AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
156}
157#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000
158#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14
159static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
160{
161 return ((val) << AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
162}
163#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000
164#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16
165static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
166{
167 return ((val) << AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
168}
169#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000
170#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18
171static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
172{
173 return ((val) << AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
174}
175#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000
176#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20
177static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
178{
179 return ((val) << AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
180}
181#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000
182#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22
183static inline uint32_t AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
184{
185 return ((val) << AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
186}
187#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000
188#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24
189static inline uint32_t AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
190{
191 return ((val) << AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
192}
193
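These MASK/SHIFT pairs and inline packers follow the standard rules-ng-ng pattern: each helper shifts a field into position and masks it, and a full register value is built by OR-ing fields together. For illustration (the client-behavior choices here are arbitrary):

        uint32_t val = AXXX_MH_MMU_CONFIG_MMU_ENABLE |
                AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
                AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_FLT);
        /* -> 0x00000001 | (1 << 4) | (2 << 6) == 0x00000091 */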
194#define REG_AXXX_MH_MMU_VA_RANGE 0x00000041
195
196#define REG_AXXX_MH_MMU_PT_BASE 0x00000042
197
198#define REG_AXXX_MH_MMU_PAGE_FAULT 0x00000043
199
200#define REG_AXXX_MH_MMU_TRAN_ERROR 0x00000044
201
202#define REG_AXXX_MH_MMU_INVALIDATE 0x00000045
203
204#define REG_AXXX_MH_MMU_MPU_BASE 0x00000046
205
206#define REG_AXXX_MH_MMU_MPU_END 0x00000047
207
208#define REG_AXXX_CP_RB_BASE 0x000001c0
209
210#define REG_AXXX_CP_RB_CNTL 0x000001c1
211#define AXXX_CP_RB_CNTL_BUFSZ__MASK 0x0000003f
212#define AXXX_CP_RB_CNTL_BUFSZ__SHIFT 0
213static inline uint32_t AXXX_CP_RB_CNTL_BUFSZ(uint32_t val)
214{
215 return ((val) << AXXX_CP_RB_CNTL_BUFSZ__SHIFT) & AXXX_CP_RB_CNTL_BUFSZ__MASK;
216}
217#define AXXX_CP_RB_CNTL_BLKSZ__MASK 0x00003f00
218#define AXXX_CP_RB_CNTL_BLKSZ__SHIFT 8
219static inline uint32_t AXXX_CP_RB_CNTL_BLKSZ(uint32_t val)
220{
221 return ((val) << AXXX_CP_RB_CNTL_BLKSZ__SHIFT) & AXXX_CP_RB_CNTL_BLKSZ__MASK;
222}
223#define AXXX_CP_RB_CNTL_BUF_SWAP__MASK 0x00030000
224#define AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT 16
225static inline uint32_t AXXX_CP_RB_CNTL_BUF_SWAP(uint32_t val)
226{
227 return ((val) << AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT) & AXXX_CP_RB_CNTL_BUF_SWAP__MASK;
228}
229#define AXXX_CP_RB_CNTL_POLL_EN 0x00100000
230#define AXXX_CP_RB_CNTL_NO_UPDATE 0x08000000
231#define AXXX_CP_RB_CNTL_RPTR_WR_EN 0x80000000
232
233#define REG_AXXX_CP_RB_RPTR_ADDR 0x000001c3
234#define AXXX_CP_RB_RPTR_ADDR_SWAP__MASK 0x00000003
235#define AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT 0
236static inline uint32_t AXXX_CP_RB_RPTR_ADDR_SWAP(uint32_t val)
237{
238 return ((val) << AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT) & AXXX_CP_RB_RPTR_ADDR_SWAP__MASK;
239}
240#define AXXX_CP_RB_RPTR_ADDR_ADDR__MASK 0xfffffffc
241#define AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT 2
242static inline uint32_t AXXX_CP_RB_RPTR_ADDR_ADDR(uint32_t val)
243{
244 return ((val >> 2) << AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT) & AXXX_CP_RB_RPTR_ADDR_ADDR__MASK;
245}
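Note the extra `val >> 2` in AXXX_CP_RB_RPTR_ADDR_ADDR(): the ADDR field occupies bits [31:2], so the helper takes a byte address and effectively clears its two low bits, i.e. the register stores a dword-aligned address:

        /* ((0x1004 >> 2) << 2) & 0xfffffffc == 0x1004; the low 2 bits of
         * an unaligned address such as 0x1005 would simply be dropped.
         */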
246
247#define REG_AXXX_CP_RB_RPTR 0x000001c4
248
249#define REG_AXXX_CP_RB_WPTR 0x000001c5
250
251#define REG_AXXX_CP_RB_WPTR_DELAY 0x000001c6
252
253#define REG_AXXX_CP_RB_RPTR_WR 0x000001c7
254
255#define REG_AXXX_CP_RB_WPTR_BASE 0x000001c8
256
257#define REG_AXXX_CP_QUEUE_THRESHOLDS 0x000001d5
258#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK 0x0000000f
259#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT 0
260static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(uint32_t val)
261{
262 return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK;
263}
264#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK 0x00000f00
265#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT 8
266static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(uint32_t val)
267{
268 return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK;
269}
270#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK 0x000f0000
271#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT 16
272static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val)
273{
274 return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK;
275}
276
277#define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6
278
279#define REG_AXXX_CP_CSQ_AVAIL 0x000001d7
280#define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f
281#define AXXX_CP_CSQ_AVAIL_RING__SHIFT 0
282static inline uint32_t AXXX_CP_CSQ_AVAIL_RING(uint32_t val)
283{
284 return ((val) << AXXX_CP_CSQ_AVAIL_RING__SHIFT) & AXXX_CP_CSQ_AVAIL_RING__MASK;
285}
286#define AXXX_CP_CSQ_AVAIL_IB1__MASK 0x00007f00
287#define AXXX_CP_CSQ_AVAIL_IB1__SHIFT 8
288static inline uint32_t AXXX_CP_CSQ_AVAIL_IB1(uint32_t val)
289{
290 return ((val) << AXXX_CP_CSQ_AVAIL_IB1__SHIFT) & AXXX_CP_CSQ_AVAIL_IB1__MASK;
291}
292#define AXXX_CP_CSQ_AVAIL_IB2__MASK 0x007f0000
293#define AXXX_CP_CSQ_AVAIL_IB2__SHIFT 16
294static inline uint32_t AXXX_CP_CSQ_AVAIL_IB2(uint32_t val)
295{
296 return ((val) << AXXX_CP_CSQ_AVAIL_IB2__SHIFT) & AXXX_CP_CSQ_AVAIL_IB2__MASK;
297}
298
299#define REG_AXXX_CP_STQ_AVAIL 0x000001d8
300#define AXXX_CP_STQ_AVAIL_ST__MASK 0x0000007f
301#define AXXX_CP_STQ_AVAIL_ST__SHIFT 0
302static inline uint32_t AXXX_CP_STQ_AVAIL_ST(uint32_t val)
303{
304 return ((val) << AXXX_CP_STQ_AVAIL_ST__SHIFT) & AXXX_CP_STQ_AVAIL_ST__MASK;
305}
306
307#define REG_AXXX_CP_MEQ_AVAIL 0x000001d9
308#define AXXX_CP_MEQ_AVAIL_MEQ__MASK 0x0000001f
309#define AXXX_CP_MEQ_AVAIL_MEQ__SHIFT 0
310static inline uint32_t AXXX_CP_MEQ_AVAIL_MEQ(uint32_t val)
311{
312 return ((val) << AXXX_CP_MEQ_AVAIL_MEQ__SHIFT) & AXXX_CP_MEQ_AVAIL_MEQ__MASK;
313}
314
315#define REG_AXXX_SCRATCH_UMSK 0x000001dc
316#define AXXX_SCRATCH_UMSK_UMSK__MASK 0x000000ff
317#define AXXX_SCRATCH_UMSK_UMSK__SHIFT 0
318static inline uint32_t AXXX_SCRATCH_UMSK_UMSK(uint32_t val)
319{
320 return ((val) << AXXX_SCRATCH_UMSK_UMSK__SHIFT) & AXXX_SCRATCH_UMSK_UMSK__MASK;
321}
322#define AXXX_SCRATCH_UMSK_SWAP__MASK 0x00030000
323#define AXXX_SCRATCH_UMSK_SWAP__SHIFT 16
324static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
325{
326 return ((val) << AXXX_SCRATCH_UMSK_SWAP__SHIFT) & AXXX_SCRATCH_UMSK_SWAP__MASK;
327}
328
329#define REG_AXXX_SCRATCH_ADDR 0x000001dd
330
331#define REG_AXXX_CP_ME_RDADDR 0x000001ea
332
333#define REG_AXXX_CP_STATE_DEBUG_INDEX 0x000001ec
334
335#define REG_AXXX_CP_STATE_DEBUG_DATA 0x000001ed
336
337#define REG_AXXX_CP_INT_CNTL 0x000001f2
338
339#define REG_AXXX_CP_INT_STATUS 0x000001f3
340
341#define REG_AXXX_CP_INT_ACK 0x000001f4
342
343#define REG_AXXX_CP_ME_CNTL 0x000001f6
344
345#define REG_AXXX_CP_ME_STATUS 0x000001f7
346
347#define REG_AXXX_CP_ME_RAM_WADDR 0x000001f8
348
349#define REG_AXXX_CP_ME_RAM_RADDR 0x000001f9
350
351#define REG_AXXX_CP_ME_RAM_DATA 0x000001fa
352
353#define REG_AXXX_CP_DEBUG 0x000001fc
354#define AXXX_CP_DEBUG_PREDICATE_DISABLE 0x00800000
355#define AXXX_CP_DEBUG_PROG_END_PTR_ENABLE 0x01000000
356#define AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE 0x02000000
357#define AXXX_CP_DEBUG_PREFETCH_PASS_NOPS 0x04000000
358#define AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE 0x08000000
359#define AXXX_CP_DEBUG_PREFETCH_MATCH_DISABLE 0x10000000
360#define AXXX_CP_DEBUG_SIMPLE_ME_FLOW_CONTROL 0x40000000
361#define AXXX_CP_DEBUG_MIU_WRITE_PACK_DISABLE 0x80000000
362
363#define REG_AXXX_CP_CSQ_RB_STAT 0x000001fd
364#define AXXX_CP_CSQ_RB_STAT_RPTR__MASK 0x0000007f
365#define AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT 0
366static inline uint32_t AXXX_CP_CSQ_RB_STAT_RPTR(uint32_t val)
367{
368 return ((val) << AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_RPTR__MASK;
369}
370#define AXXX_CP_CSQ_RB_STAT_WPTR__MASK 0x007f0000
371#define AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT 16
372static inline uint32_t AXXX_CP_CSQ_RB_STAT_WPTR(uint32_t val)
373{
374 return ((val) << AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_WPTR__MASK;
375}
376
377#define REG_AXXX_CP_CSQ_IB1_STAT 0x000001fe
378#define AXXX_CP_CSQ_IB1_STAT_RPTR__MASK 0x0000007f
379#define AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT 0
380static inline uint32_t AXXX_CP_CSQ_IB1_STAT_RPTR(uint32_t val)
381{
382 return ((val) << AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_RPTR__MASK;
383}
384#define AXXX_CP_CSQ_IB1_STAT_WPTR__MASK 0x007f0000
385#define AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT 16
386static inline uint32_t AXXX_CP_CSQ_IB1_STAT_WPTR(uint32_t val)
387{
388 return ((val) << AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_WPTR__MASK;
389}
390
391#define REG_AXXX_CP_CSQ_IB2_STAT 0x000001ff
392#define AXXX_CP_CSQ_IB2_STAT_RPTR__MASK 0x0000007f
393#define AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT 0
394static inline uint32_t AXXX_CP_CSQ_IB2_STAT_RPTR(uint32_t val)
395{
396 return ((val) << AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_RPTR__MASK;
397}
398#define AXXX_CP_CSQ_IB2_STAT_WPTR__MASK 0x007f0000
399#define AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT 16
400static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
401{
402 return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK;
403}
404
405#define REG_AXXX_CP_SCRATCH_REG0 0x00000578
406
407#define REG_AXXX_CP_SCRATCH_REG1 0x00000579
408
409#define REG_AXXX_CP_SCRATCH_REG2 0x0000057a
410
411#define REG_AXXX_CP_SCRATCH_REG3 0x0000057b
412
413#define REG_AXXX_CP_SCRATCH_REG4 0x0000057c
414
415#define REG_AXXX_CP_SCRATCH_REG5 0x0000057d
416
417#define REG_AXXX_CP_SCRATCH_REG6 0x0000057e
418
419#define REG_AXXX_CP_SCRATCH_REG7 0x0000057f
420
421#define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a
422
423#define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b
424
425#define REG_AXXX_CP_ME_CF_EVENT_DATA 0x0000060c
426
427#define REG_AXXX_CP_ME_NRT_ADDR 0x0000060d
428
429#define REG_AXXX_CP_ME_NRT_DATA 0x0000060e
430
431
432#endif /* ADRENO_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
new file mode 100644
index 000000000000..a60584763b61
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -0,0 +1,370 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "adreno_gpu.h"
19#include "msm_gem.h"
20
21struct adreno_info {
22 struct adreno_rev rev;
23 uint32_t revn;
24 const char *name;
25 const char *pm4fw, *pfpfw;
26 uint32_t gmem;
27};
28
29#define ANY_ID 0xff
30
31static const struct adreno_info gpulist[] = {
32 {
33 .rev = ADRENO_REV(3, 0, 5, ANY_ID),
34 .revn = 305,
35 .name = "A305",
36 .pm4fw = "a300_pm4.fw",
37 .pfpfw = "a300_pfp.fw",
38 .gmem = SZ_256K,
39 }, {
40 .rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
41 .revn = 320,
42 .name = "A320",
43 .pm4fw = "a300_pm4.fw",
44 .pfpfw = "a300_pfp.fw",
45 .gmem = SZ_512K,
46 }, {
47 .rev = ADRENO_REV(3, 3, 0, 0),
48 .revn = 330,
49 .name = "A330",
50 .pm4fw = "a330_pm4.fw",
51 .pfpfw = "a330_pfp.fw",
52 .gmem = SZ_1M,
53 },
54};
55
56#define RB_SIZE SZ_32K
57#define RB_BLKSIZE 16
58
59int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
60{
61 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
62
63 switch (param) {
64 case MSM_PARAM_GPU_ID:
65 *value = adreno_gpu->info->revn;
66 return 0;
67 case MSM_PARAM_GMEM_SIZE:
68 *value = adreno_gpu->info->gmem;
69 return 0;
70 default:
71 DBG("%s: invalid param: %u", gpu->name, param);
72 return -EINVAL;
73 }
74}
75
76#define rbmemptr(adreno_gpu, member) \
77 ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
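rbmemptr() turns a field of struct adreno_rbmemptrs into the GPU-visible address of that field. With the struct layout from adreno_gpu.h (rptr, wptr, fence, each 4 bytes) and an illustrative memptrs_iova of 0x40000000:

        /* rbmemptr(adreno_gpu, rptr)  -> 0x40000000 (offsetof == 0) */
        /* rbmemptr(adreno_gpu, wptr)  -> 0x40000004                 */
        /* rbmemptr(adreno_gpu, fence) -> 0x40000008                 */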
78
79int adreno_hw_init(struct msm_gpu *gpu)
80{
81 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
82
83 DBG("%s", gpu->name);
84
85 /* Setup REG_CP_RB_CNTL: */
86 gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
87 /* size is log2(quad-words): */
88 AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
89 AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE));
90
91 /* Setup ringbuffer address: */
92 gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
93 gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));
94
95 /* Setup scratch/timestamp: */
96 gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));
97
98 gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);
99
100 return 0;
101}
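With the defaults defined above (RB_SIZE = SZ_32K, RB_BLKSIZE = 16), the CP_RB_CNTL value written here works out to:

        /* 32768 bytes / 8 = 4096 quad-words, ilog2(4096) = 12, so: */
        /* AXXX_CP_RB_CNTL_BUFSZ(12) | AXXX_CP_RB_CNTL_BLKSZ(16)    */
        /*   == 0x0000000c | 0x00001000 == 0x0000100c               */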
102
103static uint32_t get_wptr(struct msm_ringbuffer *ring)
104{
105 return ring->cur - ring->start;
106}
107
108uint32_t adreno_last_fence(struct msm_gpu *gpu)
109{
110 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
111 return adreno_gpu->memptrs->fence;
112}
113
114void adreno_recover(struct msm_gpu *gpu)
115{
116 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
117 struct drm_device *dev = gpu->dev;
118 int ret;
119
120 gpu->funcs->pm_suspend(gpu);
121
122 /* reset ringbuffer: */
123 gpu->rb->cur = gpu->rb->start;
124
125 /* reset completed fence seqno, just discard anything pending: */
126 adreno_gpu->memptrs->fence = gpu->submitted_fence;
127
128 gpu->funcs->pm_resume(gpu);
129 ret = gpu->funcs->hw_init(gpu);
130 if (ret) {
131 dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
132 /* hmm, oh well? */
133 }
134}
135
136int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
137 struct msm_file_private *ctx)
138{
139 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
140 struct msm_drm_private *priv = gpu->dev->dev_private;
141 struct msm_ringbuffer *ring = gpu->rb;
142 unsigned i, ibs = 0;
143
144 for (i = 0; i < submit->nr_cmds; i++) {
145 switch (submit->cmd[i].type) {
146 case MSM_SUBMIT_CMD_IB_TARGET_BUF:
147 /* ignore IB-targets */
148 break;
149 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
150 /* ignore if there has not been a ctx switch, otherwise fall through: */
151 if (priv->lastctx == ctx)
152 break;
153 case MSM_SUBMIT_CMD_BUF:
154 OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
155 OUT_RING(ring, submit->cmd[i].iova);
156 OUT_RING(ring, submit->cmd[i].size);
157 ibs++;
158 break;
159 }
160 }
161
162 /* On a320, at least, the ring seems to need padding out to an
163 * even number of qwords to avoid an issue with the CP hanging
164 * on wrap-around:
165 */
166 if (ibs % 2)
167 OUT_PKT2(ring);
168
169 OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
170 OUT_RING(ring, submit->fence);
171
172 if (adreno_is_a3xx(adreno_gpu)) {
173 /* Flush HLSQ lazy updates to make sure there is nothing
174 * pending for indirect loads after the timestamp has
175 * passed:
176 */
177 OUT_PKT3(ring, CP_EVENT_WRITE, 1);
178 OUT_RING(ring, HLSQ_FLUSH);
179
180 OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
181 OUT_RING(ring, 0x00000000);
182 }
183
184 OUT_PKT3(ring, CP_EVENT_WRITE, 3);
185 OUT_RING(ring, CACHE_FLUSH_TS);
186 OUT_RING(ring, rbmemptr(adreno_gpu, fence));
187 OUT_RING(ring, submit->fence);
188
189 /* we could maybe be clever and only CP_COND_EXEC the interrupt: */
190 OUT_PKT3(ring, CP_INTERRUPT, 1);
191 OUT_RING(ring, 0x80000000);
192
193#if 0
194 if (adreno_is_a3xx(adreno_gpu)) {
195 /* Dummy set-constant to trigger context rollover */
196 OUT_PKT3(ring, CP_SET_CONSTANT, 2);
197 OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
198 OUT_RING(ring, 0x00000000);
199 }
200#endif
201
202 gpu->funcs->flush(gpu);
203
204 return 0;
205}
206
207void adreno_flush(struct msm_gpu *gpu)
208{
209 uint32_t wptr = get_wptr(gpu->rb);
210
211 /* ensure writes to ringbuffer have hit system memory: */
212 mb();
213
214 gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
215}
216
217void adreno_idle(struct msm_gpu *gpu)
218{
219 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
220 uint32_t rptr, wptr = get_wptr(gpu->rb);
221 unsigned long t;
222
223 t = jiffies + ADRENO_IDLE_TIMEOUT;
224
225 /* wait for the CP to drain the ringbuffer: */
226 do {
227 rptr = adreno_gpu->memptrs->rptr;
228 if (rptr == wptr)
229 return;
230 } while (time_before(jiffies, t));
231
232 DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name);
233
234 /* TODO maybe we need to reset GPU here to recover from hang? */
235}
236
237#ifdef CONFIG_DEBUG_FS
238void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
239{
240 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
241
242 seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
243 adreno_gpu->info->revn, adreno_gpu->rev.core,
244 adreno_gpu->rev.major, adreno_gpu->rev.minor,
245 adreno_gpu->rev.patchid);
246
247 seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
248 gpu->submitted_fence);
249 seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr);
250 seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
251 seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
252}
253#endif
254
255void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
256{
257 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
258 uint32_t freedwords;
259 do {
260 uint32_t size = gpu->rb->size / 4;
261 uint32_t wptr = get_wptr(gpu->rb);
262 uint32_t rptr = adreno_gpu->memptrs->rptr;
263 freedwords = (rptr + (size - 1) - wptr) % size;
264 } while (freedwords < ndwords);
265}
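The modular arithmetic here deliberately reserves one dword, so that rptr == wptr always means "empty" and the ring can never appear completely full. With illustrative numbers:

        /* size = 8192 dwords (RB_SIZE / 4), rptr = 90, wptr = 100:  */
        /*   freedwords = (90 + 8191 - 100) % 8192 = 8181            */
        /* and when rptr == wptr, freedwords = size - 1, never size. */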
266
267static const char *iommu_ports[] = {
268 "gfx3d_user", "gfx3d_priv",
269 "gfx3d1_user", "gfx3d1_priv",
270};
271
272static inline bool _rev_match(uint8_t entry, uint8_t id)
273{
274 return (entry == ANY_ID) || (entry == id);
275}
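_rev_match() treats ANY_ID entries in gpulist as wildcards, so for example the A320 entry matches any 3.2.x.y part reported by the platform code:

        /* gpulist entry:  ADRENO_REV(3, 2, ANY_ID, ANY_ID)            */
        /* probed rev:     ADRENO_REV(3, 2, 0, 2)   (APQ8064, v2)      */
        /* core 3 == 3, major 2 == 2, minor/patchid wildcard -> "A320" */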
276
277int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
278 struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
279 struct adreno_rev rev)
280{
281 int i, ret;
282
283 /* identify gpu: */
284 for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
285 const struct adreno_info *info = &gpulist[i];
286 if (_rev_match(info->rev.core, rev.core) &&
287 _rev_match(info->rev.major, rev.major) &&
288 _rev_match(info->rev.minor, rev.minor) &&
289 _rev_match(info->rev.patchid, rev.patchid)) {
290 gpu->info = info;
291 gpu->revn = info->revn;
292 break;
293 }
294 }
295
296 if (i == ARRAY_SIZE(gpulist)) {
297 dev_err(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
298 rev.core, rev.major, rev.minor, rev.patchid);
299 return -ENXIO;
300 }
301
302 DBG("Found GPU: %s (%u.%u.%u.%u)", gpu->info->name,
303 rev.core, rev.major, rev.minor, rev.patchid);
304
305 gpu->funcs = funcs;
306 gpu->rev = rev;
307
308 ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
309 if (ret) {
310 dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
311 gpu->info->pm4fw, ret);
312 return ret;
313 }
314
315 ret = request_firmware(&gpu->pfp, gpu->info->pfpfw, drm->dev);
316 if (ret) {
317 dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
318 gpu->info->pfpfw, ret);
319 return ret;
320 }
321
322 ret = msm_gpu_init(drm, pdev, &gpu->base, &funcs->base,
323 gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
324 RB_SIZE);
325 if (ret)
326 return ret;
327
328 ret = msm_iommu_attach(drm, gpu->base.iommu,
329 iommu_ports, ARRAY_SIZE(iommu_ports));
330 if (ret)
331 return ret;
332
333 gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
334 MSM_BO_UNCACHED);
335 if (IS_ERR(gpu->memptrs_bo)) {
336 ret = PTR_ERR(gpu->memptrs_bo);
337 gpu->memptrs_bo = NULL;
338 dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
339 return ret;
340 }
341
342 gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
343 if (!gpu->memptrs) {
344 dev_err(drm->dev, "could not vmap memptrs\n");
345 return -ENOMEM;
346 }
347
348 ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
349 &gpu->memptrs_iova);
350 if (ret) {
351 dev_err(drm->dev, "could not map memptrs: %d\n", ret);
352 return ret;
353 }
354
355 return 0;
356}
357
358void adreno_gpu_cleanup(struct adreno_gpu *gpu)
359{
360 if (gpu->memptrs_bo) {
361 if (gpu->memptrs_iova)
362 msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
363 drm_gem_object_unreference(gpu->memptrs_bo);
364 }
365 if (gpu->pm4)
366 release_firmware(gpu->pm4);
367 if (gpu->pfp)
368 release_firmware(gpu->pfp);
369 msm_gpu_cleanup(&gpu->base);
370}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
new file mode 100644
index 000000000000..f73abfba7c22
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -0,0 +1,141 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ADRENO_GPU_H__
19#define __ADRENO_GPU_H__
20
21#include <linux/firmware.h>
22
23#include "msm_gpu.h"
24
25#include "adreno_common.xml.h"
26#include "adreno_pm4.xml.h"
27
28struct adreno_rev {
29 uint8_t core;
30 uint8_t major;
31 uint8_t minor;
32 uint8_t patchid;
33};
34
35#define ADRENO_REV(core, major, minor, patchid) \
36 ((struct adreno_rev){ core, major, minor, patchid })
37
38struct adreno_gpu_funcs {
39 struct msm_gpu_funcs base;
40};
41
42struct adreno_info;
43
44struct adreno_rbmemptrs {
45 volatile uint32_t rptr;
46 volatile uint32_t wptr;
47 volatile uint32_t fence;
48};
49
50struct adreno_gpu {
51 struct msm_gpu base;
52 struct adreno_rev rev;
53 const struct adreno_info *info;
54 uint32_t revn; /* numeric revision name */
55 const struct adreno_gpu_funcs *funcs;
56
57 /* firmware: */
58 const struct firmware *pm4, *pfp;
59
60 /* ringbuffer rptr/wptr: */
61 /* TODO: should this be in msm_ringbuffer? It would likely be
62  * different for z180.. */
63 struct adreno_rbmemptrs *memptrs;
64 struct drm_gem_object *memptrs_bo;
65 uint32_t memptrs_iova;
66};
67#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
68
69/* platform config data (ie. from DT, or pdata) */
70struct adreno_platform_config {
71 struct adreno_rev rev;
72 uint32_t fast_rate, slow_rate, bus_freq;
73};
74
75#define ADRENO_IDLE_TIMEOUT (20 * 1000)
76
77static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
78{
79 return (gpu->revn >= 300) && (gpu->revn < 400);
80}
81
82static inline bool adreno_is_a305(struct adreno_gpu *gpu)
83{
84 return gpu->revn == 305;
85}
86
87static inline bool adreno_is_a320(struct adreno_gpu *gpu)
88{
89 return gpu->revn == 320;
90}
91
92static inline bool adreno_is_a330(struct adreno_gpu *gpu)
93{
94 return gpu->revn == 330;
95}
96
97int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
98int adreno_hw_init(struct msm_gpu *gpu);
99uint32_t adreno_last_fence(struct msm_gpu *gpu);
100void adreno_recover(struct msm_gpu *gpu);
101int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
102 struct msm_file_private *ctx);
103void adreno_flush(struct msm_gpu *gpu);
104void adreno_idle(struct msm_gpu *gpu);
105#ifdef CONFIG_DEBUG_FS
106void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
107#endif
108void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
109
110int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
111 struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
112 struct adreno_rev rev);
113void adreno_gpu_cleanup(struct adreno_gpu *gpu);
114
115
116/* ringbuffer helpers (the parts that are adreno specific) */
117
118static inline void
119OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
120{
121 adreno_wait_ring(ring->gpu, cnt+1);
122 OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
123}
124
125/* no-op packet: */
126static inline void
127OUT_PKT2(struct msm_ringbuffer *ring)
128{
129 adreno_wait_ring(ring->gpu, 1);
130 OUT_RING(ring, CP_TYPE2_PKT);
131}
132
133static inline void
134OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
135{
136 adreno_wait_ring(ring->gpu, cnt+1);
137 OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
138}
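These helpers emit the PM4 packet headers defined in adreno_pm4.xml.h: type-0 packets write `cnt` values to consecutive registers starting at `regindx`, type-2 is a single no-op dword (used for the qword padding in adreno_submit()), and type-3 packets carry an opcode plus `cnt` payload dwords. For example, the fence write from adreno_submit():

        OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
        /* header: CP_TYPE0_PKT | ((1-1) << 16) | (0x578 & 0x7FFF)
         *       == 0x00000578
         */
        OUT_RING(ring, submit->fence);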
139
140
141#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
new file mode 100644
index 000000000000..94c13f418e75
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -0,0 +1,254 @@
1#ifndef ADRENO_PM4_XML
2#define ADRENO_PM4_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
17
18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark)
20
21Permission is hereby granted, free of charge, to any person obtaining
22a copy of this software and associated documentation files (the
23"Software"), to deal in the Software without restriction, including
24without limitation the rights to use, copy, modify, merge, publish,
25distribute, sublicense, and/or sell copies of the Software, and to
26permit persons to whom the Software is furnished to do so, subject to
27the following conditions:
28
29The above copyright notice and this permission notice (including the
30next paragraph) shall be included in all copies or substantial
31portions of the Software.
32
33THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
35MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
36IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
37LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
38OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
39WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40*/
41
42
43enum vgt_event_type {
44 VS_DEALLOC = 0,
45 PS_DEALLOC = 1,
46 VS_DONE_TS = 2,
47 PS_DONE_TS = 3,
48 CACHE_FLUSH_TS = 4,
49 CONTEXT_DONE = 5,
50 CACHE_FLUSH = 6,
51 HLSQ_FLUSH = 7,
52 VIZQUERY_START = 7,
53 VIZQUERY_END = 8,
54 SC_WAIT_WC = 9,
55 RST_PIX_CNT = 13,
56 RST_VTX_CNT = 14,
57 TILE_FLUSH = 15,
58 CACHE_FLUSH_AND_INV_TS_EVENT = 20,
59 ZPASS_DONE = 21,
60 CACHE_FLUSH_AND_INV_EVENT = 22,
61 PERFCOUNTER_START = 23,
62 PERFCOUNTER_STOP = 24,
63 VS_FETCH_DONE = 27,
64 FACENESS_FLUSH = 28,
65};
66
67enum pc_di_primtype {
68 DI_PT_NONE = 0,
69 DI_PT_POINTLIST = 1,
70 DI_PT_LINELIST = 2,
71 DI_PT_LINESTRIP = 3,
72 DI_PT_TRILIST = 4,
73 DI_PT_TRIFAN = 5,
74 DI_PT_TRISTRIP = 6,
75 DI_PT_RECTLIST = 8,
76 DI_PT_QUADLIST = 13,
77 DI_PT_QUADSTRIP = 14,
78 DI_PT_POLYGON = 15,
79 DI_PT_2D_COPY_RECT_LIST_V0 = 16,
80 DI_PT_2D_COPY_RECT_LIST_V1 = 17,
81 DI_PT_2D_COPY_RECT_LIST_V2 = 18,
82 DI_PT_2D_COPY_RECT_LIST_V3 = 19,
83 DI_PT_2D_FILL_RECT_LIST = 20,
84 DI_PT_2D_LINE_STRIP = 21,
85 DI_PT_2D_TRI_STRIP = 22,
86};
87
88enum pc_di_src_sel {
89 DI_SRC_SEL_DMA = 0,
90 DI_SRC_SEL_IMMEDIATE = 1,
91 DI_SRC_SEL_AUTO_INDEX = 2,
92 DI_SRC_SEL_RESERVED = 3,
93};
94
95enum pc_di_index_size {
96 INDEX_SIZE_IGN = 0,
97 INDEX_SIZE_16_BIT = 0,
98 INDEX_SIZE_32_BIT = 1,
99 INDEX_SIZE_8_BIT = 2,
100 INDEX_SIZE_INVALID = 0,
101};
102
103enum pc_di_vis_cull_mode {
104 IGNORE_VISIBILITY = 0,
105};
106
107enum adreno_pm4_packet_type {
108 CP_TYPE0_PKT = 0,
109 CP_TYPE1_PKT = 0x40000000,
110 CP_TYPE2_PKT = 0x80000000,
111 CP_TYPE3_PKT = 0xc0000000,
112};
113
114enum adreno_pm4_type3_packets {
115 CP_ME_INIT = 72,
116 CP_NOP = 16,
117 CP_INDIRECT_BUFFER = 63,
118 CP_INDIRECT_BUFFER_PFD = 55,
119 CP_WAIT_FOR_IDLE = 38,
120 CP_WAIT_REG_MEM = 60,
121 CP_WAIT_REG_EQ = 82,
122 CP_WAT_REG_GTE = 83,
123 CP_WAIT_UNTIL_READ = 92,
124 CP_WAIT_IB_PFD_COMPLETE = 93,
125 CP_REG_RMW = 33,
126 CP_SET_BIN_DATA = 47,
127 CP_REG_TO_MEM = 62,
128 CP_MEM_WRITE = 61,
129 CP_MEM_WRITE_CNTR = 79,
130 CP_COND_EXEC = 68,
131 CP_COND_WRITE = 69,
132 CP_EVENT_WRITE = 70,
133 CP_EVENT_WRITE_SHD = 88,
134 CP_EVENT_WRITE_CFL = 89,
135 CP_EVENT_WRITE_ZPD = 91,
136 CP_RUN_OPENCL = 49,
137 CP_DRAW_INDX = 34,
138 CP_DRAW_INDX_2 = 54,
139 CP_DRAW_INDX_BIN = 52,
140 CP_DRAW_INDX_2_BIN = 53,
141 CP_VIZ_QUERY = 35,
142 CP_SET_STATE = 37,
143 CP_SET_CONSTANT = 45,
144 CP_IM_LOAD = 39,
145 CP_IM_LOAD_IMMEDIATE = 43,
146 CP_LOAD_CONSTANT_CONTEXT = 46,
147 CP_INVALIDATE_STATE = 59,
148 CP_SET_SHADER_BASES = 74,
149 CP_SET_BIN_MASK = 80,
150 CP_SET_BIN_SELECT = 81,
151 CP_CONTEXT_UPDATE = 94,
152 CP_INTERRUPT = 64,
153 CP_IM_STORE = 44,
154 CP_SET_BIN_BASE_OFFSET = 75,
155 CP_SET_DRAW_INIT_FLAGS = 75,
156 CP_SET_PROTECTED_MODE = 95,
157 CP_LOAD_STATE = 48,
158 CP_COND_INDIRECT_BUFFER_PFE = 58,
159 CP_COND_INDIRECT_BUFFER_PFD = 50,
160 CP_INDIRECT_BUFFER_PFE = 63,
161 CP_SET_BIN = 76,
162};
163
164enum adreno_state_block {
165 SB_VERT_TEX = 0,
166 SB_VERT_MIPADDR = 1,
167 SB_FRAG_TEX = 2,
168 SB_FRAG_MIPADDR = 3,
169 SB_VERT_SHADER = 4,
170 SB_FRAG_SHADER = 6,
171};
172
173enum adreno_state_type {
174 ST_SHADER = 0,
175 ST_CONSTANTS = 1,
176};
177
178enum adreno_state_src {
179 SS_DIRECT = 0,
180 SS_INDIRECT = 4,
181};
182
183#define REG_CP_LOAD_STATE_0 0x00000000
184#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff
185#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0
186static inline uint32_t CP_LOAD_STATE_0_DST_OFF(uint32_t val)
187{
188 return ((val) << CP_LOAD_STATE_0_DST_OFF__SHIFT) & CP_LOAD_STATE_0_DST_OFF__MASK;
189}
190#define CP_LOAD_STATE_0_STATE_SRC__MASK 0x00070000
191#define CP_LOAD_STATE_0_STATE_SRC__SHIFT 16
192static inline uint32_t CP_LOAD_STATE_0_STATE_SRC(enum adreno_state_src val)
193{
194 return ((val) << CP_LOAD_STATE_0_STATE_SRC__SHIFT) & CP_LOAD_STATE_0_STATE_SRC__MASK;
195}
196#define CP_LOAD_STATE_0_STATE_BLOCK__MASK 0x00380000
197#define CP_LOAD_STATE_0_STATE_BLOCK__SHIFT 19
198static inline uint32_t CP_LOAD_STATE_0_STATE_BLOCK(enum adreno_state_block val)
199{
200 return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK;
201}
202#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0x7fc00000
203#define CP_LOAD_STATE_0_NUM_UNIT__SHIFT 22
204static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val)
205{
206 return ((val) << CP_LOAD_STATE_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE_0_NUM_UNIT__MASK;
207}
208
209#define REG_CP_LOAD_STATE_1 0x00000001
210#define CP_LOAD_STATE_1_STATE_TYPE__MASK 0x00000003
211#define CP_LOAD_STATE_1_STATE_TYPE__SHIFT 0
212static inline uint32_t CP_LOAD_STATE_1_STATE_TYPE(enum adreno_state_type val)
213{
214 return ((val) << CP_LOAD_STATE_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE_1_STATE_TYPE__MASK;
215}
216#define CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK 0xfffffffc
217#define CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT 2
218static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
219{
220 return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
221}
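Taken together, CP_LOAD_STATE is a two-dword type-3 packet built from these helpers. A sketch (the state-block/type choices and `iova` are illustrative) of loading shader constants from memory:

        OUT_PKT3(ring, CP_LOAD_STATE, 2);
        OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) |
                        CP_LOAD_STATE_0_STATE_SRC(SS_INDIRECT) |
                        CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_SHADER) |
                        CP_LOAD_STATE_0_NUM_UNIT(1));
        OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
                        CP_LOAD_STATE_1_EXT_SRC_ADDR(iova));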
222
223#define REG_CP_SET_BIN_0 0x00000000
224
225#define REG_CP_SET_BIN_1 0x00000001
226#define CP_SET_BIN_1_X1__MASK 0x0000ffff
227#define CP_SET_BIN_1_X1__SHIFT 0
228static inline uint32_t CP_SET_BIN_1_X1(uint32_t val)
229{
230 return ((val) << CP_SET_BIN_1_X1__SHIFT) & CP_SET_BIN_1_X1__MASK;
231}
232#define CP_SET_BIN_1_Y1__MASK 0xffff0000
233#define CP_SET_BIN_1_Y1__SHIFT 16
234static inline uint32_t CP_SET_BIN_1_Y1(uint32_t val)
235{
236 return ((val) << CP_SET_BIN_1_Y1__SHIFT) & CP_SET_BIN_1_Y1__MASK;
237}
238
239#define REG_CP_SET_BIN_2 0x00000002
240#define CP_SET_BIN_2_X2__MASK 0x0000ffff
241#define CP_SET_BIN_2_X2__SHIFT 0
242static inline uint32_t CP_SET_BIN_2_X2(uint32_t val)
243{
244 return ((val) << CP_SET_BIN_2_X2__SHIFT) & CP_SET_BIN_2_X2__MASK;
245}
246#define CP_SET_BIN_2_Y2__MASK 0xffff0000
247#define CP_SET_BIN_2_Y2__SHIFT 16
248static inline uint32_t CP_SET_BIN_2_Y2(uint32_t val)
249{
250 return ((val) << CP_SET_BIN_2_Y2__SHIFT) & CP_SET_BIN_2_Y2__MASK;
251}
252
253
254#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
new file mode 100644
index 000000000000..6f8396be431d
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -0,0 +1,502 @@
1#ifndef DSI_XML
2#define DSI_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45enum dsi_traffic_mode {
46 NON_BURST_SYNCH_PULSE = 0,
47 NON_BURST_SYNCH_EVENT = 1,
48 BURST_MODE = 2,
49};
50
51enum dsi_dst_format {
52 DST_FORMAT_RGB565 = 0,
53 DST_FORMAT_RGB666 = 1,
54 DST_FORMAT_RGB666_LOOSE = 2,
55 DST_FORMAT_RGB888 = 3,
56};
57
58enum dsi_rgb_swap {
59 SWAP_RGB = 0,
60 SWAP_RBG = 1,
61 SWAP_BGR = 2,
62 SWAP_BRG = 3,
63 SWAP_GRB = 4,
64 SWAP_GBR = 5,
65};
66
67enum dsi_cmd_trigger {
68 TRIGGER_NONE = 0,
69 TRIGGER_TE = 2,
70 TRIGGER_SW = 4,
71 TRIGGER_SW_SEOF = 5,
72 TRIGGER_SW_TE = 6,
73};
74
75#define DSI_IRQ_CMD_DMA_DONE 0x00000001
76#define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002
77#define DSI_IRQ_CMD_MDP_DONE 0x00000100
78#define DSI_IRQ_MASK_CMD_MDP_DONE 0x00000200
79#define DSI_IRQ_VIDEO_DONE 0x00010000
80#define DSI_IRQ_MASK_VIDEO_DONE 0x00020000
81#define DSI_IRQ_ERROR 0x01000000
82#define DSI_IRQ_MASK_ERROR 0x02000000
83#define REG_DSI_CTRL 0x00000000
84#define DSI_CTRL_ENABLE 0x00000001
85#define DSI_CTRL_VID_MODE_EN 0x00000002
86#define DSI_CTRL_CMD_MODE_EN 0x00000004
87#define DSI_CTRL_LANE0 0x00000010
88#define DSI_CTRL_LANE1 0x00000020
89#define DSI_CTRL_LANE2 0x00000040
90#define DSI_CTRL_LANE3 0x00000080
91#define DSI_CTRL_CLK_EN 0x00000100
92#define DSI_CTRL_ECC_CHECK 0x00100000
93#define DSI_CTRL_CRC_CHECK 0x01000000
94
95#define REG_DSI_STATUS0 0x00000004
96#define DSI_STATUS0_CMD_MODE_DMA_BUSY 0x00000002
97#define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY 0x00000008
98#define DSI_STATUS0_DSI_BUSY 0x00000010
99
100#define REG_DSI_FIFO_STATUS 0x00000008
101
102#define REG_DSI_VID_CFG0 0x0000000c
103#define DSI_VID_CFG0_VIRT_CHANNEL__MASK 0x00000003
104#define DSI_VID_CFG0_VIRT_CHANNEL__SHIFT 0
105static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val)
106{
107 return ((val) << DSI_VID_CFG0_VIRT_CHANNEL__SHIFT) & DSI_VID_CFG0_VIRT_CHANNEL__MASK;
108}
109#define DSI_VID_CFG0_DST_FORMAT__MASK 0x00000030
110#define DSI_VID_CFG0_DST_FORMAT__SHIFT 4
111static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_dst_format val)
112{
113 return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK;
114}
115#define DSI_VID_CFG0_TRAFFIC_MODE__MASK 0x00000300
116#define DSI_VID_CFG0_TRAFFIC_MODE__SHIFT 8
117static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val)
118{
119 return ((val) << DSI_VID_CFG0_TRAFFIC_MODE__SHIFT) & DSI_VID_CFG0_TRAFFIC_MODE__MASK;
120}
121#define DSI_VID_CFG0_BLLP_POWER_STOP 0x00001000
122#define DSI_VID_CFG0_EOF_BLLP_POWER_STOP 0x00008000
123#define DSI_VID_CFG0_HSA_POWER_STOP 0x00010000
124#define DSI_VID_CFG0_HBP_POWER_STOP 0x00100000
125#define DSI_VID_CFG0_HFP_POWER_STOP 0x01000000
126#define DSI_VID_CFG0_PULSE_MODE_HSA_HE 0x10000000
127
128#define REG_DSI_VID_CFG1 0x0000001c
129#define DSI_VID_CFG1_R_SEL 0x00000010
130#define DSI_VID_CFG1_G_SEL 0x00000100
131#define DSI_VID_CFG1_B_SEL 0x00001000
132#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00070000
133#define DSI_VID_CFG1_RGB_SWAP__SHIFT 16
134static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val)
135{
136 return ((val) << DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK;
137}
138#define DSI_VID_CFG1_INTERLEAVE_MAX__MASK 0x00f00000
139#define DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT 20
140static inline uint32_t DSI_VID_CFG1_INTERLEAVE_MAX(uint32_t val)
141{
142 return ((val) << DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT) & DSI_VID_CFG1_INTERLEAVE_MAX__MASK;
143}
144
145#define REG_DSI_ACTIVE_H 0x00000020
146#define DSI_ACTIVE_H_START__MASK 0x00000fff
147#define DSI_ACTIVE_H_START__SHIFT 0
148static inline uint32_t DSI_ACTIVE_H_START(uint32_t val)
149{
150 return ((val) << DSI_ACTIVE_H_START__SHIFT) & DSI_ACTIVE_H_START__MASK;
151}
152#define DSI_ACTIVE_H_END__MASK 0x0fff0000
153#define DSI_ACTIVE_H_END__SHIFT 16
154static inline uint32_t DSI_ACTIVE_H_END(uint32_t val)
155{
156 return ((val) << DSI_ACTIVE_H_END__SHIFT) & DSI_ACTIVE_H_END__MASK;
157}
158
159#define REG_DSI_ACTIVE_V 0x00000024
160#define DSI_ACTIVE_V_START__MASK 0x00000fff
161#define DSI_ACTIVE_V_START__SHIFT 0
162static inline uint32_t DSI_ACTIVE_V_START(uint32_t val)
163{
164 return ((val) << DSI_ACTIVE_V_START__SHIFT) & DSI_ACTIVE_V_START__MASK;
165}
166#define DSI_ACTIVE_V_END__MASK 0x0fff0000
167#define DSI_ACTIVE_V_END__SHIFT 16
168static inline uint32_t DSI_ACTIVE_V_END(uint32_t val)
169{
170 return ((val) << DSI_ACTIVE_V_END__SHIFT) & DSI_ACTIVE_V_END__MASK;
171}
172
173#define REG_DSI_TOTAL 0x00000028
174#define DSI_TOTAL_H_TOTAL__MASK 0x00000fff
175#define DSI_TOTAL_H_TOTAL__SHIFT 0
176static inline uint32_t DSI_TOTAL_H_TOTAL(uint32_t val)
177{
178 return ((val) << DSI_TOTAL_H_TOTAL__SHIFT) & DSI_TOTAL_H_TOTAL__MASK;
179}
180#define DSI_TOTAL_V_TOTAL__MASK 0x0fff0000
181#define DSI_TOTAL_V_TOTAL__SHIFT 16
182static inline uint32_t DSI_TOTAL_V_TOTAL(uint32_t val)
183{
184 return ((val) << DSI_TOTAL_V_TOTAL__SHIFT) & DSI_TOTAL_V_TOTAL__MASK;
185}
186
187#define REG_DSI_ACTIVE_HSYNC 0x0000002c
188#define DSI_ACTIVE_HSYNC_START__MASK 0x00000fff
189#define DSI_ACTIVE_HSYNC_START__SHIFT 0
190static inline uint32_t DSI_ACTIVE_HSYNC_START(uint32_t val)
191{
192 return ((val) << DSI_ACTIVE_HSYNC_START__SHIFT) & DSI_ACTIVE_HSYNC_START__MASK;
193}
194#define DSI_ACTIVE_HSYNC_END__MASK 0x0fff0000
195#define DSI_ACTIVE_HSYNC_END__SHIFT 16
196static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val)
197{
198 return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK;
199}
200
201#define REG_DSI_ACTIVE_VSYNC 0x00000034
202#define DSI_ACTIVE_VSYNC_START__MASK 0x00000fff
203#define DSI_ACTIVE_VSYNC_START__SHIFT 0
204static inline uint32_t DSI_ACTIVE_VSYNC_START(uint32_t val)
205{
206 return ((val) << DSI_ACTIVE_VSYNC_START__SHIFT) & DSI_ACTIVE_VSYNC_START__MASK;
207}
208#define DSI_ACTIVE_VSYNC_END__MASK 0x0fff0000
209#define DSI_ACTIVE_VSYNC_END__SHIFT 16
210static inline uint32_t DSI_ACTIVE_VSYNC_END(uint32_t val)
211{
212 return ((val) << DSI_ACTIVE_VSYNC_END__SHIFT) & DSI_ACTIVE_VSYNC_END__MASK;
213}
214
215#define REG_DSI_CMD_DMA_CTRL 0x00000038
216#define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER 0x10000000
217#define DSI_CMD_DMA_CTRL_LOW_POWER 0x04000000
218
219#define REG_DSI_CMD_CFG0 0x0000003c
220
221#define REG_DSI_CMD_CFG1 0x00000040
222
223#define REG_DSI_DMA_BASE 0x00000044
224
225#define REG_DSI_DMA_LEN 0x00000048
226
227#define REG_DSI_ACK_ERR_STATUS 0x00000064
228
229static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
230
231static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; }
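The readback FIFO is a small register array, so the generated accessors take an index rather than being enumerated individually:

        /* REG_DSI_RDBK_DATA(0) -> 0x00000068 ... REG_DSI_RDBK_DATA(3) -> 0x00000074 */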
232
233#define REG_DSI_TRIG_CTRL 0x00000080
234#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x0000000f
235#define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT 0
236static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val)
237{
238 return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK;
239}
240#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x000000f0
241#define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT 4
242static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val)
243{
244 return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK;
245}
246#define DSI_TRIG_CTRL_STREAM 0x00000100
247#define DSI_TRIG_CTRL_TE 0x80000000
248
249#define REG_DSI_TRIG_DMA 0x0000008c
250
251#define REG_DSI_DLN0_PHY_ERR 0x000000b0
252
253#define REG_DSI_TIMEOUT_STATUS 0x000000bc
254
255#define REG_DSI_CLKOUT_TIMING_CTRL 0x000000c0
256#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK 0x0000003f
257#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT 0
258static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(uint32_t val)
259{
260 return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK;
261}
262#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK 0x00003f00
263#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT 8
264static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
265{
266 return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK;
267}
268
269#define REG_DSI_EOT_PACKET_CTRL 0x000000c8
270#define DSI_EOT_PACKET_CTRL_TX_EOT_APPEND 0x00000001
271#define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010
272
273#define REG_DSI_LANE_SWAP_CTRL 0x000000ac
274
275#define REG_DSI_ERR_INT_MASK0 0x00000108
276
277#define REG_DSI_INTR_CTRL 0x0000010c
278
279#define REG_DSI_RESET 0x00000114
280
281#define REG_DSI_CLK_CTRL 0x00000118
282
283#define REG_DSI_PHY_RESET 0x00000128
284
285#define REG_DSI_PHY_PLL_CTRL_0 0x00000200
286#define DSI_PHY_PLL_CTRL_0_ENABLE 0x00000001
287
288#define REG_DSI_PHY_PLL_CTRL_1 0x00000204
289
290#define REG_DSI_PHY_PLL_CTRL_2 0x00000208
291
292#define REG_DSI_PHY_PLL_CTRL_3 0x0000020c
293
294#define REG_DSI_PHY_PLL_CTRL_4 0x00000210
295
296#define REG_DSI_PHY_PLL_CTRL_5 0x00000214
297
298#define REG_DSI_PHY_PLL_CTRL_6 0x00000218
299
300#define REG_DSI_PHY_PLL_CTRL_7 0x0000021c
301
302#define REG_DSI_PHY_PLL_CTRL_8 0x00000220
303
304#define REG_DSI_PHY_PLL_CTRL_9 0x00000224
305
306#define REG_DSI_PHY_PLL_CTRL_10 0x00000228
307
308#define REG_DSI_PHY_PLL_CTRL_11 0x0000022c
309
310#define REG_DSI_PHY_PLL_CTRL_12 0x00000230
311
312#define REG_DSI_PHY_PLL_CTRL_13 0x00000234
313
314#define REG_DSI_PHY_PLL_CTRL_14 0x00000238
315
316#define REG_DSI_PHY_PLL_CTRL_15 0x0000023c
317
318#define REG_DSI_PHY_PLL_CTRL_16 0x00000240
319
320#define REG_DSI_PHY_PLL_CTRL_17 0x00000244
321
322#define REG_DSI_PHY_PLL_CTRL_18 0x00000248
323
324#define REG_DSI_PHY_PLL_CTRL_19 0x0000024c
325
326#define REG_DSI_PHY_PLL_CTRL_20 0x00000250
327
328#define REG_DSI_PHY_PLL_STATUS 0x00000280
329#define DSI_PHY_PLL_STATUS_PLL_BUSY 0x00000001
330
331#define REG_DSI_8x60_PHY_TPA_CTRL_1 0x00000258
332
333#define REG_DSI_8x60_PHY_TPA_CTRL_2 0x0000025c
334
335#define REG_DSI_8x60_PHY_TIMING_CTRL_0 0x00000260
336
337#define REG_DSI_8x60_PHY_TIMING_CTRL_1 0x00000264
338
339#define REG_DSI_8x60_PHY_TIMING_CTRL_2 0x00000268
340
341#define REG_DSI_8x60_PHY_TIMING_CTRL_3 0x0000026c
342
343#define REG_DSI_8x60_PHY_TIMING_CTRL_4 0x00000270
344
345#define REG_DSI_8x60_PHY_TIMING_CTRL_5 0x00000274
346
347#define REG_DSI_8x60_PHY_TIMING_CTRL_6 0x00000278
348
349#define REG_DSI_8x60_PHY_TIMING_CTRL_7 0x0000027c
350
351#define REG_DSI_8x60_PHY_TIMING_CTRL_8 0x00000280
352
353#define REG_DSI_8x60_PHY_TIMING_CTRL_9 0x00000284
354
355#define REG_DSI_8x60_PHY_TIMING_CTRL_10 0x00000288
356
357#define REG_DSI_8x60_PHY_TIMING_CTRL_11 0x0000028c
358
359#define REG_DSI_8x60_PHY_CTRL_0 0x00000290
360
361#define REG_DSI_8x60_PHY_CTRL_1 0x00000294
362
363#define REG_DSI_8x60_PHY_CTRL_2 0x00000298
364
365#define REG_DSI_8x60_PHY_CTRL_3 0x0000029c
366
367#define REG_DSI_8x60_PHY_STRENGTH_0 0x000002a0
368
369#define REG_DSI_8x60_PHY_STRENGTH_1 0x000002a4
370
371#define REG_DSI_8x60_PHY_STRENGTH_2 0x000002a8
372
373#define REG_DSI_8x60_PHY_STRENGTH_3 0x000002ac
374
375#define REG_DSI_8x60_PHY_REGULATOR_CTRL_0 0x000002cc
376
377#define REG_DSI_8x60_PHY_REGULATOR_CTRL_1 0x000002d0
378
379#define REG_DSI_8x60_PHY_REGULATOR_CTRL_2 0x000002d4
380
381#define REG_DSI_8x60_PHY_REGULATOR_CTRL_3 0x000002d8
382
383#define REG_DSI_8x60_PHY_REGULATOR_CTRL_4 0x000002dc
384
385#define REG_DSI_8x60_PHY_CAL_HW_TRIGGER 0x000000f0
386
387#define REG_DSI_8x60_PHY_CAL_CTRL 0x000000f4
388
389#define REG_DSI_8x60_PHY_CAL_STATUS 0x000000fc
390#define DSI_8x60_PHY_CAL_STATUS_CAL_BUSY 0x10000000
391
392static inline uint32_t REG_DSI_8960_LN(uint32_t i0) { return 0x00000300 + 0x40*i0; }
393
394static inline uint32_t REG_DSI_8960_LN_CFG_0(uint32_t i0) { return 0x00000300 + 0x40*i0; }
395
396static inline uint32_t REG_DSI_8960_LN_CFG_1(uint32_t i0) { return 0x00000304 + 0x40*i0; }
397
398static inline uint32_t REG_DSI_8960_LN_CFG_2(uint32_t i0) { return 0x00000308 + 0x40*i0; }
399
400static inline uint32_t REG_DSI_8960_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000030c + 0x40*i0; }
401
402static inline uint32_t REG_DSI_8960_LN_TEST_STR_0(uint32_t i0) { return 0x00000314 + 0x40*i0; }
403
404static inline uint32_t REG_DSI_8960_LN_TEST_STR_1(uint32_t i0) { return 0x00000318 + 0x40*i0; }
405
406#define REG_DSI_8960_PHY_LNCK_CFG_0 0x00000400
407
408#define REG_DSI_8960_PHY_LNCK_CFG_1 0x00000404
409
410#define REG_DSI_8960_PHY_LNCK_CFG_2 0x00000408
411
412#define REG_DSI_8960_PHY_LNCK_TEST_DATAPATH 0x0000040c
413
414#define REG_DSI_8960_PHY_LNCK_TEST_STR0 0x00000414
415
416#define REG_DSI_8960_PHY_LNCK_TEST_STR1 0x00000418
417
418#define REG_DSI_8960_PHY_TIMING_CTRL_0 0x00000440
419
420#define REG_DSI_8960_PHY_TIMING_CTRL_1 0x00000444
421
422#define REG_DSI_8960_PHY_TIMING_CTRL_2 0x00000448
423
424#define REG_DSI_8960_PHY_TIMING_CTRL_3 0x0000044c
425
426#define REG_DSI_8960_PHY_TIMING_CTRL_4 0x00000450
427
428#define REG_DSI_8960_PHY_TIMING_CTRL_5 0x00000454
429
430#define REG_DSI_8960_PHY_TIMING_CTRL_6 0x00000458
431
432#define REG_DSI_8960_PHY_TIMING_CTRL_7 0x0000045c
433
434#define REG_DSI_8960_PHY_TIMING_CTRL_8 0x00000460
435
436#define REG_DSI_8960_PHY_TIMING_CTRL_9 0x00000464
437
438#define REG_DSI_8960_PHY_TIMING_CTRL_10 0x00000468
439
440#define REG_DSI_8960_PHY_TIMING_CTRL_11 0x0000046c
441
442#define REG_DSI_8960_PHY_CTRL_0 0x00000470
443
444#define REG_DSI_8960_PHY_CTRL_1 0x00000474
445
446#define REG_DSI_8960_PHY_CTRL_2 0x00000478
447
448#define REG_DSI_8960_PHY_CTRL_3 0x0000047c
449
450#define REG_DSI_8960_PHY_STRENGTH_0 0x00000480
451
452#define REG_DSI_8960_PHY_STRENGTH_1 0x00000484
453
454#define REG_DSI_8960_PHY_STRENGTH_2 0x00000488
455
456#define REG_DSI_8960_PHY_BIST_CTRL_0 0x0000048c
457
458#define REG_DSI_8960_PHY_BIST_CTRL_1 0x00000490
459
460#define REG_DSI_8960_PHY_BIST_CTRL_2 0x00000494
461
462#define REG_DSI_8960_PHY_BIST_CTRL_3 0x00000498
463
464#define REG_DSI_8960_PHY_BIST_CTRL_4 0x0000049c
465
466#define REG_DSI_8960_PHY_LDO_CTRL 0x000004b0
467
468#define REG_DSI_8960_PHY_REGULATOR_CTRL_0 0x00000500
469
470#define REG_DSI_8960_PHY_REGULATOR_CTRL_1 0x00000504
471
472#define REG_DSI_8960_PHY_REGULATOR_CTRL_2 0x00000508
473
474#define REG_DSI_8960_PHY_REGULATOR_CTRL_3 0x0000050c
475
476#define REG_DSI_8960_PHY_REGULATOR_CTRL_4 0x00000510
477
478#define REG_DSI_8960_PHY_REGULATOR_CAL_PWR_CFG 0x00000518
479
480#define REG_DSI_8960_PHY_CAL_HW_TRIGGER 0x00000528
481
482#define REG_DSI_8960_PHY_CAL_SW_CFG_0 0x0000052c
483
484#define REG_DSI_8960_PHY_CAL_SW_CFG_1 0x00000530
485
486#define REG_DSI_8960_PHY_CAL_SW_CFG_2 0x00000534
487
488#define REG_DSI_8960_PHY_CAL_HW_CFG_0 0x00000538
489
490#define REG_DSI_8960_PHY_CAL_HW_CFG_1 0x0000053c
491
492#define REG_DSI_8960_PHY_CAL_HW_CFG_2 0x00000540
493
494#define REG_DSI_8960_PHY_CAL_HW_CFG_3 0x00000544
495
496#define REG_DSI_8960_PHY_CAL_HW_CFG_4 0x00000548
497
498#define REG_DSI_8960_PHY_CAL_STATUS 0x00000550
499#define DSI_8960_PHY_CAL_STATUS_CAL_BUSY 0x00000010
500
501
502#endif /* DSI_XML */
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
new file mode 100644
index 000000000000..aefc1b8feae9
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -0,0 +1,114 @@
1#ifndef MMSS_CC_XML
2#define MMSS_CC_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45enum mmss_cc_clk {
46 CLK = 0,
47 PCLK = 1,
48};
49
50#define REG_MMSS_CC_AHB 0x00000008
51
52static inline uint32_t __offset_CLK(enum mmss_cc_clk idx)
53{
54 switch (idx) {
55 case CLK: return 0x0000004c;
56 case PCLK: return 0x00000130;
57 default: return INVALID_IDX(idx);
58 }
59}
60static inline uint32_t REG_MMSS_CC_CLK(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
61
62static inline uint32_t REG_MMSS_CC_CLK_CC(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
63#define MMSS_CC_CLK_CC_CLK_EN 0x00000001
64#define MMSS_CC_CLK_CC_ROOT_EN 0x00000004
65#define MMSS_CC_CLK_CC_MND_EN 0x00000020
66#define MMSS_CC_CLK_CC_MND_MODE__MASK 0x000000c0
67#define MMSS_CC_CLK_CC_MND_MODE__SHIFT 6
68static inline uint32_t MMSS_CC_CLK_CC_MND_MODE(uint32_t val)
69{
70 return ((val) << MMSS_CC_CLK_CC_MND_MODE__SHIFT) & MMSS_CC_CLK_CC_MND_MODE__MASK;
71}
72#define MMSS_CC_CLK_CC_PMXO_SEL__MASK 0x00000300
73#define MMSS_CC_CLK_CC_PMXO_SEL__SHIFT 8
74static inline uint32_t MMSS_CC_CLK_CC_PMXO_SEL(uint32_t val)
75{
76 return ((val) << MMSS_CC_CLK_CC_PMXO_SEL__SHIFT) & MMSS_CC_CLK_CC_PMXO_SEL__MASK;
77}
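/* Illustrative use of the generated __MASK/__SHIFT helpers (not from
 * this commit): field setters mask after shifting, so a hypothetical
 *
 *	val = MMSS_CC_CLK_CC_CLK_EN | MMSS_CC_CLK_CC_MND_EN |
 *	      MMSS_CC_CLK_CC_MND_MODE(2);
 *
 * yields 0x01 | 0x20 | ((2 << 6) & 0xc0) = 0xa1.
 */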
78
79static inline uint32_t REG_MMSS_CC_CLK_MD(enum mmss_cc_clk i0) { return 0x00000004 + __offset_CLK(i0); }
80#define MMSS_CC_CLK_MD_D__MASK 0x000000ff
81#define MMSS_CC_CLK_MD_D__SHIFT 0
82static inline uint32_t MMSS_CC_CLK_MD_D(uint32_t val)
83{
84 return ((val) << MMSS_CC_CLK_MD_D__SHIFT) & MMSS_CC_CLK_MD_D__MASK;
85}
86#define MMSS_CC_CLK_MD_M__MASK 0x0000ff00
87#define MMSS_CC_CLK_MD_M__SHIFT 8
88static inline uint32_t MMSS_CC_CLK_MD_M(uint32_t val)
89{
90 return ((val) << MMSS_CC_CLK_MD_M__SHIFT) & MMSS_CC_CLK_MD_M__MASK;
91}
92
93static inline uint32_t REG_MMSS_CC_CLK_NS(enum mmss_cc_clk i0) { return 0x00000008 + __offset_CLK(i0); }
94#define MMSS_CC_CLK_NS_SRC__MASK 0x0000000f
95#define MMSS_CC_CLK_NS_SRC__SHIFT 0
96static inline uint32_t MMSS_CC_CLK_NS_SRC(uint32_t val)
97{
98 return ((val) << MMSS_CC_CLK_NS_SRC__SHIFT) & MMSS_CC_CLK_NS_SRC__MASK;
99}
100#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK 0x00fff000
101#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT 12
102static inline uint32_t MMSS_CC_CLK_NS_PRE_DIV_FUNC(uint32_t val)
103{
104 return ((val) << MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT) & MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK;
105}
106#define MMSS_CC_CLK_NS_VAL__MASK 0xff000000
107#define MMSS_CC_CLK_NS_VAL__SHIFT 24
108static inline uint32_t MMSS_CC_CLK_NS_VAL(uint32_t val)
109{
110 return ((val) << MMSS_CC_CLK_NS_VAL__SHIFT) & MMSS_CC_CLK_NS_VAL__MASK;
111}
112
113
114#endif /* MMSS_CC_XML */
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
new file mode 100644
index 000000000000..a225e8170b2a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -0,0 +1,48 @@
1#ifndef SFPB_XML
2#define SFPB_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45#define REG_SFPB_CFG 0x00000058
46
47
48#endif /* SFPB_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
new file mode 100644
index 000000000000..50d11df35b21
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -0,0 +1,272 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "hdmi.h"
19
20static struct platform_device *hdmi_pdev;
21
22void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
23{
24 uint32_t ctrl = 0;
25
26 if (power_on) {
27 ctrl |= HDMI_CTRL_ENABLE;
28 if (!hdmi->hdmi_mode) {
29 ctrl |= HDMI_CTRL_HDMI;
30 hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
31 ctrl &= ~HDMI_CTRL_HDMI;
32 } else {
33 ctrl |= HDMI_CTRL_HDMI;
34 }
35 } else {
36 ctrl = HDMI_CTRL_HDMI;
37 }
38
39 hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
40 DBG("HDMI Core: %s, HDMI_CTRL=0x%08x",
41 power_on ? "Enable" : "Disable", ctrl);
42}
43
44static irqreturn_t hdmi_irq(int irq, void *dev_id)
45{
46 struct hdmi *hdmi = dev_id;
47
48 /* Process HPD: */
49 hdmi_connector_irq(hdmi->connector);
50
51 /* Process DDC: */
52 hdmi_i2c_irq(hdmi->i2c);
53
54 /* TODO audio.. */
55
56 return IRQ_HANDLED;
57}
58
59void hdmi_destroy(struct kref *kref)
60{
61 struct hdmi *hdmi = container_of(kref, struct hdmi, refcount);
62 struct hdmi_phy *phy = hdmi->phy;
63
64 if (phy)
65 phy->funcs->destroy(phy);
66
67 if (hdmi->i2c)
68 hdmi_i2c_destroy(hdmi->i2c);
69
70 put_device(&hdmi->pdev->dev);
71}
72
73/* initialize connector */
74int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
75{
76 struct hdmi *hdmi = NULL;
77 struct msm_drm_private *priv = dev->dev_private;
78 struct platform_device *pdev = hdmi_pdev;
79 struct hdmi_platform_config *config;
80 int ret;
81
82 if (!pdev) {
83 dev_err(dev->dev, "no hdmi device\n");
84 ret = -ENXIO;
85 goto fail;
86 }
87
88 config = pdev->dev.platform_data;
89
90 hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
91 if (!hdmi) {
92 ret = -ENOMEM;
93 goto fail;
94 }
95
96 kref_init(&hdmi->refcount);
97
98 get_device(&pdev->dev);
99
100 hdmi->dev = dev;
101 hdmi->pdev = pdev;
102 hdmi->encoder = encoder;
103
104	/* not sure which phy maps to which msm variant.. we probably miss some */
105 if (config->phy_init)
106 hdmi->phy = config->phy_init(hdmi);
107 else
108 hdmi->phy = ERR_PTR(-ENXIO);
109
110 if (IS_ERR(hdmi->phy)) {
111 ret = PTR_ERR(hdmi->phy);
112 dev_err(dev->dev, "failed to load phy: %d\n", ret);
113 hdmi->phy = NULL;
114 goto fail;
115 }
116
117 hdmi->mmio = msm_ioremap(pdev, "hdmi_msm_hdmi_addr", "HDMI");
118 if (IS_ERR(hdmi->mmio)) {
119 ret = PTR_ERR(hdmi->mmio);
120 goto fail;
121 }
122
123 hdmi->mvs = devm_regulator_get(&pdev->dev, "8901_hdmi_mvs");
124 if (IS_ERR(hdmi->mvs))
125 hdmi->mvs = devm_regulator_get(&pdev->dev, "hdmi_mvs");
126 if (IS_ERR(hdmi->mvs)) {
127 ret = PTR_ERR(hdmi->mvs);
128 dev_err(dev->dev, "failed to get mvs regulator: %d\n", ret);
129 goto fail;
130 }
131
132 hdmi->mpp0 = devm_regulator_get(&pdev->dev, "8901_mpp0");
133 if (IS_ERR(hdmi->mpp0))
134 hdmi->mpp0 = NULL;
135
136 hdmi->clk = devm_clk_get(&pdev->dev, "core_clk");
137 if (IS_ERR(hdmi->clk)) {
138 ret = PTR_ERR(hdmi->clk);
139 dev_err(dev->dev, "failed to get 'clk': %d\n", ret);
140 goto fail;
141 }
142
143 hdmi->m_pclk = devm_clk_get(&pdev->dev, "master_iface_clk");
144 if (IS_ERR(hdmi->m_pclk)) {
145 ret = PTR_ERR(hdmi->m_pclk);
146 dev_err(dev->dev, "failed to get 'm_pclk': %d\n", ret);
147 goto fail;
148 }
149
150 hdmi->s_pclk = devm_clk_get(&pdev->dev, "slave_iface_clk");
151 if (IS_ERR(hdmi->s_pclk)) {
152 ret = PTR_ERR(hdmi->s_pclk);
153 dev_err(dev->dev, "failed to get 's_pclk': %d\n", ret);
154 goto fail;
155 }
156
157 hdmi->i2c = hdmi_i2c_init(hdmi);
158 if (IS_ERR(hdmi->i2c)) {
159 ret = PTR_ERR(hdmi->i2c);
160 dev_err(dev->dev, "failed to get i2c: %d\n", ret);
161 hdmi->i2c = NULL;
162 goto fail;
163 }
164
165 hdmi->bridge = hdmi_bridge_init(hdmi);
166 if (IS_ERR(hdmi->bridge)) {
167 ret = PTR_ERR(hdmi->bridge);
168 dev_err(dev->dev, "failed to create HDMI bridge: %d\n", ret);
169 hdmi->bridge = NULL;
170 goto fail;
171 }
172
173 hdmi->connector = hdmi_connector_init(hdmi);
174 if (IS_ERR(hdmi->connector)) {
175 ret = PTR_ERR(hdmi->connector);
176 dev_err(dev->dev, "failed to create HDMI connector: %d\n", ret);
177 hdmi->connector = NULL;
178 goto fail;
179 }
180
181 hdmi->irq = platform_get_irq(pdev, 0);
182 if (hdmi->irq < 0) {
183 ret = hdmi->irq;
184 dev_err(dev->dev, "failed to get irq: %d\n", ret);
185 goto fail;
186 }
187
188 ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
189 NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
190 "hdmi_isr", hdmi);
191 if (ret < 0) {
192 dev_err(dev->dev, "failed to request IRQ%u: %d\n",
193 hdmi->irq, ret);
194 goto fail;
195 }
196
197 encoder->bridge = hdmi->bridge;
198
199 priv->bridges[priv->num_bridges++] = hdmi->bridge;
200 priv->connectors[priv->num_connectors++] = hdmi->connector;
201
202 return 0;
203
204fail:
205 if (hdmi) {
206 /* bridge/connector are normally destroyed by drm: */
207 if (hdmi->bridge)
208 hdmi->bridge->funcs->destroy(hdmi->bridge);
209 if (hdmi->connector)
210 hdmi->connector->funcs->destroy(hdmi->connector);
211 hdmi_destroy(&hdmi->refcount);
212 }
213
214 return ret;
215}
216
217/*
218 * The hdmi device:
219 */
220
221static int hdmi_dev_probe(struct platform_device *pdev)
222{
223 static struct hdmi_platform_config config = {};
224#ifdef CONFIG_OF
225 /* TODO */
226#else
227 if (cpu_is_apq8064()) {
228 config.phy_init = hdmi_phy_8960_init;
229 config.ddc_clk_gpio = 70;
230 config.ddc_data_gpio = 71;
231 config.hpd_gpio = 72;
232 config.pmic_gpio = 13 + NR_GPIO_IRQS;
233 } else if (cpu_is_msm8960()) {
234 config.phy_init = hdmi_phy_8960_init;
235 config.ddc_clk_gpio = 100;
236 config.ddc_data_gpio = 101;
237 config.hpd_gpio = 102;
238 config.pmic_gpio = -1;
239 } else if (cpu_is_msm8x60()) {
240 config.phy_init = hdmi_phy_8x60_init;
241 config.ddc_clk_gpio = 170;
242 config.ddc_data_gpio = 171;
243 config.hpd_gpio = 172;
244 config.pmic_gpio = -1;
245 }
246#endif
247 pdev->dev.platform_data = &config;
248 hdmi_pdev = pdev;
249 return 0;
250}
251
252static int hdmi_dev_remove(struct platform_device *pdev)
253{
254 hdmi_pdev = NULL;
255 return 0;
256}
257
258static struct platform_driver hdmi_driver = {
259 .probe = hdmi_dev_probe,
260 .remove = hdmi_dev_remove,
261 .driver.name = "hdmi_msm",
262};
263
264void __init hdmi_register(void)
265{
266 platform_driver_register(&hdmi_driver);
267}
268
269void __exit hdmi_unregister(void)
270{
271 platform_driver_unregister(&hdmi_driver);
272}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
new file mode 100644
index 000000000000..2c2ec566394c
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -0,0 +1,131 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __HDMI_H__
19#define __HDMI_H__
20
21#include <linux/i2c.h>
22#include <linux/clk.h>
23#include <linux/platform_device.h>
24#include <linux/regulator/consumer.h>
25
26#include "msm_drv.h"
27#include "hdmi.xml.h"
28
29
30struct hdmi_phy;
31
32struct hdmi {
33 struct kref refcount;
34
35 struct drm_device *dev;
36 struct platform_device *pdev;
37
38 void __iomem *mmio;
39
40 struct regulator *mvs; /* HDMI_5V */
41 struct regulator *mpp0; /* External 5V */
42
43 struct clk *clk;
44 struct clk *m_pclk;
45 struct clk *s_pclk;
46
47 struct hdmi_phy *phy;
48 struct i2c_adapter *i2c;
49 struct drm_connector *connector;
50 struct drm_bridge *bridge;
51
52 /* the encoder we are hooked to (outside of hdmi block) */
53 struct drm_encoder *encoder;
54
55 bool hdmi_mode; /* are we in hdmi mode? */
56
57 int irq;
58};
59
60/* platform config data (i.e. from DT, or pdata) */
61struct hdmi_platform_config {
62 struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
63 int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, pmic_gpio;
64};
65
66void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
67void hdmi_destroy(struct kref *kref);
68
69static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
70{
71 msm_writel(data, hdmi->mmio + reg);
72}
73
74static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
75{
76 return msm_readl(hdmi->mmio + reg);
77}
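/* Thin accessors over the "hdmi_msm_hdmi_addr" mmio region mapped in
 * hdmi_init(); all register traffic in this driver goes through these.
 */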
78
79static inline struct hdmi *hdmi_reference(struct hdmi *hdmi)
80{
81 kref_get(&hdmi->refcount);
82 return hdmi;
83}
84
85static inline void hdmi_unreference(struct hdmi *hdmi)
86{
87 kref_put(&hdmi->refcount, hdmi_destroy);
88}
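/* Lifetime note: hdmi_bridge_init() and hdmi_connector_init() each take
 * a reference with hdmi_reference() and drop it from their destroy
 * hooks with hdmi_unreference(); the final put lands in hdmi_destroy().
 */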
89
90/*
91 * The phy appears to be different, for example between 8960 and 8x60,
92 * so split the phy related functions out and load the correct one at
93 * runtime:
94 */
95
96struct hdmi_phy_funcs {
97 void (*destroy)(struct hdmi_phy *phy);
98 void (*reset)(struct hdmi_phy *phy);
99 void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock);
100 void (*powerdown)(struct hdmi_phy *phy);
101};
102
103struct hdmi_phy {
104 const struct hdmi_phy_funcs *funcs;
105};
106
107struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi);
108struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi);
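/* Minimal sketch (illustrative only, not part of this diff) of how a
 * concrete phy behind e.g. hdmi_phy_8960_init() would typically embed
 * struct hdmi_phy and recover itself in its ops via container_of():
 *
 *	struct hdmi_phy_8960 {
 *		struct hdmi_phy base;
 *		struct hdmi *hdmi;
 *	};
 *	#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
 *
 *	static void powerdown(struct hdmi_phy *phy)
 *	{
 *		struct hdmi_phy_8960 *p = to_hdmi_phy_8960(phy);
 *		... program REG_HDMI_8960_PHY_* via hdmi_write(p->hdmi, ...) ...
 *	}
 */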
109
110/*
111 * hdmi bridge:
112 */
113
114struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi);
115
116/*
117 * hdmi connector:
118 */
119
120void hdmi_connector_irq(struct drm_connector *connector);
121struct drm_connector *hdmi_connector_init(struct hdmi *hdmi);
122
123/*
124 * i2c adapter for ddc:
125 */
126
127void hdmi_i2c_irq(struct i2c_adapter *i2c);
128void hdmi_i2c_destroy(struct i2c_adapter *i2c);
129struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi);
130
131#endif /* __HDMI_H__ */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
new file mode 100644
index 000000000000..f5fa4865e059
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -0,0 +1,508 @@
1#ifndef HDMI_XML
2#define HDMI_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45enum hdmi_hdcp_key_state {
46 NO_KEYS = 0,
47 NOT_CHECKED = 1,
48 CHECKING = 2,
49 KEYS_VALID = 3,
50 AKSV_INVALID = 4,
51 CHECKSUM_MISMATCH = 5,
52};
53
54enum hdmi_ddc_read_write {
55 DDC_WRITE = 0,
56 DDC_READ = 1,
57};
58
59enum hdmi_acr_cts {
60 ACR_NONE = 0,
61 ACR_32 = 1,
62 ACR_44 = 2,
63 ACR_48 = 3,
64};
65
66#define REG_HDMI_CTRL 0x00000000
67#define HDMI_CTRL_ENABLE 0x00000001
68#define HDMI_CTRL_HDMI 0x00000002
69#define HDMI_CTRL_ENCRYPTED 0x00000004
70
71#define REG_HDMI_AUDIO_PKT_CTRL1 0x00000020
72#define HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND 0x00000001
73
74#define REG_HDMI_ACR_PKT_CTRL 0x00000024
75#define HDMI_ACR_PKT_CTRL_CONT 0x00000001
76#define HDMI_ACR_PKT_CTRL_SEND 0x00000002
77#define HDMI_ACR_PKT_CTRL_SELECT__MASK 0x00000030
78#define HDMI_ACR_PKT_CTRL_SELECT__SHIFT 4
79static inline uint32_t HDMI_ACR_PKT_CTRL_SELECT(enum hdmi_acr_cts val)
80{
81 return ((val) << HDMI_ACR_PKT_CTRL_SELECT__SHIFT) & HDMI_ACR_PKT_CTRL_SELECT__MASK;
82}
83#define HDMI_ACR_PKT_CTRL_SOURCE 0x00000100
84#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK 0x00070000
85#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT 16
86static inline uint32_t HDMI_ACR_PKT_CTRL_N_MULTIPLIER(uint32_t val)
87{
88 return ((val) << HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT) & HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK;
89}
90#define HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY 0x80000000
91
92#define REG_HDMI_VBI_PKT_CTRL 0x00000028
93#define HDMI_VBI_PKT_CTRL_GC_ENABLE 0x00000010
94#define HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME 0x00000020
95#define HDMI_VBI_PKT_CTRL_ISRC_SEND 0x00000100
96#define HDMI_VBI_PKT_CTRL_ISRC_CONTINUOUS 0x00000200
97#define HDMI_VBI_PKT_CTRL_ACP_SEND 0x00001000
98#define HDMI_VBI_PKT_CTRL_ACP_SRC_SW 0x00002000
99
100#define REG_HDMI_INFOFRAME_CTRL0 0x0000002c
101#define HDMI_INFOFRAME_CTRL0_AVI_SEND 0x00000001
102#define HDMI_INFOFRAME_CTRL0_AVI_CONT 0x00000002
103#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND 0x00000010
104#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT 0x00000020
105#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE 0x00000040
106#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE 0x00000080
107
108#define REG_HDMI_GEN_PKT_CTRL 0x00000034
109#define HDMI_GEN_PKT_CTRL_GENERIC0_SEND 0x00000001
110#define HDMI_GEN_PKT_CTRL_GENERIC0_CONT 0x00000002
111#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK 0x0000000c
112#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT 2
113static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE(uint32_t val)
114{
115 return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK;
116}
117#define HDMI_GEN_PKT_CTRL_GENERIC1_SEND 0x00000010
118#define HDMI_GEN_PKT_CTRL_GENERIC1_CONT 0x00000020
119#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK 0x003f0000
120#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT 16
121static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_LINE(uint32_t val)
122{
123 return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK;
124}
125#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK 0x3f000000
126#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT 24
127static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC1_LINE(uint32_t val)
128{
129 return ((val) << HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK;
130}
131
132#define REG_HDMI_GC 0x00000040
133#define HDMI_GC_MUTE 0x00000001
134
135#define REG_HDMI_AUDIO_PKT_CTRL2 0x00000044
136#define HDMI_AUDIO_PKT_CTRL2_OVERRIDE 0x00000001
137#define HDMI_AUDIO_PKT_CTRL2_LAYOUT 0x00000002
138
139static inline uint32_t REG_HDMI_AVI_INFO(uint32_t i0) { return 0x0000006c + 0x4*i0; }
140
141#define REG_HDMI_GENERIC0_HDR 0x00000084
142
143static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*i0; }
144
145#define REG_HDMI_GENERIC1_HDR 0x000000a4
146
147static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; }
148
149static inline uint32_t REG_HDMI_ACR(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
150
151static inline uint32_t REG_HDMI_ACR_0(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
152#define HDMI_ACR_0_CTS__MASK 0xfffff000
153#define HDMI_ACR_0_CTS__SHIFT 12
154static inline uint32_t HDMI_ACR_0_CTS(uint32_t val)
155{
156 return ((val) << HDMI_ACR_0_CTS__SHIFT) & HDMI_ACR_0_CTS__MASK;
157}
158
159static inline uint32_t REG_HDMI_ACR_1(uint32_t i0) { return 0x000000c8 + 0x8*i0; }
160#define HDMI_ACR_1_N__MASK 0xffffffff
161#define HDMI_ACR_1_N__SHIFT 0
162static inline uint32_t HDMI_ACR_1_N(uint32_t val)
163{
164 return ((val) << HDMI_ACR_1_N__SHIFT) & HDMI_ACR_1_N__MASK;
165}
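/* Background, from the HDMI spec rather than this header: audio clock
 * regeneration transmits an (N, CTS) pair satisfying
 * 128 * audio_fs = tmds_clk * N / CTS; e.g. N = 6144 is the recommended
 * value for 48kHz audio, with CTS derived from the actual pixel clock.
 */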
166
167#define REG_HDMI_AUDIO_INFO0 0x000000e4
168#define HDMI_AUDIO_INFO0_CHECKSUM__MASK 0x000000ff
169#define HDMI_AUDIO_INFO0_CHECKSUM__SHIFT 0
170static inline uint32_t HDMI_AUDIO_INFO0_CHECKSUM(uint32_t val)
171{
172 return ((val) << HDMI_AUDIO_INFO0_CHECKSUM__SHIFT) & HDMI_AUDIO_INFO0_CHECKSUM__MASK;
173}
174#define HDMI_AUDIO_INFO0_CC__MASK 0x00000700
175#define HDMI_AUDIO_INFO0_CC__SHIFT 8
176static inline uint32_t HDMI_AUDIO_INFO0_CC(uint32_t val)
177{
178 return ((val) << HDMI_AUDIO_INFO0_CC__SHIFT) & HDMI_AUDIO_INFO0_CC__MASK;
179}
180
181#define REG_HDMI_AUDIO_INFO1 0x000000e8
182#define HDMI_AUDIO_INFO1_CA__MASK 0x000000ff
183#define HDMI_AUDIO_INFO1_CA__SHIFT 0
184static inline uint32_t HDMI_AUDIO_INFO1_CA(uint32_t val)
185{
186 return ((val) << HDMI_AUDIO_INFO1_CA__SHIFT) & HDMI_AUDIO_INFO1_CA__MASK;
187}
188#define HDMI_AUDIO_INFO1_LSV__MASK 0x00007800
189#define HDMI_AUDIO_INFO1_LSV__SHIFT 11
190static inline uint32_t HDMI_AUDIO_INFO1_LSV(uint32_t val)
191{
192 return ((val) << HDMI_AUDIO_INFO1_LSV__SHIFT) & HDMI_AUDIO_INFO1_LSV__MASK;
193}
194#define HDMI_AUDIO_INFO1_DM_INH 0x00008000
195
196#define REG_HDMI_HDCP_CTRL 0x00000110
197#define HDMI_HDCP_CTRL_ENABLE 0x00000001
198#define HDMI_HDCP_CTRL_ENCRYPTION_ENABLE 0x00000100
199
200#define REG_HDMI_HDCP_INT_CTRL 0x00000118
201
202#define REG_HDMI_HDCP_LINK0_STATUS 0x0000011c
203#define HDMI_HDCP_LINK0_STATUS_AN_0_READY 0x00000100
204#define HDMI_HDCP_LINK0_STATUS_AN_1_READY 0x00000200
205#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK 0x70000000
206#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT 28
207static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state val)
208{
209 return ((val) << HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT) & HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK;
210}
211
212#define REG_HDMI_HDCP_RESET 0x00000130
213#define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001
214
215#define REG_HDMI_AUDIO_CFG 0x000001d0
216#define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001
217#define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0
218#define HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT 4
219static inline uint32_t HDMI_AUDIO_CFG_FIFO_WATERMARK(uint32_t val)
220{
221 return ((val) << HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT) & HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
222}
223
224#define REG_HDMI_USEC_REFTIMER 0x00000208
225
226#define REG_HDMI_DDC_CTRL 0x0000020c
227#define HDMI_DDC_CTRL_GO 0x00000001
228#define HDMI_DDC_CTRL_SOFT_RESET 0x00000002
229#define HDMI_DDC_CTRL_SEND_RESET 0x00000004
230#define HDMI_DDC_CTRL_SW_STATUS_RESET 0x00000008
231#define HDMI_DDC_CTRL_TRANSACTION_CNT__MASK 0x00300000
232#define HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT 20
233static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val)
234{
235 return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK;
236}
237
238#define REG_HDMI_DDC_INT_CTRL 0x00000214
239#define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001
240#define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002
241#define HDMI_DDC_INT_CTRL_SW_DONE_MASK 0x00000004
242
243#define REG_HDMI_DDC_SW_STATUS 0x00000218
244#define HDMI_DDC_SW_STATUS_NACK0 0x00001000
245#define HDMI_DDC_SW_STATUS_NACK1 0x00002000
246#define HDMI_DDC_SW_STATUS_NACK2 0x00004000
247#define HDMI_DDC_SW_STATUS_NACK3 0x00008000
248
249#define REG_HDMI_DDC_HW_STATUS 0x0000021c
250
251#define REG_HDMI_DDC_SPEED 0x00000220
252#define HDMI_DDC_SPEED_THRESHOLD__MASK 0x00000003
253#define HDMI_DDC_SPEED_THRESHOLD__SHIFT 0
254static inline uint32_t HDMI_DDC_SPEED_THRESHOLD(uint32_t val)
255{
256 return ((val) << HDMI_DDC_SPEED_THRESHOLD__SHIFT) & HDMI_DDC_SPEED_THRESHOLD__MASK;
257}
258#define HDMI_DDC_SPEED_PRESCALE__MASK 0xffff0000
259#define HDMI_DDC_SPEED_PRESCALE__SHIFT 16
260static inline uint32_t HDMI_DDC_SPEED_PRESCALE(uint32_t val)
261{
262 return ((val) << HDMI_DDC_SPEED_PRESCALE__SHIFT) & HDMI_DDC_SPEED_PRESCALE__MASK;
263}
264
265#define REG_HDMI_DDC_SETUP 0x00000224
266#define HDMI_DDC_SETUP_TIMEOUT__MASK 0xff000000
267#define HDMI_DDC_SETUP_TIMEOUT__SHIFT 24
268static inline uint32_t HDMI_DDC_SETUP_TIMEOUT(uint32_t val)
269{
270 return ((val) << HDMI_DDC_SETUP_TIMEOUT__SHIFT) & HDMI_DDC_SETUP_TIMEOUT__MASK;
271}
272
273static inline uint32_t REG_HDMI_I2C_TRANSACTION(uint32_t i0) { return 0x00000228 + 0x4*i0; }
274
275static inline uint32_t REG_HDMI_I2C_TRANSACTION_REG(uint32_t i0) { return 0x00000228 + 0x4*i0; }
276#define HDMI_I2C_TRANSACTION_REG_RW__MASK 0x00000001
277#define HDMI_I2C_TRANSACTION_REG_RW__SHIFT 0
278static inline uint32_t HDMI_I2C_TRANSACTION_REG_RW(enum hdmi_ddc_read_write val)
279{
280 return ((val) << HDMI_I2C_TRANSACTION_REG_RW__SHIFT) & HDMI_I2C_TRANSACTION_REG_RW__MASK;
281}
282#define HDMI_I2C_TRANSACTION_REG_STOP_ON_NACK 0x00000100
283#define HDMI_I2C_TRANSACTION_REG_START 0x00001000
284#define HDMI_I2C_TRANSACTION_REG_STOP 0x00002000
285#define HDMI_I2C_TRANSACTION_REG_CNT__MASK 0x00ff0000
286#define HDMI_I2C_TRANSACTION_REG_CNT__SHIFT 16
287static inline uint32_t HDMI_I2C_TRANSACTION_REG_CNT(uint32_t val)
288{
289 return ((val) << HDMI_I2C_TRANSACTION_REG_CNT__SHIFT) & HDMI_I2C_TRANSACTION_REG_CNT__MASK;
290}
291
292#define REG_HDMI_DDC_DATA 0x00000238
293#define HDMI_DDC_DATA_DATA_RW__MASK 0x00000001
294#define HDMI_DDC_DATA_DATA_RW__SHIFT 0
295static inline uint32_t HDMI_DDC_DATA_DATA_RW(enum hdmi_ddc_read_write val)
296{
297 return ((val) << HDMI_DDC_DATA_DATA_RW__SHIFT) & HDMI_DDC_DATA_DATA_RW__MASK;
298}
299#define HDMI_DDC_DATA_DATA__MASK 0x0000ff00
300#define HDMI_DDC_DATA_DATA__SHIFT 8
301static inline uint32_t HDMI_DDC_DATA_DATA(uint32_t val)
302{
303 return ((val) << HDMI_DDC_DATA_DATA__SHIFT) & HDMI_DDC_DATA_DATA__MASK;
304}
305#define HDMI_DDC_DATA_INDEX__MASK 0x00ff0000
306#define HDMI_DDC_DATA_INDEX__SHIFT 16
307static inline uint32_t HDMI_DDC_DATA_INDEX(uint32_t val)
308{
309 return ((val) << HDMI_DDC_DATA_INDEX__SHIFT) & HDMI_DDC_DATA_INDEX__MASK;
310}
311#define HDMI_DDC_DATA_INDEX_WRITE 0x80000000
312
313#define REG_HDMI_HPD_INT_STATUS 0x00000250
314#define HDMI_HPD_INT_STATUS_INT 0x00000001
315#define HDMI_HPD_INT_STATUS_CABLE_DETECTED 0x00000002
316
317#define REG_HDMI_HPD_INT_CTRL 0x00000254
318#define HDMI_HPD_INT_CTRL_INT_ACK 0x00000001
319#define HDMI_HPD_INT_CTRL_INT_CONNECT 0x00000002
320#define HDMI_HPD_INT_CTRL_INT_EN 0x00000004
321#define HDMI_HPD_INT_CTRL_RX_INT_ACK 0x00000010
322#define HDMI_HPD_INT_CTRL_RX_INT_EN 0x00000020
323#define HDMI_HPD_INT_CTRL_RCV_PLUGIN_DET_MASK 0x00000200
324
325#define REG_HDMI_HPD_CTRL 0x00000258
326#define HDMI_HPD_CTRL_TIMEOUT__MASK 0x00001fff
327#define HDMI_HPD_CTRL_TIMEOUT__SHIFT 0
328static inline uint32_t HDMI_HPD_CTRL_TIMEOUT(uint32_t val)
329{
330 return ((val) << HDMI_HPD_CTRL_TIMEOUT__SHIFT) & HDMI_HPD_CTRL_TIMEOUT__MASK;
331}
332#define HDMI_HPD_CTRL_ENABLE 0x10000000
333
334#define REG_HDMI_DDC_REF 0x0000027c
335#define HDMI_DDC_REF_REFTIMER_ENABLE 0x00010000
336#define HDMI_DDC_REF_REFTIMER__MASK 0x0000ffff
337#define HDMI_DDC_REF_REFTIMER__SHIFT 0
338static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
339{
340 return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK;
341}
342
343#define REG_HDMI_ACTIVE_HSYNC 0x000002b4
344#define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff
345#define HDMI_ACTIVE_HSYNC_START__SHIFT 0
346static inline uint32_t HDMI_ACTIVE_HSYNC_START(uint32_t val)
347{
348 return ((val) << HDMI_ACTIVE_HSYNC_START__SHIFT) & HDMI_ACTIVE_HSYNC_START__MASK;
349}
350#define HDMI_ACTIVE_HSYNC_END__MASK 0x0fff0000
351#define HDMI_ACTIVE_HSYNC_END__SHIFT 16
352static inline uint32_t HDMI_ACTIVE_HSYNC_END(uint32_t val)
353{
354 return ((val) << HDMI_ACTIVE_HSYNC_END__SHIFT) & HDMI_ACTIVE_HSYNC_END__MASK;
355}
356
357#define REG_HDMI_ACTIVE_VSYNC 0x000002b8
358#define HDMI_ACTIVE_VSYNC_START__MASK 0x00000fff
359#define HDMI_ACTIVE_VSYNC_START__SHIFT 0
360static inline uint32_t HDMI_ACTIVE_VSYNC_START(uint32_t val)
361{
362 return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) & HDMI_ACTIVE_VSYNC_START__MASK;
363}
364#define HDMI_ACTIVE_VSYNC_END__MASK 0x0fff0000
365#define HDMI_ACTIVE_VSYNC_END__SHIFT 16
366static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val)
367{
368 return ((val) << HDMI_ACTIVE_VSYNC_END__SHIFT) & HDMI_ACTIVE_VSYNC_END__MASK;
369}
370
371#define REG_HDMI_VSYNC_ACTIVE_F2 0x000002bc
372#define HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00000fff
373#define HDMI_VSYNC_ACTIVE_F2_START__SHIFT 0
374static inline uint32_t HDMI_VSYNC_ACTIVE_F2_START(uint32_t val)
375{
376 return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) & HDMI_VSYNC_ACTIVE_F2_START__MASK;
377}
378#define HDMI_VSYNC_ACTIVE_F2_END__MASK 0x0fff0000
379#define HDMI_VSYNC_ACTIVE_F2_END__SHIFT 16
380static inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val)
381{
382 return ((val) << HDMI_VSYNC_ACTIVE_F2_END__SHIFT) & HDMI_VSYNC_ACTIVE_F2_END__MASK;
383}
384
385#define REG_HDMI_TOTAL 0x000002c0
386#define HDMI_TOTAL_H_TOTAL__MASK 0x00000fff
387#define HDMI_TOTAL_H_TOTAL__SHIFT 0
388static inline uint32_t HDMI_TOTAL_H_TOTAL(uint32_t val)
389{
390 return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) & HDMI_TOTAL_H_TOTAL__MASK;
391}
392#define HDMI_TOTAL_V_TOTAL__MASK 0x0fff0000
393#define HDMI_TOTAL_V_TOTAL__SHIFT 16
394static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val)
395{
396 return ((val) << HDMI_TOTAL_V_TOTAL__SHIFT) & HDMI_TOTAL_V_TOTAL__MASK;
397}
398
399#define REG_HDMI_VSYNC_TOTAL_F2 0x000002c4
400#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00000fff
401#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT 0
402static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
403{
404 return ((val) << HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT) & HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK;
405}
406
407#define REG_HDMI_FRAME_CTRL 0x000002c8
408#define HDMI_FRAME_CTRL_RGB_MUX_SEL_BGR 0x00001000
409#define HDMI_FRAME_CTRL_VSYNC_LOW 0x10000000
410#define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000
411#define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000
412
413#define REG_HDMI_PHY_CTRL 0x000002d4
414#define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001
415#define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002
416#define HDMI_PHY_CTRL_SW_RESET 0x00000004
417#define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008
418
419#define REG_HDMI_AUD_INT 0x000002cc
420#define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001
421#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002
422#define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004
423#define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008
424
425#define REG_HDMI_8x60_PHY_REG0 0x00000300
426#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c
427#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT 2
428static inline uint32_t HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(uint32_t val)
429{
430 return ((val) << HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT) & HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK;
431}
432
433#define REG_HDMI_8x60_PHY_REG1 0x00000304
434#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK 0x000000f0
435#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT 4
436static inline uint32_t HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(uint32_t val)
437{
438 return ((val) << HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT) & HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK;
439}
440#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK 0x0000000f
441#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT 0
442static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
443{
444 return ((val) << HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT) & HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK;
445}
446
447#define REG_HDMI_8x60_PHY_REG2 0x00000308
448#define HDMI_8x60_PHY_REG2_PD_DESER 0x00000001
449#define HDMI_8x60_PHY_REG2_PD_DRIVE_1 0x00000002
450#define HDMI_8x60_PHY_REG2_PD_DRIVE_2 0x00000004
451#define HDMI_8x60_PHY_REG2_PD_DRIVE_3 0x00000008
452#define HDMI_8x60_PHY_REG2_PD_DRIVE_4 0x00000010
453#define HDMI_8x60_PHY_REG2_PD_PLL 0x00000020
454#define HDMI_8x60_PHY_REG2_PD_PWRGEN 0x00000040
455#define HDMI_8x60_PHY_REG2_RCV_SENSE_EN 0x00000080
456
457#define REG_HDMI_8x60_PHY_REG3 0x0000030c
458#define HDMI_8x60_PHY_REG3_PLL_ENABLE 0x00000001
459
460#define REG_HDMI_8x60_PHY_REG4 0x00000310
461
462#define REG_HDMI_8x60_PHY_REG5 0x00000314
463
464#define REG_HDMI_8x60_PHY_REG6 0x00000318
465
466#define REG_HDMI_8x60_PHY_REG7 0x0000031c
467
468#define REG_HDMI_8x60_PHY_REG8 0x00000320
469
470#define REG_HDMI_8x60_PHY_REG9 0x00000324
471
472#define REG_HDMI_8x60_PHY_REG10 0x00000328
473
474#define REG_HDMI_8x60_PHY_REG11 0x0000032c
475
476#define REG_HDMI_8x60_PHY_REG12 0x00000330
477#define HDMI_8x60_PHY_REG12_RETIMING_EN 0x00000001
478#define HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN 0x00000002
479#define HDMI_8x60_PHY_REG12_FORCE_LOCK 0x00000010
480
481#define REG_HDMI_8960_PHY_REG0 0x00000400
482
483#define REG_HDMI_8960_PHY_REG1 0x00000404
484
485#define REG_HDMI_8960_PHY_REG2 0x00000408
486
487#define REG_HDMI_8960_PHY_REG3 0x0000040c
488
489#define REG_HDMI_8960_PHY_REG4 0x00000410
490
491#define REG_HDMI_8960_PHY_REG5 0x00000414
492
493#define REG_HDMI_8960_PHY_REG6 0x00000418
494
495#define REG_HDMI_8960_PHY_REG7 0x0000041c
496
497#define REG_HDMI_8960_PHY_REG8 0x00000420
498
499#define REG_HDMI_8960_PHY_REG9 0x00000424
500
501#define REG_HDMI_8960_PHY_REG10 0x00000428
502
503#define REG_HDMI_8960_PHY_REG11 0x0000042c
504
505#define REG_HDMI_8960_PHY_REG12 0x00000430
506
507
508#endif /* HDMI_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
new file mode 100644
index 000000000000..5a8ee3473cf5
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -0,0 +1,167 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "hdmi.h"
19
20struct hdmi_bridge {
21 struct drm_bridge base;
22
23 struct hdmi *hdmi;
24
25 unsigned long int pixclock;
26};
27#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base)
28
29static void hdmi_bridge_destroy(struct drm_bridge *bridge)
30{
31 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
32 hdmi_unreference(hdmi_bridge->hdmi);
33 drm_bridge_cleanup(bridge);
34 kfree(hdmi_bridge);
35}
36
37static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
38{
39 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
40 struct hdmi *hdmi = hdmi_bridge->hdmi;
41 struct hdmi_phy *phy = hdmi->phy;
42
43 DBG("power up");
44 phy->funcs->powerup(phy, hdmi_bridge->pixclock);
45 hdmi_set_mode(hdmi, true);
46}
47
48static void hdmi_bridge_enable(struct drm_bridge *bridge)
49{
50}
51
52static void hdmi_bridge_disable(struct drm_bridge *bridge)
53{
54}
55
56static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
57{
58 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
59 struct hdmi *hdmi = hdmi_bridge->hdmi;
60 struct hdmi_phy *phy = hdmi->phy;
61
62 DBG("power down");
63 hdmi_set_mode(hdmi, false);
64 phy->funcs->powerdown(phy);
65}
66
67static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
68 struct drm_display_mode *mode,
69 struct drm_display_mode *adjusted_mode)
70{
71 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
72 struct hdmi *hdmi = hdmi_bridge->hdmi;
73 int hstart, hend, vstart, vend;
74 uint32_t frame_ctrl;
75
76 mode = adjusted_mode;
77
78 hdmi_bridge->pixclock = mode->clock * 1000;
79
80 hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1;
81
82 hstart = mode->htotal - mode->hsync_start;
83 hend = mode->htotal - mode->hsync_start + mode->hdisplay;
84
85 vstart = mode->vtotal - mode->vsync_start - 1;
86 vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1;
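	/* Worked example, assuming CEA-861 mode 16 (1080p60: htotal=2200,
	 * hsync_start=2008, hdisplay=1920; vtotal=1125, vsync_start=1084,
	 * vdisplay=1080): hstart=192, hend=2112, vstart=40, vend=1120.
	 */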
87
88 DBG("htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d",
89 mode->htotal, mode->vtotal, hstart, hend, vstart, vend);
90
91 hdmi_write(hdmi, REG_HDMI_TOTAL,
92 HDMI_TOTAL_H_TOTAL(mode->htotal - 1) |
93 HDMI_TOTAL_V_TOTAL(mode->vtotal - 1));
94
95 hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC,
96 HDMI_ACTIVE_HSYNC_START(hstart) |
97 HDMI_ACTIVE_HSYNC_END(hend));
98 hdmi_write(hdmi, REG_HDMI_ACTIVE_VSYNC,
99 HDMI_ACTIVE_VSYNC_START(vstart) |
100 HDMI_ACTIVE_VSYNC_END(vend));
101
102 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
103 hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
104 HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal));
105 hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
106 HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) |
107 HDMI_VSYNC_ACTIVE_F2_END(vend + 1));
108 } else {
109 hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
110 HDMI_VSYNC_TOTAL_F2_V_TOTAL(0));
111 hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
112 HDMI_VSYNC_ACTIVE_F2_START(0) |
113 HDMI_VSYNC_ACTIVE_F2_END(0));
114 }
115
116 frame_ctrl = 0;
117 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
118 frame_ctrl |= HDMI_FRAME_CTRL_HSYNC_LOW;
119 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
120 frame_ctrl |= HDMI_FRAME_CTRL_VSYNC_LOW;
121 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
122 frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
123 DBG("frame_ctrl=%08x", frame_ctrl);
124 hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
125
126	/* TODO: until we have audio, this might be safest: */
127 if (hdmi->hdmi_mode)
128 hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE);
129}
130
131static const struct drm_bridge_funcs hdmi_bridge_funcs = {
132 .pre_enable = hdmi_bridge_pre_enable,
133 .enable = hdmi_bridge_enable,
134 .disable = hdmi_bridge_disable,
135 .post_disable = hdmi_bridge_post_disable,
136 .mode_set = hdmi_bridge_mode_set,
137 .destroy = hdmi_bridge_destroy,
138};
139
140
141/* initialize bridge */
142struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
143{
144 struct drm_bridge *bridge = NULL;
145 struct hdmi_bridge *hdmi_bridge;
146 int ret;
147
148 hdmi_bridge = kzalloc(sizeof(*hdmi_bridge), GFP_KERNEL);
149 if (!hdmi_bridge) {
150 ret = -ENOMEM;
151 goto fail;
152 }
153
154 hdmi_bridge->hdmi = hdmi_reference(hdmi);
155
156 bridge = &hdmi_bridge->base;
157
158 drm_bridge_init(hdmi->dev, bridge, &hdmi_bridge_funcs);
159
160 return bridge;
161
162fail:
163 if (bridge)
164 hdmi_bridge_destroy(bridge);
165
166 return ERR_PTR(ret);
167}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
new file mode 100644
index 000000000000..823eee521a31
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -0,0 +1,367 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/gpio.h>
19
20#include "hdmi.h"
21
22struct hdmi_connector {
23 struct drm_connector base;
24 struct hdmi *hdmi;
25};
26#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
27
28static int gpio_config(struct hdmi *hdmi, bool on)
29{
30 struct drm_device *dev = hdmi->dev;
31 struct hdmi_platform_config *config =
32 hdmi->pdev->dev.platform_data;
33 int ret;
34
35 if (on) {
36 ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
37 if (ret) {
38 dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
39 "HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
40 goto error1;
41 }
42 ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
43 if (ret) {
44 dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
45 "HDMI_DDC_DATA", config->ddc_data_gpio, ret);
46 goto error2;
47 }
48 ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
49 if (ret) {
50 dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
51 "HDMI_HPD", config->hpd_gpio, ret);
52 goto error3;
53 }
54 if (config->pmic_gpio != -1) {
55 ret = gpio_request(config->pmic_gpio, "PMIC_HDMI_MUX_SEL");
56 if (ret) {
57 dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
58 "PMIC_HDMI_MUX_SEL", config->pmic_gpio, ret);
59 goto error4;
60 }
61 gpio_set_value_cansleep(config->pmic_gpio, 0);
62 }
63 DBG("gpio on");
64 } else {
65 gpio_free(config->ddc_clk_gpio);
66 gpio_free(config->ddc_data_gpio);
67 gpio_free(config->hpd_gpio);
68
69 if (config->pmic_gpio != -1) {
70 gpio_set_value_cansleep(config->pmic_gpio, 1);
71 gpio_free(config->pmic_gpio);
72 }
73 DBG("gpio off");
74 }
75
76 return 0;
77
78error4:
79 gpio_free(config->hpd_gpio);
80error3:
81 gpio_free(config->ddc_data_gpio);
82error2:
83 gpio_free(config->ddc_clk_gpio);
84error1:
85 return ret;
86}
87
88static int hpd_enable(struct hdmi_connector *hdmi_connector)
89{
90 struct hdmi *hdmi = hdmi_connector->hdmi;
91 struct drm_device *dev = hdmi_connector->base.dev;
92 struct hdmi_phy *phy = hdmi->phy;
93 uint32_t hpd_ctrl;
94 int ret;
95
96 ret = gpio_config(hdmi, true);
97 if (ret) {
98 dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
99 goto fail;
100 }
101
102 ret = clk_prepare_enable(hdmi->clk);
103 if (ret) {
104 dev_err(dev->dev, "failed to enable 'clk': %d\n", ret);
105 goto fail;
106 }
107
108 ret = clk_prepare_enable(hdmi->m_pclk);
109 if (ret) {
110 dev_err(dev->dev, "failed to enable 'm_pclk': %d\n", ret);
111 goto fail;
112 }
113
114 ret = clk_prepare_enable(hdmi->s_pclk);
115 if (ret) {
116 dev_err(dev->dev, "failed to enable 's_pclk': %d\n", ret);
117 goto fail;
118 }
119
120 if (hdmi->mpp0)
121 ret = regulator_enable(hdmi->mpp0);
122 if (!ret)
123 ret = regulator_enable(hdmi->mvs);
124 if (ret) {
125 dev_err(dev->dev, "failed to enable regulators: %d\n", ret);
126 goto fail;
127 }
128
129 hdmi_set_mode(hdmi, false);
130 phy->funcs->reset(phy);
131 hdmi_set_mode(hdmi, true);
132
133 hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
134
135 /* enable HPD events: */
136 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
137 HDMI_HPD_INT_CTRL_INT_CONNECT |
138 HDMI_HPD_INT_CTRL_INT_EN);
139
140 /* set timeout to 4.1ms (max) for hardware debounce */
141 hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
142 hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
143
144 /* Toggle HPD circuit to trigger HPD sense */
145 hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
146 ~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
147 hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
148 HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
149
150 return 0;
151
152fail:
153 return ret;
154}
155
156static int hpd_disable(struct hdmi_connector *hdmi_connector)
157{
158 struct hdmi *hdmi = hdmi_connector->hdmi;
159 struct drm_device *dev = hdmi_connector->base.dev;
160 int ret = 0;
161
162 /* Disable HPD interrupt */
163 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
164
165 hdmi_set_mode(hdmi, false);
166
167 if (hdmi->mpp0)
168 ret = regulator_disable(hdmi->mpp0);
169 if (!ret)
170 ret = regulator_disable(hdmi->mvs);
171 if (ret) {
172		dev_err(dev->dev, "failed to disable regulators: %d\n", ret);
173 goto fail;
174 }
175
176 clk_disable_unprepare(hdmi->clk);
177 clk_disable_unprepare(hdmi->m_pclk);
178 clk_disable_unprepare(hdmi->s_pclk);
179
180 ret = gpio_config(hdmi, false);
181 if (ret) {
182 dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
183 goto fail;
184 }
185
186 return 0;
187
188fail:
189 return ret;
190}
191
192void hdmi_connector_irq(struct drm_connector *connector)
193{
194 struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
195 struct hdmi *hdmi = hdmi_connector->hdmi;
196 uint32_t hpd_int_status, hpd_int_ctrl;
197
198 /* Process HPD: */
199 hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
200 hpd_int_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_INT_CTRL);
201
202 if ((hpd_int_ctrl & HDMI_HPD_INT_CTRL_INT_EN) &&
203 (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
204 bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);
205
206 DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
207
208 /* ack the irq: */
209 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
210 hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
211
212 drm_helper_hpd_irq_event(connector->dev);
213
214	/* detect disconnect if we are connected, or vice versa: */
215 hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
216 if (!detected)
217 hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
218 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
219 }
220}
221
222static enum drm_connector_status hdmi_connector_detect(
223 struct drm_connector *connector, bool force)
224{
225 struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
226 struct hdmi *hdmi = hdmi_connector->hdmi;
227 uint32_t hpd_int_status;
228 int retry = 20;
229
230 hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
231
232	/* the sense signal can be momentarily de-asserted in some cases;
233	 * don't let that trick us into thinking the monitor is gone:
234	 */
235 while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) {
236 mdelay(10);
237 hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
238 DBG("status=%08x", hpd_int_status);
239 }
240
241 return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
242 connector_status_connected : connector_status_disconnected;
243}
244
245static void hdmi_connector_destroy(struct drm_connector *connector)
246{
247 struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
248
249	hpd_disable(hdmi_connector);
250
251 drm_sysfs_connector_remove(connector);
252 drm_connector_cleanup(connector);
253
254 hdmi_unreference(hdmi_connector->hdmi);
255
256 kfree(hdmi_connector);
257}
258
259static int hdmi_connector_get_modes(struct drm_connector *connector)
260{
261 struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
262 struct hdmi *hdmi = hdmi_connector->hdmi;
263 struct edid *edid;
264 uint32_t hdmi_ctrl;
265 int ret = 0;
266
267 hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
268 hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
269
270 edid = drm_get_edid(connector, hdmi->i2c);
271
272 hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
273
274 drm_mode_connector_update_edid_property(connector, edid);
275
276 if (edid) {
277 ret = drm_add_edid_modes(connector, edid);
278 kfree(edid);
279 }
280
281 return ret;
282}
283
284static int hdmi_connector_mode_valid(struct drm_connector *connector,
285 struct drm_display_mode *mode)
286{
287 struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
288 struct msm_drm_private *priv = connector->dev->dev_private;
289 struct msm_kms *kms = priv->kms;
290 long actual, requested;
291
292 requested = 1000 * mode->clock;
293 actual = kms->funcs->round_pixclk(kms,
294 requested, hdmi_connector->hdmi->encoder);
295
296 DBG("requested=%ld, actual=%ld", requested, actual);
297
298 if (actual != requested)
299 return MODE_CLOCK_RANGE;
300
301 return 0;
302}
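/* Only exact matches pass: round_pixclk() reports the rate the encoder
 * clock would actually produce, so any difference means this mode's
 * pixel clock cannot be generated.
 */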
303
304static struct drm_encoder *
305hdmi_connector_best_encoder(struct drm_connector *connector)
306{
307 struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
308 return hdmi_connector->hdmi->encoder;
309}
310
311static const struct drm_connector_funcs hdmi_connector_funcs = {
312 .dpms = drm_helper_connector_dpms,
313 .detect = hdmi_connector_detect,
314 .fill_modes = drm_helper_probe_single_connector_modes,
315 .destroy = hdmi_connector_destroy,
316};
317
318static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
319 .get_modes = hdmi_connector_get_modes,
320 .mode_valid = hdmi_connector_mode_valid,
321 .best_encoder = hdmi_connector_best_encoder,
322};
323
324/* initialize connector */
325struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
326{
327 struct drm_connector *connector = NULL;
328 struct hdmi_connector *hdmi_connector;
329 int ret;
330
331 hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
332 if (!hdmi_connector) {
333 ret = -ENOMEM;
334 goto fail;
335 }
336
337 hdmi_connector->hdmi = hdmi_reference(hdmi);
338
339 connector = &hdmi_connector->base;
340
341 drm_connector_init(hdmi->dev, connector, &hdmi_connector_funcs,
342 DRM_MODE_CONNECTOR_HDMIA);
343 drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
344
345 connector->polled = DRM_CONNECTOR_POLL_HPD;
346
347 connector->interlace_allowed = 1;
348 connector->doublescan_allowed = 0;
349
350 drm_sysfs_connector_add(connector);
351
352 ret = hpd_enable(hdmi_connector);
353 if (ret) {
354 dev_err(hdmi->dev->dev, "failed to enable HPD: %d\n", ret);
355 goto fail;
356 }
357
358 drm_mode_connector_attach_encoder(connector, hdmi->encoder);
359
360 return connector;
361
362fail:
363 if (connector)
364 hdmi_connector_destroy(connector);
365
366 return ERR_PTR(ret);
367}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
new file mode 100644
index 000000000000..f4ab7f70fed1
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -0,0 +1,281 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "hdmi.h"
19
20struct hdmi_i2c_adapter {
21 struct i2c_adapter base;
22 struct hdmi *hdmi;
23 bool sw_done;
24 wait_queue_head_t ddc_event;
25};
26#define to_hdmi_i2c_adapter(x) container_of(x, struct hdmi_i2c_adapter, base)
27
28static void init_ddc(struct hdmi_i2c_adapter *hdmi_i2c)
29{
30 struct hdmi *hdmi = hdmi_i2c->hdmi;
31
32 hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
33 HDMI_DDC_CTRL_SW_STATUS_RESET);
34 hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
35 HDMI_DDC_CTRL_SOFT_RESET);
36
37 hdmi_write(hdmi, REG_HDMI_DDC_SPEED,
38 HDMI_DDC_SPEED_THRESHOLD(2) |
39 HDMI_DDC_SPEED_PRESCALE(10));
40
41 hdmi_write(hdmi, REG_HDMI_DDC_SETUP,
42 HDMI_DDC_SETUP_TIMEOUT(0xff));
43
44 /* enable reference timer for 27us */
45 hdmi_write(hdmi, REG_HDMI_DDC_REF,
46 HDMI_DDC_REF_REFTIMER_ENABLE |
47 HDMI_DDC_REF_REFTIMER(27));
48}
49
50static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c)
51{
52 struct hdmi *hdmi = hdmi_i2c->hdmi;
53 struct drm_device *dev = hdmi->dev;
54 uint32_t retry = 0xffff;
55 uint32_t ddc_int_ctrl;
56
57 do {
58 --retry;
59
60 hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
61 HDMI_DDC_INT_CTRL_SW_DONE_ACK |
62 HDMI_DDC_INT_CTRL_SW_DONE_MASK);
63
64 ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
65
66 } while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
67
68 if (!retry) {
69 dev_err(dev->dev, "timeout waiting for DDC\n");
70 return -ETIMEDOUT;
71 }
72
73 hdmi_i2c->sw_done = false;
74
75 return 0;
76}
77
78#define MAX_TRANSACTIONS 4
79
80static bool sw_done(struct hdmi_i2c_adapter *hdmi_i2c)
81{
82 struct hdmi *hdmi = hdmi_i2c->hdmi;
83
84 if (!hdmi_i2c->sw_done) {
85 uint32_t ddc_int_ctrl;
86
87 ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
88
89 if ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_MASK) &&
90 (ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT)) {
91 hdmi_i2c->sw_done = true;
92 hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
93 HDMI_DDC_INT_CTRL_SW_DONE_ACK);
94 }
95 }
96
97 return hdmi_i2c->sw_done;
98}
99
100static int hdmi_i2c_xfer(struct i2c_adapter *i2c,
101 struct i2c_msg *msgs, int num)
102{
103 struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
104 struct hdmi *hdmi = hdmi_i2c->hdmi;
105 struct drm_device *dev = hdmi->dev;
106 static const uint32_t nack[] = {
107 HDMI_DDC_SW_STATUS_NACK0, HDMI_DDC_SW_STATUS_NACK1,
108 HDMI_DDC_SW_STATUS_NACK2, HDMI_DDC_SW_STATUS_NACK3,
109 };
110 int indices[MAX_TRANSACTIONS];
111 int ret, i, j, index = 0;
112 uint32_t ddc_status, ddc_data, i2c_trans;
113
114 num = min(num, MAX_TRANSACTIONS);
115
116 WARN_ON(!(hdmi_read(hdmi, REG_HDMI_CTRL) & HDMI_CTRL_ENABLE));
117
118 if (num == 0)
119 return num;
120
121 init_ddc(hdmi_i2c);
122
123 ret = ddc_clear_irq(hdmi_i2c);
124 if (ret)
125 return ret;
126
127 for (i = 0; i < num; i++) {
128 struct i2c_msg *p = &msgs[i];
129 uint32_t raw_addr = p->addr << 1;
130
131 if (p->flags & I2C_M_RD)
132 raw_addr |= 1;
133
134 ddc_data = HDMI_DDC_DATA_DATA(raw_addr) |
135 HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
136
137 if (i == 0) {
138 ddc_data |= HDMI_DDC_DATA_INDEX(0) |
139 HDMI_DDC_DATA_INDEX_WRITE;
140 }
141
142 hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
143 index++;
144
145 indices[i] = index;
146
147 if (p->flags & I2C_M_RD) {
148 index += p->len;
149 } else {
150 for (j = 0; j < p->len; j++) {
151 ddc_data = HDMI_DDC_DATA_DATA(p->buf[j]) |
152 HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
153 hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
154 index++;
155 }
156 }
157
158 i2c_trans = HDMI_I2C_TRANSACTION_REG_CNT(p->len) |
159 HDMI_I2C_TRANSACTION_REG_RW(
160 (p->flags & I2C_M_RD) ? DDC_READ : DDC_WRITE) |
161 HDMI_I2C_TRANSACTION_REG_START;
162
163 if (i == (num - 1))
164 i2c_trans |= HDMI_I2C_TRANSACTION_REG_STOP;
165
166 hdmi_write(hdmi, REG_HDMI_I2C_TRANSACTION(i), i2c_trans);
167 }
168
169 /* trigger the transfer: */
170 hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
171 HDMI_DDC_CTRL_TRANSACTION_CNT(num - 1) |
172 HDMI_DDC_CTRL_GO);
173
174 ret = wait_event_timeout(hdmi_i2c->ddc_event, sw_done(hdmi_i2c), HZ/4);
175 if (ret <= 0) {
176 if (ret == 0)
177 ret = -ETIMEDOUT;
178 dev_warn(dev->dev, "DDC timeout: %d\n", ret);
179 DBG("sw_status=%08x, hw_status=%08x, int_ctrl=%08x",
180 hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS),
181 hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS),
182 hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL));
183 return ret;
184 }
185
186 ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS);
187
188 /* read back results of any read transactions: */
189 for (i = 0; i < num; i++) {
190 struct i2c_msg *p = &msgs[i];
191
192 if (!(p->flags & I2C_M_RD))
193 continue;
194
195 /* check for NACK: */
196 if (ddc_status & nack[i]) {
197 DBG("ddc_status=%08x", ddc_status);
198 break;
199 }
200
201 ddc_data = HDMI_DDC_DATA_DATA_RW(DDC_READ) |
202 HDMI_DDC_DATA_INDEX(indices[i]) |
203 HDMI_DDC_DATA_INDEX_WRITE;
204
205 hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
206
207 /* discard first byte: */
208 hdmi_read(hdmi, REG_HDMI_DDC_DATA);
209
210 for (j = 0; j < p->len; j++) {
211 ddc_data = hdmi_read(hdmi, REG_HDMI_DDC_DATA);
212 p->buf[j] = FIELD(ddc_data, HDMI_DDC_DATA_DATA);
213 }
214 }
215
216 return i;
217}
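
The transfer path above packs up to MAX_TRANSACTIONS i2c_msg entries into the
hardware transaction registers, with indices[] recording where each message's
payload starts in the DDC data FIFO. As a minimal sketch (assuming the
standard DDC slave address 0x50), a block-0 EDID fetch arrives here as a
one-byte offset write followed by a 128-byte read:

	u8 offset = 0;
	u8 block[128];
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = block },
	};

	ret = i2c_transfer(i2c, msgs, ARRAY_SIZE(msgs));

In practice the DRM EDID helpers generate these messages; the sketch only
shows the shape of traffic this master_xfer has to handle.
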
218
219static u32 hdmi_i2c_func(struct i2c_adapter *adapter)
220{
221 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
222}
223
224static const struct i2c_algorithm hdmi_i2c_algorithm = {
225 .master_xfer = hdmi_i2c_xfer,
226 .functionality = hdmi_i2c_func,
227};
228
229void hdmi_i2c_irq(struct i2c_adapter *i2c)
230{
231 struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
232
233 if (sw_done(hdmi_i2c))
234 wake_up_all(&hdmi_i2c->ddc_event);
235}
236
237void hdmi_i2c_destroy(struct i2c_adapter *i2c)
238{
239 struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
240 i2c_del_adapter(i2c);
241 kfree(hdmi_i2c);
242}
243
244struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi)
245{
246 struct drm_device *dev = hdmi->dev;
247 struct hdmi_i2c_adapter *hdmi_i2c;
248 struct i2c_adapter *i2c = NULL;
249 int ret;
250
251 hdmi_i2c = kzalloc(sizeof(*hdmi_i2c), GFP_KERNEL);
252 if (!hdmi_i2c) {
253 ret = -ENOMEM;
254 goto fail;
255 }
256
257 i2c = &hdmi_i2c->base;
258
259 hdmi_i2c->hdmi = hdmi;
260 init_waitqueue_head(&hdmi_i2c->ddc_event);
261
262
263 i2c->owner = THIS_MODULE;
264 i2c->class = I2C_CLASS_DDC;
265 snprintf(i2c->name, sizeof(i2c->name), "msm hdmi i2c");
266 i2c->dev.parent = &hdmi->pdev->dev;
267 i2c->algo = &hdmi_i2c_algorithm;
268
269 ret = i2c_add_adapter(i2c);
270 if (ret) {
271 dev_err(dev->dev, "failed to register hdmi i2c: %d\n", ret);
272 goto fail;
273 }
274
275 return i2c;
276
277fail:
278 if (i2c)
279 hdmi_i2c_destroy(i2c);
280 return ERR_PTR(ret);
281}
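
The adapter registered here exists mainly so the connector's .get_modes path
can hand it to the stock DRM EDID helpers. A hedged sketch, assuming the
adapter is kept on the hdmi struct as hdmi->i2c (field name illustrative):

	struct edid *edid = drm_get_edid(connector, hdmi->i2c);

	if (edid) {
		drm_mode_connector_update_edid_property(connector, edid);
		ret = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}
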
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
new file mode 100644
index 000000000000..e5b7ed5b8f01
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -0,0 +1,141 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "hdmi.h"
19
20struct hdmi_phy_8960 {
21 struct hdmi_phy base;
22 struct hdmi *hdmi;
23};
24#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
25
26static void hdmi_phy_8960_destroy(struct hdmi_phy *phy)
27{
28 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
29 kfree(phy_8960);
30}
31
32static void hdmi_phy_8960_reset(struct hdmi_phy *phy)
33{
34 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
35 struct hdmi *hdmi = phy_8960->hdmi;
36 unsigned int val;
37
38 val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
39
40 if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
41 /* pull low */
42 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
43 val & ~HDMI_PHY_CTRL_SW_RESET);
44 } else {
45 /* pull high */
46 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
47 val | HDMI_PHY_CTRL_SW_RESET);
48 }
49
50 if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
51 /* pull low */
52 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
53 val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
54 } else {
55 /* pull high */
56 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
57 val | HDMI_PHY_CTRL_SW_RESET_PLL);
58 }
59
60 msleep(100);
61
62 if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
63 /* pull high */
64 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
65 val | HDMI_PHY_CTRL_SW_RESET);
66 } else {
67 /* pull low */
68 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
69 val & ~HDMI_PHY_CTRL_SW_RESET);
70 }
71
72 if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
73 /* pull high */
74 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
75 val | HDMI_PHY_CTRL_SW_RESET_PLL);
76 } else {
77 /* pull low */
78 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
79 val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
80 }
81}
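
The sequence above reads the *_LOW polarity bits once and toggles SW_RESET
and SW_RESET_PLL accordingly: an active-low reset is asserted by clearing the
bit and released by setting it, and the reverse for active-high. A
hypothetical helper expressing that pattern (not part of the driver):

	static void phy_reset_toggle(struct hdmi *hdmi, uint32_t val,
			uint32_t bit, bool active_low, bool assert)
	{
		/* an active-low reset asserts by pulling the line low */
		if (active_low == assert)
			hdmi_write(hdmi, REG_HDMI_PHY_CTRL, val & ~bit);
		else
			hdmi_write(hdmi, REG_HDMI_PHY_CTRL, val | bit);
	}
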
82
83static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
84 unsigned long int pixclock)
85{
86 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
87 struct hdmi *hdmi = phy_8960->hdmi;
88
89 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG0, 0x1b);
90 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG1, 0xf2);
91 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG4, 0x00);
92 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG5, 0x00);
93 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG6, 0x00);
94 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG7, 0x00);
95 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG8, 0x00);
96 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG9, 0x00);
97 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG10, 0x00);
98 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG11, 0x00);
99 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG3, 0x20);
100}
101
102static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
103{
104 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
105 struct hdmi *hdmi = phy_8960->hdmi;
106
107 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x7f);
108}
109
110static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = {
111 .destroy = hdmi_phy_8960_destroy,
112 .reset = hdmi_phy_8960_reset,
113 .powerup = hdmi_phy_8960_powerup,
114 .powerdown = hdmi_phy_8960_powerdown,
115};
116
117struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
118{
119 struct hdmi_phy_8960 *phy_8960;
120 struct hdmi_phy *phy = NULL;
121 int ret;
122
123 phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
124 if (!phy_8960) {
125 ret = -ENOMEM;
126 goto fail;
127 }
128
129 phy = &phy_8960->base;
130
131 phy->funcs = &hdmi_phy_8960_funcs;
132
133 phy_8960->hdmi = hdmi;
134
135 return phy;
136
137fail:
138 if (phy)
139 hdmi_phy_8960_destroy(phy);
140 return ERR_PTR(ret);
141}
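
Callers reach the PHY only through the funcs vtable filled in here. A minimal
sketch of the dispatch, assuming the PHY pointer is kept on the hdmi struct
and that pixclock is passed in Hz (as the 27000000 comparison in the 8x60
variant below suggests):

	struct hdmi_phy *phy = hdmi->phy;

	phy->funcs->reset(phy);
	phy->funcs->powerup(phy, mode->clock * 1000);	/* drm_display_mode clock is in kHz */
	/* ... enable the link ... */
	phy->funcs->powerdown(phy);
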
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
new file mode 100644
index 000000000000..391433c1af7c
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -0,0 +1,214 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "hdmi.h"
19
20struct hdmi_phy_8x60 {
21 struct hdmi_phy base;
22 struct hdmi *hdmi;
23};
24#define to_hdmi_phy_8x60(x) container_of(x, struct hdmi_phy_8x60, base)
25
26static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy)
27{
28 struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
29 kfree(phy_8x60);
30}
31
32static void hdmi_phy_8x60_reset(struct hdmi_phy *phy)
33{
34 struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
35 struct hdmi *hdmi = phy_8x60->hdmi;
36 unsigned int val;
37
38 val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
39
40 if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
41 /* pull low */
42 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
43 val & ~HDMI_PHY_CTRL_SW_RESET);
44 } else {
45 /* pull high */
46 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
47 val | HDMI_PHY_CTRL_SW_RESET);
48 }
49
50 msleep(100);
51
52 if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
53 /* pull high */
54 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
55 val | HDMI_PHY_CTRL_SW_RESET);
56 } else {
57 /* pull low */
58 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
59 val & ~HDMI_PHY_CTRL_SW_RESET);
60 }
61}
62
63static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
64 unsigned long int pixclock)
65{
66 struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
67 struct hdmi *hdmi = phy_8x60->hdmi;
68
69	/* De-serializer delay is don't-care (D/C) in non-loopback mode: */
70 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG0,
71 HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3));
72
73 if (pixclock == 27000000) {
74 /* video_format == HDMI_VFRMT_720x480p60_16_9 */
75 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
76 HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
77 HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3));
78 } else {
79 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
80 HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
81 HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4));
82 }
83
84	/* Always start from the fully powered-down state: */
85 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
86 HDMI_8x60_PHY_REG2_PD_PWRGEN |
87 HDMI_8x60_PHY_REG2_PD_PLL |
88 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
89 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
90 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
91 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
92 HDMI_8x60_PHY_REG2_PD_DESER);
93
94 /* Turn PowerGen on: */
95 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
96 HDMI_8x60_PHY_REG2_PD_PLL |
97 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
98 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
99 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
100 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
101 HDMI_8x60_PHY_REG2_PD_DESER);
102
103 /* Turn PLL power on: */
104 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
105 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
106 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
107 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
108 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
109 HDMI_8x60_PHY_REG2_PD_DESER);
110
111	/* Drive PLL_ENABLE high after de-asserting the PLL power-down: */
112 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3,
113 HDMI_8x60_PHY_REG3_PLL_ENABLE);
114
115 /* ASIC power on; PHY REG9 = 0 */
116 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
117
118	/* Enable PLL lock detect (the lock-detect output goes high once
119	 * the PLL locks) and enable the re-timing logic.
120	 */
121 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
122 HDMI_8x60_PHY_REG12_RETIMING_EN |
123 HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN);
124
125 /* Drivers are on: */
126 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
127 HDMI_8x60_PHY_REG2_PD_DESER);
128
129	/* Also enable the RX-sense detector: */
130 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
131 HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
132 HDMI_8x60_PHY_REG2_PD_DESER);
133
134 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG4, 0);
135 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG5, 0);
136 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG6, 0);
137 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG7, 0);
138 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG8, 0);
139 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
140 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG10, 0);
141 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG11, 0);
142
143	/* Also force lock based on the lock-detect counter: */
144 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
145 HDMI_8x60_PHY_REG12_RETIMING_EN |
146 HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN |
147 HDMI_8x60_PHY_REG12_FORCE_LOCK);
148}
149
150static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy)
151{
152 struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
153 struct hdmi *hdmi = phy_8x60->hdmi;
154
155 /* Assert RESET PHY from controller */
156 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
157 HDMI_PHY_CTRL_SW_RESET);
158 udelay(10);
159 /* De-assert RESET PHY from controller */
160 hdmi_write(hdmi, REG_HDMI_PHY_CTRL, 0);
161 /* Turn off Driver */
162 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
163 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
164 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
165 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
166 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
167 HDMI_8x60_PHY_REG2_PD_DESER);
168 udelay(10);
169 /* Disable PLL */
170 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3, 0);
171 /* Power down PHY, but keep RX-sense: */
172 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
173 HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
174 HDMI_8x60_PHY_REG2_PD_PWRGEN |
175 HDMI_8x60_PHY_REG2_PD_PLL |
176 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
177 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
178 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
179 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
180 HDMI_8x60_PHY_REG2_PD_DESER);
181}
182
183static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = {
184 .destroy = hdmi_phy_8x60_destroy,
185 .reset = hdmi_phy_8x60_reset,
186 .powerup = hdmi_phy_8x60_powerup,
187 .powerdown = hdmi_phy_8x60_powerdown,
188};
189
190struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi)
191{
192 struct hdmi_phy_8x60 *phy_8x60;
193 struct hdmi_phy *phy = NULL;
194 int ret;
195
196 phy_8x60 = kzalloc(sizeof(*phy_8x60), GFP_KERNEL);
197 if (!phy_8x60) {
198 ret = -ENOMEM;
199 goto fail;
200 }
201
202 phy = &phy_8x60->base;
203
204 phy->funcs = &hdmi_phy_8x60_funcs;
205
206 phy_8x60->hdmi = hdmi;
207
208 return phy;
209
210fail:
211 if (phy)
212 hdmi_phy_8x60_destroy(phy);
213 return ERR_PTR(ret);
214}
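
With both variants exposing the same constructor shape, the HDMI core can
pick one at probe time. An illustrative selection (the predicate is
hypothetical; the driver's actual platform check is not shown here):

	struct hdmi_phy *phy;

	if (is_msm8960)		/* hypothetical SoC check */
		phy = hdmi_phy_8960_init(hdmi);
	else
		phy = hdmi_phy_8x60_init(hdmi);

	if (IS_ERR(phy))
		return PTR_ERR(phy);
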
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
new file mode 100644
index 000000000000..bee36363bcd0
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -0,0 +1,50 @@
1#ifndef QFPROM_XML
2#define QFPROM_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45#define REG_QFPROM_CONFIG_ROW0_LSB 0x00000238
46#define QFPROM_CONFIG_ROW0_LSB_HDMI_DISABLE 0x00200000
47#define QFPROM_CONFIG_ROW0_LSB_HDCP_DISABLE 0x00400000
48
49
50#endif /* QFPROM_XML */
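
These fuse bits let the driver detect parts where HDMI or HDCP support has
been blown off at the factory. A hedged sketch, assuming the QFPROM block has
been ioremap'd and qfprom_mmio (illustrative name, a void __iomem *) points
at it:

	u32 row0 = readl(qfprom_mmio + REG_QFPROM_CONFIG_ROW0_LSB);

	if (row0 & QFPROM_CONFIG_ROW0_LSB_HDMI_DISABLE)
		return -ENODEV;		/* HDMI fused off on this part */

	hdcp_supported = !(row0 & QFPROM_CONFIG_ROW0_LSB_HDCP_DISABLE);
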
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
new file mode 100644
index 000000000000..bbeeebe2db55
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
@@ -0,0 +1,1061 @@
1#ifndef MDP4_XML
2#define MDP4_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45enum mpd4_bpc {
46 BPC1 = 0,
47 BPC5 = 1,
48 BPC6 = 2,
49 BPC8 = 3,
50};
51
52enum mpd4_bpc_alpha {
53 BPC1A = 0,
54 BPC4A = 1,
55 BPC6A = 2,
56 BPC8A = 3,
57};
58
59enum mpd4_alpha_type {
60 FG_CONST = 0,
61 BG_CONST = 1,
62 FG_PIXEL = 2,
63 BG_PIXEL = 3,
64};
65
66enum mpd4_pipe {
67 VG1 = 0,
68 VG2 = 1,
69 RGB1 = 2,
70 RGB2 = 3,
71 RGB3 = 4,
72 VG3 = 5,
73 VG4 = 6,
74};
75
76enum mpd4_mixer {
77 MIXER0 = 0,
78 MIXER1 = 1,
79 MIXER2 = 2,
80};
81
82enum mpd4_mixer_stage_id {
83 STAGE_UNUSED = 0,
84 STAGE_BASE = 1,
85 STAGE0 = 2,
86 STAGE1 = 3,
87 STAGE2 = 4,
88 STAGE3 = 5,
89};
90
91enum mdp4_intf {
92 INTF_LCDC_DTV = 0,
93 INTF_DSI_VIDEO = 1,
94 INTF_DSI_CMD = 2,
95 INTF_EBI2_TV = 3,
96};
97
98enum mdp4_cursor_format {
99 CURSOR_ARGB = 1,
100 CURSOR_XRGB = 2,
101};
102
103enum mdp4_dma {
104 DMA_P = 0,
105 DMA_S = 1,
106 DMA_E = 2,
107};
108
109#define MDP4_IRQ_OVERLAY0_DONE 0x00000001
110#define MDP4_IRQ_OVERLAY1_DONE 0x00000002
111#define MDP4_IRQ_DMA_S_DONE 0x00000004
112#define MDP4_IRQ_DMA_E_DONE 0x00000008
113#define MDP4_IRQ_DMA_P_DONE 0x00000010
114#define MDP4_IRQ_VG1_HISTOGRAM 0x00000020
115#define MDP4_IRQ_VG2_HISTOGRAM 0x00000040
116#define MDP4_IRQ_PRIMARY_VSYNC 0x00000080
117#define MDP4_IRQ_PRIMARY_INTF_UDERRUN 0x00000100
118#define MDP4_IRQ_EXTERNAL_VSYNC 0x00000200
119#define MDP4_IRQ_EXTERNAL_INTF_UDERRUN 0x00000400
120#define MDP4_IRQ_PRIMARY_RDPTR 0x00000800
121#define MDP4_IRQ_DMA_P_HISTOGRAM 0x00020000
122#define MDP4_IRQ_DMA_S_HISTOGRAM 0x04000000
123#define MDP4_IRQ_OVERLAY2_DONE 0x40000000
124#define REG_MDP4_VERSION 0x00000000
125#define MDP4_VERSION_MINOR__MASK 0x00ff0000
126#define MDP4_VERSION_MINOR__SHIFT 16
127static inline uint32_t MDP4_VERSION_MINOR(uint32_t val)
128{
129 return ((val) << MDP4_VERSION_MINOR__SHIFT) & MDP4_VERSION_MINOR__MASK;
130}
131#define MDP4_VERSION_MAJOR__MASK 0xff000000
132#define MDP4_VERSION_MAJOR__SHIFT 24
133static inline uint32_t MDP4_VERSION_MAJOR(uint32_t val)
134{
135 return ((val) << MDP4_VERSION_MAJOR__SHIFT) & MDP4_VERSION_MAJOR__MASK;
136}
137
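The generated NAME(val) helpers shift a value into its field and mask it; the
inverse decode is the driver's FIELD() macro (used in hdmi_i2c.c above),
which masks and shifts back down. A short sketch, with mdp4_read as a
hypothetical register accessor:

	uint32_t v = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
	uint32_t major = FIELD(v, MDP4_VERSION_MAJOR);
	uint32_t minor = FIELD(v, MDP4_VERSION_MINOR);
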
138#define REG_MDP4_OVLP0_KICK 0x00000004
139
140#define REG_MDP4_OVLP1_KICK 0x00000008
141
142#define REG_MDP4_OVLP2_KICK 0x000000d0
143
144#define REG_MDP4_DMA_P_KICK 0x0000000c
145
146#define REG_MDP4_DMA_S_KICK 0x00000010
147
148#define REG_MDP4_DMA_E_KICK 0x00000014
149
150#define REG_MDP4_DISP_STATUS 0x00000018
151
152#define REG_MDP4_DISP_INTF_SEL 0x00000038
153#define MDP4_DISP_INTF_SEL_PRIM__MASK 0x00000003
154#define MDP4_DISP_INTF_SEL_PRIM__SHIFT 0
155static inline uint32_t MDP4_DISP_INTF_SEL_PRIM(enum mdp4_intf val)
156{
157 return ((val) << MDP4_DISP_INTF_SEL_PRIM__SHIFT) & MDP4_DISP_INTF_SEL_PRIM__MASK;
158}
159#define MDP4_DISP_INTF_SEL_SEC__MASK 0x0000000c
160#define MDP4_DISP_INTF_SEL_SEC__SHIFT 2
161static inline uint32_t MDP4_DISP_INTF_SEL_SEC(enum mdp4_intf val)
162{
163 return ((val) << MDP4_DISP_INTF_SEL_SEC__SHIFT) & MDP4_DISP_INTF_SEL_SEC__MASK;
164}
165#define MDP4_DISP_INTF_SEL_EXT__MASK 0x00000030
166#define MDP4_DISP_INTF_SEL_EXT__SHIFT 4
167static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
168{
169 return ((val) << MDP4_DISP_INTF_SEL_EXT__SHIFT) & MDP4_DISP_INTF_SEL_EXT__MASK;
170}
171#define MDP4_DISP_INTF_SEL_DSI_VIDEO 0x00000040
172#define MDP4_DISP_INTF_SEL_DSI_CMD 0x00000080
173
174#define REG_MDP4_RESET_STATUS 0x0000003c
175
176#define REG_MDP4_READ_CNFG 0x0000004c
177
178#define REG_MDP4_INTR_ENABLE 0x00000050
179
180#define REG_MDP4_INTR_STATUS 0x00000054
181
182#define REG_MDP4_INTR_CLEAR 0x00000058
183
184#define REG_MDP4_EBI2_LCD0 0x00000060
185
186#define REG_MDP4_EBI2_LCD1 0x00000064
187
188#define REG_MDP4_PORTMAP_MODE 0x00000070
189
190#define REG_MDP4_CS_CONTROLLER0 0x000000c0
191
192#define REG_MDP4_CS_CONTROLLER1 0x000000c4
193
194#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0
195#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007
196#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0
197static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val)
198{
199 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
200}
201#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008
202#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070
203#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4
204static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val)
205{
206 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
207}
208#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080
209#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700
210#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8
211static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val)
212{
213 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
214}
215#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800
216#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000
217#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12
218static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val)
219{
220 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
221}
222#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000
223#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000
224#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16
225static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val)
226{
227 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
228}
229#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000
230#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000
231#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20
232static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val)
233{
234 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
235}
236#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000
237#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000
238#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24
239static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val)
240{
241 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
242}
243#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000
244#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000
245#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28
246static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val)
247{
248 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
249}
250#define MDP4_LAYERMIXER2_IN_CFG_PIPE7_MIXER1 0x80000000
251
252#define REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD 0x000100fc
253
254#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100
255#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007
256#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0
257static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val)
258{
259 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
260}
261#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008
262#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070
263#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4
264static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val)
265{
266 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
267}
268#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080
269#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700
270#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8
271static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val)
272{
273 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
274}
275#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800
276#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000
277#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12
278static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val)
279{
280 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
281}
282#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000
283#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000
284#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16
285static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val)
286{
287 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
288}
289#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000
290#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000
291#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20
292static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val)
293{
294 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
295}
296#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000
297#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000
298#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24
299static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val)
300{
301 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
302}
303#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000
304#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000
305#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28
306static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val)
307{
308 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
309}
310#define MDP4_LAYERMIXER_IN_CFG_PIPE7_MIXER1 0x80000000
311
312#define REG_MDP4_VG2_SRC_FORMAT 0x00030050
313
314#define REG_MDP4_VG2_CONST_COLOR 0x00031008
315
316#define REG_MDP4_OVERLAY_FLUSH 0x00018000
317#define MDP4_OVERLAY_FLUSH_OVLP0 0x00000001
318#define MDP4_OVERLAY_FLUSH_OVLP1 0x00000002
319#define MDP4_OVERLAY_FLUSH_VG1 0x00000004
320#define MDP4_OVERLAY_FLUSH_VG2 0x00000008
321#define MDP4_OVERLAY_FLUSH_RGB1 0x00000010
322#define MDP4_OVERLAY_FLUSH_RGB2 0x00000020
323
324static inline uint32_t __offset_OVLP(uint32_t idx)
325{
326 switch (idx) {
327 case 0: return 0x00010000;
328 case 1: return 0x00018000;
329 case 2: return 0x00088000;
330 default: return INVALID_IDX(idx);
331 }
332}
333static inline uint32_t REG_MDP4_OVLP(uint32_t i0) { return 0x00000000 + __offset_OVLP(i0); }
334
335static inline uint32_t REG_MDP4_OVLP_CFG(uint32_t i0) { return 0x00000004 + __offset_OVLP(i0); }
336
337static inline uint32_t REG_MDP4_OVLP_SIZE(uint32_t i0) { return 0x00000008 + __offset_OVLP(i0); }
338#define MDP4_OVLP_SIZE_HEIGHT__MASK 0xffff0000
339#define MDP4_OVLP_SIZE_HEIGHT__SHIFT 16
340static inline uint32_t MDP4_OVLP_SIZE_HEIGHT(uint32_t val)
341{
342 return ((val) << MDP4_OVLP_SIZE_HEIGHT__SHIFT) & MDP4_OVLP_SIZE_HEIGHT__MASK;
343}
344#define MDP4_OVLP_SIZE_WIDTH__MASK 0x0000ffff
345#define MDP4_OVLP_SIZE_WIDTH__SHIFT 0
346static inline uint32_t MDP4_OVLP_SIZE_WIDTH(uint32_t val)
347{
348 return ((val) << MDP4_OVLP_SIZE_WIDTH__SHIFT) & MDP4_OVLP_SIZE_WIDTH__MASK;
349}
350
351static inline uint32_t REG_MDP4_OVLP_BASE(uint32_t i0) { return 0x0000000c + __offset_OVLP(i0); }
352
353static inline uint32_t REG_MDP4_OVLP_STRIDE(uint32_t i0) { return 0x00000010 + __offset_OVLP(i0); }
354
355static inline uint32_t REG_MDP4_OVLP_OPMODE(uint32_t i0) { return 0x00000014 + __offset_OVLP(i0); }
356
357static inline uint32_t __offset_STAGE(uint32_t idx)
358{
359 switch (idx) {
360 case 0: return 0x00000104;
361 case 1: return 0x00000124;
362 case 2: return 0x00000144;
363 case 3: return 0x00000160;
364 default: return INVALID_IDX(idx);
365 }
366}
367static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
368
369static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
370#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003
371#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0
372static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mpd4_alpha_type val)
373{
374 return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
375}
376#define MDP4_OVLP_STAGE_OP_FG_INV_ALPHA 0x00000004
377#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008
378#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030
379#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4
380static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mpd4_alpha_type val)
381{
382 return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
383}
384#define MDP4_OVLP_STAGE_OP_BG_INV_ALPHA 0x00000040
385#define MDP4_OVLP_STAGE_OP_BG_MOD_ALPHA 0x00000080
386#define MDP4_OVLP_STAGE_OP_FG_TRANSP 0x00000100
387#define MDP4_OVLP_STAGE_OP_BG_TRANSP 0x00000200
388
389static inline uint32_t REG_MDP4_OVLP_STAGE_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_OVLP(i0) + __offset_STAGE(i1); }
390
391static inline uint32_t REG_MDP4_OVLP_STAGE_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_OVLP(i0) + __offset_STAGE(i1); }
392
393static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_OVLP(i0) + __offset_STAGE(i1); }
394
395static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_OVLP(i0) + __offset_STAGE(i1); }
396
397static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_OVLP(i0) + __offset_STAGE(i1); }
398
399static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_OVLP(i0) + __offset_STAGE(i1); }
400
401static inline uint32_t __offset_STAGE_CO3(uint32_t idx)
402{
403 switch (idx) {
404 case 0: return 0x00001004;
405 case 1: return 0x00001404;
406 case 2: return 0x00001804;
407 case 3: return 0x00001b84;
408 default: return INVALID_IDX(idx);
409 }
410}
411static inline uint32_t REG_MDP4_OVLP_STAGE_CO3(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
412
413static inline uint32_t REG_MDP4_OVLP_STAGE_CO3_SEL(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
414#define MDP4_OVLP_STAGE_CO3_SEL_FG_ALPHA 0x00000001
415
416static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW0(uint32_t i0) { return 0x00000180 + __offset_OVLP(i0); }
417
418static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW1(uint32_t i0) { return 0x00000184 + __offset_OVLP(i0); }
419
420static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH0(uint32_t i0) { return 0x00000188 + __offset_OVLP(i0); }
421
422static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH1(uint32_t i0) { return 0x0000018c + __offset_OVLP(i0); }
423
424static inline uint32_t REG_MDP4_OVLP_CSC_CONFIG(uint32_t i0) { return 0x00000200 + __offset_OVLP(i0); }
425
426static inline uint32_t REG_MDP4_OVLP_CSC(uint32_t i0) { return 0x00002000 + __offset_OVLP(i0); }
427
428
429static inline uint32_t REG_MDP4_OVLP_CSC_MV(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
430
431static inline uint32_t REG_MDP4_OVLP_CSC_MV_VAL(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
432
433static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
434
435static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
436
437static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
438
439static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
440
441static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
442
443static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
444
445static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
446
447static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
448
449#define REG_MDP4_DMA_P_OP_MODE 0x00090070
450
451static inline uint32_t REG_MDP4_LUTN(uint32_t i0) { return 0x00094800 + 0x400*i0; }
452
453static inline uint32_t REG_MDP4_LUTN_LUT(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
454
455static inline uint32_t REG_MDP4_LUTN_LUT_VAL(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
456
457#define REG_MDP4_DMA_S_OP_MODE 0x000a0028
458
459static inline uint32_t REG_MDP4_DMA_E_QUANT(uint32_t i0) { return 0x000b0070 + 0x4*i0; }
460
461static inline uint32_t __offset_DMA(enum mdp4_dma idx)
462{
463 switch (idx) {
464 case DMA_P: return 0x00090000;
465 case DMA_S: return 0x000a0000;
466 case DMA_E: return 0x000b0000;
467 default: return INVALID_IDX(idx);
468 }
469}
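
The per-engine register banks sit at 0x00090000 (DMA_P), 0x000a0000 (DMA_S)
and 0x000b0000 (DMA_E), so each REG_MDP4_DMA_* accessor that follows is just
a small offset added onto the bank base. A worked example:

	/* REG_MDP4_DMA_CONFIG(DMA_S)   == 0x00000000 + 0x000a0000 == 0x000a0000 */
	/* REG_MDP4_DMA_SRC_SIZE(DMA_E) == 0x00000004 + 0x000b0000 == 0x000b0004 */
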
470static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
471
472static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
473#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003
474#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0
475static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mpd4_bpc val)
476{
477 return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
478}
479#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c
480#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2
481static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mpd4_bpc val)
482{
483 return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
484}
485#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030
486#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4
487static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mpd4_bpc val)
488{
489 return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
490}
491#define MDP4_DMA_CONFIG_PACK_ALIGN_MSB 0x00000080
492#define MDP4_DMA_CONFIG_PACK__MASK 0x0000ff00
493#define MDP4_DMA_CONFIG_PACK__SHIFT 8
494static inline uint32_t MDP4_DMA_CONFIG_PACK(uint32_t val)
495{
496 return ((val) << MDP4_DMA_CONFIG_PACK__SHIFT) & MDP4_DMA_CONFIG_PACK__MASK;
497}
498#define MDP4_DMA_CONFIG_DEFLKR_EN 0x01000000
499#define MDP4_DMA_CONFIG_DITHER_EN 0x01000000
500
501static inline uint32_t REG_MDP4_DMA_SRC_SIZE(enum mdp4_dma i0) { return 0x00000004 + __offset_DMA(i0); }
502#define MDP4_DMA_SRC_SIZE_HEIGHT__MASK 0xffff0000
503#define MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT 16
504static inline uint32_t MDP4_DMA_SRC_SIZE_HEIGHT(uint32_t val)
505{
506 return ((val) << MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT) & MDP4_DMA_SRC_SIZE_HEIGHT__MASK;
507}
508#define MDP4_DMA_SRC_SIZE_WIDTH__MASK 0x0000ffff
509#define MDP4_DMA_SRC_SIZE_WIDTH__SHIFT 0
510static inline uint32_t MDP4_DMA_SRC_SIZE_WIDTH(uint32_t val)
511{
512 return ((val) << MDP4_DMA_SRC_SIZE_WIDTH__SHIFT) & MDP4_DMA_SRC_SIZE_WIDTH__MASK;
513}
514
515static inline uint32_t REG_MDP4_DMA_SRC_BASE(enum mdp4_dma i0) { return 0x00000008 + __offset_DMA(i0); }
516
517static inline uint32_t REG_MDP4_DMA_SRC_STRIDE(enum mdp4_dma i0) { return 0x0000000c + __offset_DMA(i0); }
518
519static inline uint32_t REG_MDP4_DMA_DST_SIZE(enum mdp4_dma i0) { return 0x00000010 + __offset_DMA(i0); }
520#define MDP4_DMA_DST_SIZE_HEIGHT__MASK 0xffff0000
521#define MDP4_DMA_DST_SIZE_HEIGHT__SHIFT 16
522static inline uint32_t MDP4_DMA_DST_SIZE_HEIGHT(uint32_t val)
523{
524 return ((val) << MDP4_DMA_DST_SIZE_HEIGHT__SHIFT) & MDP4_DMA_DST_SIZE_HEIGHT__MASK;
525}
526#define MDP4_DMA_DST_SIZE_WIDTH__MASK 0x0000ffff
527#define MDP4_DMA_DST_SIZE_WIDTH__SHIFT 0
528static inline uint32_t MDP4_DMA_DST_SIZE_WIDTH(uint32_t val)
529{
530 return ((val) << MDP4_DMA_DST_SIZE_WIDTH__SHIFT) & MDP4_DMA_DST_SIZE_WIDTH__MASK;
531}
532
533static inline uint32_t REG_MDP4_DMA_CURSOR_SIZE(enum mdp4_dma i0) { return 0x00000044 + __offset_DMA(i0); }
534#define MDP4_DMA_CURSOR_SIZE_WIDTH__MASK 0x0000007f
535#define MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT 0
536static inline uint32_t MDP4_DMA_CURSOR_SIZE_WIDTH(uint32_t val)
537{
538 return ((val) << MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT) & MDP4_DMA_CURSOR_SIZE_WIDTH__MASK;
539}
540#define MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK 0x007f0000
541#define MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT 16
542static inline uint32_t MDP4_DMA_CURSOR_SIZE_HEIGHT(uint32_t val)
543{
544 return ((val) << MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT) & MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK;
545}
546
547static inline uint32_t REG_MDP4_DMA_CURSOR_BASE(enum mdp4_dma i0) { return 0x00000048 + __offset_DMA(i0); }
548
549static inline uint32_t REG_MDP4_DMA_CURSOR_POS(enum mdp4_dma i0) { return 0x0000004c + __offset_DMA(i0); }
550#define MDP4_DMA_CURSOR_POS_X__MASK 0x0000ffff
551#define MDP4_DMA_CURSOR_POS_X__SHIFT 0
552static inline uint32_t MDP4_DMA_CURSOR_POS_X(uint32_t val)
553{
554 return ((val) << MDP4_DMA_CURSOR_POS_X__SHIFT) & MDP4_DMA_CURSOR_POS_X__MASK;
555}
556#define MDP4_DMA_CURSOR_POS_Y__MASK 0xffff0000
557#define MDP4_DMA_CURSOR_POS_Y__SHIFT 16
558static inline uint32_t MDP4_DMA_CURSOR_POS_Y(uint32_t val)
559{
560 return ((val) << MDP4_DMA_CURSOR_POS_Y__SHIFT) & MDP4_DMA_CURSOR_POS_Y__MASK;
561}
562
563static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_CONFIG(enum mdp4_dma i0) { return 0x00000060 + __offset_DMA(i0); }
564#define MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN 0x00000001
565#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK 0x00000006
566#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT 1
567static inline uint32_t MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(enum mdp4_cursor_format val)
568{
569 return ((val) << MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT) & MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK;
570}
571#define MDP4_DMA_CURSOR_BLEND_CONFIG_TRANSP_EN 0x00000008
572
573static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_PARAM(enum mdp4_dma i0) { return 0x00000064 + __offset_DMA(i0); }
574
575static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_LOW(enum mdp4_dma i0) { return 0x00000068 + __offset_DMA(i0); }
576
577static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_HIGH(enum mdp4_dma i0) { return 0x0000006c + __offset_DMA(i0); }
578
579static inline uint32_t REG_MDP4_DMA_FETCH_CONFIG(enum mdp4_dma i0) { return 0x00001004 + __offset_DMA(i0); }
580
581static inline uint32_t REG_MDP4_DMA_CSC(enum mdp4_dma i0) { return 0x00003000 + __offset_DMA(i0); }
582
583
584static inline uint32_t REG_MDP4_DMA_CSC_MV(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
585
586static inline uint32_t REG_MDP4_DMA_CSC_MV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
587
588static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
589
590static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
591
592static inline uint32_t REG_MDP4_DMA_CSC_POST_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
593
594static inline uint32_t REG_MDP4_DMA_CSC_POST_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
595
596static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
597
598static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
599
600static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
601
602static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
603
604static inline uint32_t REG_MDP4_PIPE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; }
605
606static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; }
607#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
608#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
609static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
610{
611 return ((val) << MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SRC_SIZE_HEIGHT__MASK;
612}
613#define MDP4_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff
614#define MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT 0
615static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val)
616{
617 return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK;
618}
619
620static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mpd4_pipe i0) { return 0x00020004 + 0x10000*i0; }
621#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000
622#define MDP4_PIPE_SRC_XY_Y__SHIFT 16
623static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val)
624{
625 return ((val) << MDP4_PIPE_SRC_XY_Y__SHIFT) & MDP4_PIPE_SRC_XY_Y__MASK;
626}
627#define MDP4_PIPE_SRC_XY_X__MASK 0x0000ffff
628#define MDP4_PIPE_SRC_XY_X__SHIFT 0
629static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val)
630{
631 return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK;
632}
633
634static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mpd4_pipe i0) { return 0x00020008 + 0x10000*i0; }
635#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000
636#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16
637static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val)
638{
639 return ((val) << MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_DST_SIZE_HEIGHT__MASK;
640}
641#define MDP4_PIPE_DST_SIZE_WIDTH__MASK 0x0000ffff
642#define MDP4_PIPE_DST_SIZE_WIDTH__SHIFT 0
643static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val)
644{
645 return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK;
646}
647
648static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mpd4_pipe i0) { return 0x0002000c + 0x10000*i0; }
649#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000
650#define MDP4_PIPE_DST_XY_Y__SHIFT 16
651static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val)
652{
653 return ((val) << MDP4_PIPE_DST_XY_Y__SHIFT) & MDP4_PIPE_DST_XY_Y__MASK;
654}
655#define MDP4_PIPE_DST_XY_X__MASK 0x0000ffff
656#define MDP4_PIPE_DST_XY_X__SHIFT 0
657static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val)
658{
659 return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK;
660}
661
662static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mpd4_pipe i0) { return 0x00020010 + 0x10000*i0; }
663
664static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mpd4_pipe i0) { return 0x00020014 + 0x10000*i0; }
665
666static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mpd4_pipe i0) { return 0x00020018 + 0x10000*i0; }
667
668static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mpd4_pipe i0) { return 0x00020040 + 0x10000*i0; }
669#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
670#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0
671static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val)
672{
673 return ((val) << MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P0__MASK;
674}
675#define MDP4_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000
676#define MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT 16
677static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val)
678{
679 return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK;
680}
681
682static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mpd4_pipe i0) { return 0x00020044 + 0x10000*i0; }
683#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
684#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0
685static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val)
686{
687 return ((val) << MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P2__MASK;
688}
689#define MDP4_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000
690#define MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT 16
691static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val)
692{
693 return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK;
694}
695
696static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mpd4_pipe i0) { return 0x00020048 + 0x10000*i0; }
697#define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK 0xffff0000
698#define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT 16
699static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val)
700{
701 return ((val) << MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK;
702}
703#define MDP4_PIPE_FRAME_SIZE_WIDTH__MASK 0x0000ffff
704#define MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT 0
705static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val)
706{
707 return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK;
708}
709
710static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mpd4_pipe i0) { return 0x00020050 + 0x10000*i0; }
711#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
712#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
713static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mpd4_bpc val)
714{
715 return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
716}
717#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
718#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
719static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mpd4_bpc val)
720{
721 return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
722}
723#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
724#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
725static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mpd4_bpc val)
726{
727 return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
728}
729#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
730#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
731static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mpd4_bpc_alpha val)
732{
733 return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
734}
735#define MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100
736#define MDP4_PIPE_SRC_FORMAT_CPP__MASK 0x00000600
737#define MDP4_PIPE_SRC_FORMAT_CPP__SHIFT 9
738static inline uint32_t MDP4_PIPE_SRC_FORMAT_CPP(uint32_t val)
739{
740 return ((val) << MDP4_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CPP__MASK;
741}
742#define MDP4_PIPE_SRC_FORMAT_ROTATED_90 0x00001000
743#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00006000
744#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 13
745static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
746{
747 return ((val) << MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
748}
749#define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
750#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
751#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000
752
753static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mpd4_pipe i0) { return 0x00020054 + 0x10000*i0; }
754#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
755#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
756static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
757{
758 return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM0__MASK;
759}
760#define MDP4_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00
761#define MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT 8
762static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
763{
764 return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM1__MASK;
765}
766#define MDP4_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000
767#define MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT 16
768static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
769{
770 return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM2__MASK;
771}
772#define MDP4_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000
773#define MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT 24
774static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
775{
776 return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK;
777}
778
779static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mpd4_pipe i0) { return 0x00020058 + 0x10000*i0; }
780#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001
781#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002
782#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200
783#define MDP4_PIPE_OP_MODE_DST_YCBCR 0x00000400
784#define MDP4_PIPE_OP_MODE_CSC_EN 0x00000800
785#define MDP4_PIPE_OP_MODE_FLIP_LR 0x00002000
786#define MDP4_PIPE_OP_MODE_FLIP_UD 0x00004000
787#define MDP4_PIPE_OP_MODE_DITHER_EN 0x00008000
788#define MDP4_PIPE_OP_MODE_IGC_LUT_EN 0x00010000
789#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000
790#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000
791
792static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mpd4_pipe i0) { return 0x0002005c + 0x10000*i0; }
793
794static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mpd4_pipe i0) { return 0x00020060 + 0x10000*i0; }
795
796static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mpd4_pipe i0) { return 0x00021004 + 0x10000*i0; }
797
798static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mpd4_pipe i0) { return 0x00021008 + 0x10000*i0; }
799
800static inline uint32_t REG_MDP4_PIPE_CSC(enum mpd4_pipe i0) { return 0x00024000 + 0x10000*i0; }
801
802
803static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
804
805static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
806
807static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
808
809static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
810
811static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
812
813static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
814
815static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
816
817static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
818
819static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
820
821static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
822
823#define REG_MDP4_LCDC 0x000c0000
824
825#define REG_MDP4_LCDC_ENABLE 0x000c0000
826
827#define REG_MDP4_LCDC_HSYNC_CTRL 0x000c0004
828#define MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
829#define MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT 0
830static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PULSEW(uint32_t val)
831{
832 return ((val) << MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK;
833}
834#define MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK 0xffff0000
835#define MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT 16
836static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PERIOD(uint32_t val)
837{
838 return ((val) << MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK;
839}
840
841#define REG_MDP4_LCDC_VSYNC_PERIOD 0x000c0008
842
843#define REG_MDP4_LCDC_VSYNC_LEN 0x000c000c
844
845#define REG_MDP4_LCDC_DISPLAY_HCTRL 0x000c0010
846#define MDP4_LCDC_DISPLAY_HCTRL_START__MASK 0x0000ffff
847#define MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT 0
848static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_START(uint32_t val)
849{
850 return ((val) << MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_START__MASK;
851}
852#define MDP4_LCDC_DISPLAY_HCTRL_END__MASK 0xffff0000
853#define MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT 16
854static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_END(uint32_t val)
855{
856 return ((val) << MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_END__MASK;
857}
858
859#define REG_MDP4_LCDC_DISPLAY_VSTART 0x000c0014
860
861#define REG_MDP4_LCDC_DISPLAY_VEND 0x000c0018
862
863#define REG_MDP4_LCDC_ACTIVE_HCTL 0x000c001c
864#define MDP4_LCDC_ACTIVE_HCTL_START__MASK 0x00007fff
865#define MDP4_LCDC_ACTIVE_HCTL_START__SHIFT 0
866static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_START(uint32_t val)
867{
868 return ((val) << MDP4_LCDC_ACTIVE_HCTL_START__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_START__MASK;
869}
870#define MDP4_LCDC_ACTIVE_HCTL_END__MASK 0x7fff0000
871#define MDP4_LCDC_ACTIVE_HCTL_END__SHIFT 16
872static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_END(uint32_t val)
873{
874 return ((val) << MDP4_LCDC_ACTIVE_HCTL_END__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_END__MASK;
875}
876#define MDP4_LCDC_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
877
878#define REG_MDP4_LCDC_ACTIVE_VSTART 0x000c0020
879
880#define REG_MDP4_LCDC_ACTIVE_VEND 0x000c0024
881
882#define REG_MDP4_LCDC_BORDER_CLR 0x000c0028
883
884#define REG_MDP4_LCDC_UNDERFLOW_CLR 0x000c002c
885#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
886#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT 0
887static inline uint32_t MDP4_LCDC_UNDERFLOW_CLR_COLOR(uint32_t val)
888{
889 return ((val) << MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK;
890}
891#define MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
892
893#define REG_MDP4_LCDC_HSYNC_SKEW 0x000c0030
894
895#define REG_MDP4_LCDC_TEST_CNTL 0x000c0034
896
897#define REG_MDP4_LCDC_CTRL_POLARITY 0x000c0038
898#define MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW 0x00000001
899#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW 0x00000002
900#define MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW 0x00000004
901
902#define REG_MDP4_DTV 0x000d0000
903
904#define REG_MDP4_DTV_ENABLE 0x000d0000
905
906#define REG_MDP4_DTV_HSYNC_CTRL 0x000d0004
907#define MDP4_DTV_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
908#define MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT 0
909static inline uint32_t MDP4_DTV_HSYNC_CTRL_PULSEW(uint32_t val)
910{
911 return ((val) << MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DTV_HSYNC_CTRL_PULSEW__MASK;
912}
913#define MDP4_DTV_HSYNC_CTRL_PERIOD__MASK 0xffff0000
914#define MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT 16
915static inline uint32_t MDP4_DTV_HSYNC_CTRL_PERIOD(uint32_t val)
916{
917 return ((val) << MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DTV_HSYNC_CTRL_PERIOD__MASK;
918}
919
920#define REG_MDP4_DTV_VSYNC_PERIOD 0x000d0008
921
922#define REG_MDP4_DTV_VSYNC_LEN 0x000d000c
923
924#define REG_MDP4_DTV_DISPLAY_HCTRL 0x000d0018
925#define MDP4_DTV_DISPLAY_HCTRL_START__MASK 0x0000ffff
926#define MDP4_DTV_DISPLAY_HCTRL_START__SHIFT 0
927static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_START(uint32_t val)
928{
929 return ((val) << MDP4_DTV_DISPLAY_HCTRL_START__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_START__MASK;
930}
931#define MDP4_DTV_DISPLAY_HCTRL_END__MASK 0xffff0000
932#define MDP4_DTV_DISPLAY_HCTRL_END__SHIFT 16
933static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_END(uint32_t val)
934{
935 return ((val) << MDP4_DTV_DISPLAY_HCTRL_END__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_END__MASK;
936}
937
938#define REG_MDP4_DTV_DISPLAY_VSTART 0x000d001c
939
940#define REG_MDP4_DTV_DISPLAY_VEND 0x000d0020
941
942#define REG_MDP4_DTV_ACTIVE_HCTL 0x000d002c
943#define MDP4_DTV_ACTIVE_HCTL_START__MASK 0x00007fff
944#define MDP4_DTV_ACTIVE_HCTL_START__SHIFT 0
945static inline uint32_t MDP4_DTV_ACTIVE_HCTL_START(uint32_t val)
946{
947 return ((val) << MDP4_DTV_ACTIVE_HCTL_START__SHIFT) & MDP4_DTV_ACTIVE_HCTL_START__MASK;
948}
949#define MDP4_DTV_ACTIVE_HCTL_END__MASK 0x7fff0000
950#define MDP4_DTV_ACTIVE_HCTL_END__SHIFT 16
951static inline uint32_t MDP4_DTV_ACTIVE_HCTL_END(uint32_t val)
952{
953 return ((val) << MDP4_DTV_ACTIVE_HCTL_END__SHIFT) & MDP4_DTV_ACTIVE_HCTL_END__MASK;
954}
955#define MDP4_DTV_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
956
957#define REG_MDP4_DTV_ACTIVE_VSTART 0x000d0030
958
959#define REG_MDP4_DTV_ACTIVE_VEND 0x000d0038
960
961#define REG_MDP4_DTV_BORDER_CLR 0x000d0040
962
963#define REG_MDP4_DTV_UNDERFLOW_CLR 0x000d0044
964#define MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
965#define MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT 0
966static inline uint32_t MDP4_DTV_UNDERFLOW_CLR_COLOR(uint32_t val)
967{
968 return ((val) << MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK;
969}
970#define MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
971
972#define REG_MDP4_DTV_HSYNC_SKEW 0x000d0048
973
974#define REG_MDP4_DTV_TEST_CNTL 0x000d004c
975
976#define REG_MDP4_DTV_CTRL_POLARITY 0x000d0050
977#define MDP4_DTV_CTRL_POLARITY_HSYNC_LOW 0x00000001
978#define MDP4_DTV_CTRL_POLARITY_VSYNC_LOW 0x00000002
979#define MDP4_DTV_CTRL_POLARITY_DATA_EN_LOW 0x00000004
980
981#define REG_MDP4_DSI 0x000e0000
982
983#define REG_MDP4_DSI_ENABLE 0x000e0000
984
985#define REG_MDP4_DSI_HSYNC_CTRL 0x000e0004
986#define MDP4_DSI_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
987#define MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT 0
988static inline uint32_t MDP4_DSI_HSYNC_CTRL_PULSEW(uint32_t val)
989{
990 return ((val) << MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DSI_HSYNC_CTRL_PULSEW__MASK;
991}
992#define MDP4_DSI_HSYNC_CTRL_PERIOD__MASK 0xffff0000
993#define MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT 16
994static inline uint32_t MDP4_DSI_HSYNC_CTRL_PERIOD(uint32_t val)
995{
996 return ((val) << MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DSI_HSYNC_CTRL_PERIOD__MASK;
997}
998
999#define REG_MDP4_DSI_VSYNC_PERIOD 0x000e0008
1000
1001#define REG_MDP4_DSI_VSYNC_LEN 0x000e000c
1002
1003#define REG_MDP4_DSI_DISPLAY_HCTRL 0x000e0010
1004#define MDP4_DSI_DISPLAY_HCTRL_START__MASK 0x0000ffff
1005#define MDP4_DSI_DISPLAY_HCTRL_START__SHIFT 0
1006static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_START(uint32_t val)
1007{
1008 return ((val) << MDP4_DSI_DISPLAY_HCTRL_START__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_START__MASK;
1009}
1010#define MDP4_DSI_DISPLAY_HCTRL_END__MASK 0xffff0000
1011#define MDP4_DSI_DISPLAY_HCTRL_END__SHIFT 16
1012static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_END(uint32_t val)
1013{
1014 return ((val) << MDP4_DSI_DISPLAY_HCTRL_END__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_END__MASK;
1015}
1016
1017#define REG_MDP4_DSI_DISPLAY_VSTART 0x000e0014
1018
1019#define REG_MDP4_DSI_DISPLAY_VEND 0x000e0018
1020
1021#define REG_MDP4_DSI_ACTIVE_HCTL 0x000e001c
1022#define MDP4_DSI_ACTIVE_HCTL_START__MASK 0x00007fff
1023#define MDP4_DSI_ACTIVE_HCTL_START__SHIFT 0
1024static inline uint32_t MDP4_DSI_ACTIVE_HCTL_START(uint32_t val)
1025{
1026 return ((val) << MDP4_DSI_ACTIVE_HCTL_START__SHIFT) & MDP4_DSI_ACTIVE_HCTL_START__MASK;
1027}
1028#define MDP4_DSI_ACTIVE_HCTL_END__MASK 0x7fff0000
1029#define MDP4_DSI_ACTIVE_HCTL_END__SHIFT 16
1030static inline uint32_t MDP4_DSI_ACTIVE_HCTL_END(uint32_t val)
1031{
1032 return ((val) << MDP4_DSI_ACTIVE_HCTL_END__SHIFT) & MDP4_DSI_ACTIVE_HCTL_END__MASK;
1033}
1034#define MDP4_DSI_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
1035
1036#define REG_MDP4_DSI_ACTIVE_VSTART 0x000e0020
1037
1038#define REG_MDP4_DSI_ACTIVE_VEND 0x000e0024
1039
1040#define REG_MDP4_DSI_BORDER_CLR 0x000e0028
1041
1042#define REG_MDP4_DSI_UNDERFLOW_CLR 0x000e002c
1043#define MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
1044#define MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT 0
1045static inline uint32_t MDP4_DSI_UNDERFLOW_CLR_COLOR(uint32_t val)
1046{
1047 return ((val) << MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK;
1048}
1049#define MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
1050
1051#define REG_MDP4_DSI_HSYNC_SKEW 0x000e0030
1052
1053#define REG_MDP4_DSI_TEST_CNTL 0x000e0034
1054
1055#define REG_MDP4_DSI_CTRL_POLARITY 0x000e0038
1056#define MDP4_DSI_CTRL_POLARITY_HSYNC_LOW 0x00000001
1057#define MDP4_DSI_CTRL_POLARITY_VSYNC_LOW 0x00000002
1058#define MDP4_DSI_CTRL_POLARITY_DATA_EN_LOW 0x00000004
1059
1060
1061#endif /* MDP4_XML */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
new file mode 100644
index 000000000000..de6bea297cda
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
@@ -0,0 +1,685 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "mdp4_kms.h"
19
20#include <drm/drm_mode.h>
21#include "drm_crtc.h"
22#include "drm_crtc_helper.h"
23#include "drm_flip_work.h"
24
25struct mdp4_crtc {
26 struct drm_crtc base;
27 char name[8];
28 struct drm_plane *plane;
29 int id;
30 int ovlp;
31 enum mdp4_dma dma;
32 bool enabled;
33
34 /* which mixer/encoder we route output to: */
35 int mixer;
36
37 struct {
38 spinlock_t lock;
39 bool stale;
40 uint32_t width, height;
41
42 /* next cursor to scan-out: */
43 uint32_t next_iova;
44 struct drm_gem_object *next_bo;
45
46 /* current cursor being scanned out: */
47 struct drm_gem_object *scanout_bo;
48 } cursor;
49
50
51 /* if there is a pending flip, these will be non-null: */
52 struct drm_pending_vblank_event *event;
53 struct work_struct pageflip_work;
54
55 /* the fb that we currently hold a scanout ref to: */
56 struct drm_framebuffer *fb;
57
58 /* for unref'ing framebuffers after scanout completes: */
59 struct drm_flip_work unref_fb_work;
60
61 /* for unref'ing cursor bo's after scanout completes: */
62 struct drm_flip_work unref_cursor_work;
63
64 struct mdp4_irq vblank;
65 struct mdp4_irq err;
66};
67#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
68
69static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
70{
71 struct msm_drm_private *priv = crtc->dev->dev_private;
72 return to_mdp4_kms(priv->kms);
73}
74
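/* Swap in a new scanout fb: a reference is taken on the new fb right away,
 * while the reference on the old fb is only dropped from unref_fb_work
 * after the vblank that retires it (committed in mdp4_crtc_vblank_irq()).
 */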
75static void update_fb(struct drm_crtc *crtc, bool async,
76 struct drm_framebuffer *new_fb)
77{
78 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
79 struct drm_framebuffer *old_fb = mdp4_crtc->fb;
80
81 if (old_fb)
82 drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
83
84 /* grab reference to incoming scanout fb: */
85 drm_framebuffer_reference(new_fb);
86 mdp4_crtc->base.fb = new_fb;
87 mdp4_crtc->fb = new_fb;
88
89 if (!async) {
90 /* enable vblank to pick up the old_fb */
91 mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
92 }
93}
94
95static void complete_flip(struct drm_crtc *crtc, bool canceled)
96{
97 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
98 struct drm_device *dev = crtc->dev;
99 struct drm_pending_vblank_event *event;
100 unsigned long flags;
101
102 spin_lock_irqsave(&dev->event_lock, flags);
103 event = mdp4_crtc->event;
104 if (event) {
105 mdp4_crtc->event = NULL;
106 if (canceled)
107 event->base.destroy(&event->base);
108 else
109 drm_send_vblank_event(dev, mdp4_crtc->id, event);
110 }
111 spin_unlock_irqrestore(&dev->event_lock, flags);
112}
113
114static void crtc_flush(struct drm_crtc *crtc)
115{
116 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
117 struct mdp4_kms *mdp4_kms = get_kms(crtc);
118 uint32_t flush = 0;
119
120 flush |= pipe2flush(mdp4_plane_pipe(mdp4_crtc->plane));
121 flush |= ovlp2flush(mdp4_crtc->ovlp);
122
123 DBG("%s: flush=%08x", mdp4_crtc->name, flush);
124
125 mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
126}
127
128static void pageflip_worker(struct work_struct *work)
129{
130 struct mdp4_crtc *mdp4_crtc =
131 container_of(work, struct mdp4_crtc, pageflip_work);
132 struct drm_crtc *crtc = &mdp4_crtc->base;
133
134 mdp4_plane_set_scanout(mdp4_crtc->plane, crtc->fb);
135 crtc_flush(crtc);
136
137 /* enable vblank to complete flip: */
138 mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
139}
140
141static void unref_fb_worker(struct drm_flip_work *work, void *val)
142{
143 struct mdp4_crtc *mdp4_crtc =
144 container_of(work, struct mdp4_crtc, unref_fb_work);
145 struct drm_device *dev = mdp4_crtc->base.dev;
146
147 mutex_lock(&dev->mode_config.mutex);
148 drm_framebuffer_unreference(val);
149 mutex_unlock(&dev->mode_config.mutex);
150}
151
152static void unref_cursor_worker(struct drm_flip_work *work, void *val)
153{
154 struct mdp4_crtc *mdp4_crtc =
155 container_of(work, struct mdp4_crtc, unref_cursor_work);
156 struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
157
158 msm_gem_put_iova(val, mdp4_kms->id);
159 drm_gem_object_unreference_unlocked(val);
160}
161
162static void mdp4_crtc_destroy(struct drm_crtc *crtc)
163{
164 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
165
166 mdp4_crtc->plane->funcs->destroy(mdp4_crtc->plane);
167
168 drm_crtc_cleanup(crtc);
169 drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
170 drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
171
172 kfree(mdp4_crtc);
173}
174
175static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
176{
177 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
178 struct mdp4_kms *mdp4_kms = get_kms(crtc);
179 bool enabled = (mode == DRM_MODE_DPMS_ON);
180
181 DBG("%s: mode=%d", mdp4_crtc->name, mode);
182
183 if (enabled != mdp4_crtc->enabled) {
184 if (enabled) {
185 mdp4_enable(mdp4_kms);
186 mdp4_irq_register(mdp4_kms, &mdp4_crtc->err);
187 } else {
188 mdp4_irq_unregister(mdp4_kms, &mdp4_crtc->err);
189 mdp4_disable(mdp4_kms);
190 }
191 mdp4_crtc->enabled = enabled;
192 }
193}
194
195static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
196 const struct drm_display_mode *mode,
197 struct drm_display_mode *adjusted_mode)
198{
199 return true;
200}
201
202static void blend_setup(struct drm_crtc *crtc)
203{
204 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
205 struct mdp4_kms *mdp4_kms = get_kms(crtc);
206 int i, ovlp = mdp4_crtc->ovlp;
207 uint32_t mixer_cfg = 0;
208
209 /*
210 * This probably would also need to be triggered by any attached
211 * plane when it changes.. for now since we are only using a single
212 * private plane, the configuration is hard-coded:
213 */
214
215 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
216 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
217 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
218 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
219
220 for (i = 0; i < 4; i++) {
221 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0);
222 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0);
223 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i),
224 MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
225 MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST));
226 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 0);
227 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
228 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
229 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
230 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
231 }
232
233 /* TODO: this is a single register shared by all CRTCs, so this won't
234 * work properly when multiple CRTCs are active..
235 */
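/* The PIPEn() macros stage the pipe at STAGE_BASE on mixer 0 by default;
 * the PIPEn_MIXER1 bit re-routes it to mixer 1, the path used for DTV
 * output (see mdp4_crtc_set_intf() below).
 */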
236 switch (mdp4_plane_pipe(mdp4_crtc->plane)) {
237 case VG1:
238 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(STAGE_BASE) |
239 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
240 break;
241 case VG2:
242 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(STAGE_BASE) |
243 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
244 break;
245 case RGB1:
246 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(STAGE_BASE) |
247 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
248 break;
249 case RGB2:
250 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(STAGE_BASE) |
251 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
252 break;
253 case RGB3:
254 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(STAGE_BASE) |
255 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
256 break;
257 case VG3:
258 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(STAGE_BASE) |
259 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
260 break;
261 case VG4:
262 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(STAGE_BASE) |
263 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
264 break;
265 default:
266 WARN(1, "invalid pipe");
267 break;
268 }
269 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
270}
271
272static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
273 struct drm_display_mode *mode,
274 struct drm_display_mode *adjusted_mode,
275 int x, int y,
276 struct drm_framebuffer *old_fb)
277{
278 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
279 struct mdp4_kms *mdp4_kms = get_kms(crtc);
280 enum mdp4_dma dma = mdp4_crtc->dma;
281 int ret, ovlp = mdp4_crtc->ovlp;
282
283 mode = adjusted_mode;
284
285 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
286 mdp4_crtc->name, mode->base.id, mode->name,
287 mode->vrefresh, mode->clock,
288 mode->hdisplay, mode->hsync_start,
289 mode->hsync_end, mode->htotal,
290 mode->vdisplay, mode->vsync_start,
291 mode->vsync_end, mode->vtotal,
292 mode->type, mode->flags);
293
294 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
295 MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
296 MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
297
298 /* take data from pipe: */
299 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
300 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
301 crtc->fb->pitches[0]);
302 mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
303 MDP4_DMA_DST_SIZE_WIDTH(0) |
304 MDP4_DMA_DST_SIZE_HEIGHT(0));
305
306 mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
307 mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
308 MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
309 MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
310 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
311 crtc->fb->pitches[0]);
312
313 mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
314
315 update_fb(crtc, false, crtc->fb);
316
317 ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
318 0, 0, mode->hdisplay, mode->vdisplay,
319 x << 16, y << 16,
320 mode->hdisplay << 16, mode->vdisplay << 16);
321 if (ret) {
322 dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
323 mdp4_crtc->name, ret);
324 return ret;
325 }
326
327 if (dma == DMA_E) {
328 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
329 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
330 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
331 }
332
333 return 0;
334}
335
336static void mdp4_crtc_prepare(struct drm_crtc *crtc)
337{
338 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
339 DBG("%s", mdp4_crtc->name);
340 /* make sure we hold a ref to mdp clks while setting up mode: */
341 mdp4_enable(get_kms(crtc));
342 mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
343}
344
345static void mdp4_crtc_commit(struct drm_crtc *crtc)
346{
347 mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
348 crtc_flush(crtc);
349 /* drop the ref to mdp clk's that we got in prepare: */
350 mdp4_disable(get_kms(crtc));
351}
352
353static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
354 struct drm_framebuffer *old_fb)
355{
356 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
357 struct drm_plane *plane = mdp4_crtc->plane;
358 struct drm_display_mode *mode = &crtc->mode;
359
360 update_fb(crtc, false, crtc->fb);
361
362 return mdp4_plane_mode_set(plane, crtc, crtc->fb,
363 0, 0, mode->hdisplay, mode->vdisplay,
364 x << 16, y << 16,
365 mode->hdisplay << 16, mode->vdisplay << 16);
366}
367
368static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
369{
370}
371
372static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
373 struct drm_framebuffer *new_fb,
374 struct drm_pending_vblank_event *event,
375 uint32_t page_flip_flags)
376{
377 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
378 struct drm_device *dev = crtc->dev;
379 struct drm_gem_object *obj;
380
381 if (mdp4_crtc->event) {
382 dev_err(dev->dev, "already pending flip!\n");
383 return -EBUSY;
384 }
385
386 obj = msm_framebuffer_bo(new_fb, 0);
387
388 mdp4_crtc->event = event;
389 update_fb(crtc, true, new_fb);
390
391 return msm_gem_queue_inactive_work(obj,
392 &mdp4_crtc->pageflip_work);
393}
394
395static int mdp4_crtc_set_property(struct drm_crtc *crtc,
396 struct drm_property *property, uint64_t val)
397{
398 // XXX
399 return -EINVAL;
400}
401
402#define CURSOR_WIDTH 64
403#define CURSOR_HEIGHT 64
404
405/* called from IRQ to update cursor-related registers (if needed). The
406 * cursor registers, other than x/y position, appear not to be double
407 * buffered, and changing them other than from vblank seems to trigger
408 * underflow.
409 */
410static void update_cursor(struct drm_crtc *crtc)
411{
412 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
413 enum mdp4_dma dma = mdp4_crtc->dma;
414 unsigned long flags;
415
416 spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
417 if (mdp4_crtc->cursor.stale) {
418 struct mdp4_kms *mdp4_kms = get_kms(crtc);
419 struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
420 struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
421 uint32_t iova = mdp4_crtc->cursor.next_iova;
422
423 if (next_bo) {
424 /* take an obj ref + iova ref when we start scanning out: */
425 drm_gem_object_reference(next_bo);
426 msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
427
428 /* enable cursor: */
429 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
430 MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
431 MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
432 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
433 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
434 MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
435 MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
436 } else {
437 /* disable cursor: */
438 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0);
439 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
440 MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
441 }
442
443 /* and drop the iova ref + obj ref when done scanning out: */
444 if (prev_bo)
445 drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);
446
447 mdp4_crtc->cursor.scanout_bo = next_bo;
448 mdp4_crtc->cursor.stale = false;
449 }
450 spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
451}
452
453static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
454 struct drm_file *file_priv, uint32_t handle,
455 uint32_t width, uint32_t height)
456{
457 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
458 struct mdp4_kms *mdp4_kms = get_kms(crtc);
459 struct drm_device *dev = crtc->dev;
460 struct drm_gem_object *cursor_bo, *old_bo;
461 unsigned long flags;
462 uint32_t iova;
463 int ret;
464
465 if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
466 dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
467 return -EINVAL;
468 }
469
470 if (handle) {
471 cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
472 if (!cursor_bo)
473 return -ENOENT;
474 } else {
475 cursor_bo = NULL;
476 }
477
478 if (cursor_bo) {
479 ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
480 if (ret)
481 goto fail;
482 } else {
483 iova = 0;
484 }
485
486 spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
487 old_bo = mdp4_crtc->cursor.next_bo;
488 mdp4_crtc->cursor.next_bo = cursor_bo;
489 mdp4_crtc->cursor.next_iova = iova;
490 mdp4_crtc->cursor.width = width;
491 mdp4_crtc->cursor.height = height;
492 mdp4_crtc->cursor.stale = true;
493 spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
494
495 if (old_bo) {
496 /* drop our previous reference: */
497 msm_gem_put_iova(old_bo, mdp4_kms->id);
498 drm_gem_object_unreference_unlocked(old_bo);
499 }
500
501 return 0;
502
503fail:
504 drm_gem_object_unreference_unlocked(cursor_bo);
505 return ret;
506}
507
508static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
509{
510 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
511 struct mdp4_kms *mdp4_kms = get_kms(crtc);
512 enum mdp4_dma dma = mdp4_crtc->dma;
513
514 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
515 MDP4_DMA_CURSOR_POS_X(x) |
516 MDP4_DMA_CURSOR_POS_Y(y));
517
518 return 0;
519}
520
521static const struct drm_crtc_funcs mdp4_crtc_funcs = {
522 .set_config = drm_crtc_helper_set_config,
523 .destroy = mdp4_crtc_destroy,
524 .page_flip = mdp4_crtc_page_flip,
525 .set_property = mdp4_crtc_set_property,
526 .cursor_set = mdp4_crtc_cursor_set,
527 .cursor_move = mdp4_crtc_cursor_move,
528};
529
530static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
531 .dpms = mdp4_crtc_dpms,
532 .mode_fixup = mdp4_crtc_mode_fixup,
533 .mode_set = mdp4_crtc_mode_set,
534 .prepare = mdp4_crtc_prepare,
535 .commit = mdp4_crtc_commit,
536 .mode_set_base = mdp4_crtc_mode_set_base,
537 .load_lut = mdp4_crtc_load_lut,
538};
539
540static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
541{
542 struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
543 struct drm_crtc *crtc = &mdp4_crtc->base;
544 struct msm_drm_private *priv = crtc->dev->dev_private;
545
546 update_cursor(crtc);
547 complete_flip(crtc, false);
548 mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank);
549
550 drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
551 drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
552}
553
554static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
555{
556 struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
557 struct drm_crtc *crtc = &mdp4_crtc->base;
558 DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
559 crtc_flush(crtc);
560}
561
562uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
563{
564 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
565 return mdp4_crtc->vblank.irqmask;
566}
567
568void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc)
569{
570 complete_flip(crtc, true);
571}
572
573/* set dma config, i.e. the format the encoder wants. */
574void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
575{
576 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
577 struct mdp4_kms *mdp4_kms = get_kms(crtc);
578
579 mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
580}
581
582/* set interface for routing crtc->encoder: */
583void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf)
584{
585 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
586 struct mdp4_kms *mdp4_kms = get_kms(crtc);
587 uint32_t intf_sel;
588
589 intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);
590
591 switch (mdp4_crtc->dma) {
592 case DMA_P:
593 intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
594 intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
595 break;
596 case DMA_S:
597 intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
598 intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
599 break;
600 case DMA_E:
601 intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
602 intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
603 break;
604 }
605
606 if (intf == INTF_DSI_VIDEO) {
607 intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
608 intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
609 mdp4_crtc->mixer = 0;
610 } else if (intf == INTF_DSI_CMD) {
611 intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
612 intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
613 mdp4_crtc->mixer = 0;
614 } else if (intf == INTF_LCDC_DTV) {
615 mdp4_crtc->mixer = 1;
616 }
617
618 blend_setup(crtc);
619
620 DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);
621
622 mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
623}
624
625static const char *dma_names[] = {
626 "DMA_P", "DMA_S", "DMA_E",
627};
628
629/* initialize crtc */
630struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
631 struct drm_plane *plane, int id, int ovlp_id,
632 enum mdp4_dma dma_id)
633{
634 struct drm_crtc *crtc = NULL;
635 struct mdp4_crtc *mdp4_crtc;
636 int ret;
637
638 mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
639 if (!mdp4_crtc) {
640 ret = -ENOMEM;
641 goto fail;
642 }
643
644 crtc = &mdp4_crtc->base;
645
646 mdp4_crtc->plane = plane;
647 mdp4_crtc->plane->crtc = crtc;
648
649 mdp4_crtc->ovlp = ovlp_id;
650 mdp4_crtc->dma = dma_id;
651
652 mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
653 mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;
654
655 mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
656 mdp4_crtc->err.irq = mdp4_crtc_err_irq;
657
658 snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
659 dma_names[dma_id], ovlp_id);
660
661 spin_lock_init(&mdp4_crtc->cursor.lock);
662
663 ret = drm_flip_work_init(&mdp4_crtc->unref_fb_work, 16,
664 "unref fb", unref_fb_worker);
665 if (ret)
666 goto fail;
667
668 ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
669 "unref cursor", unref_cursor_worker);
 if (ret)
 goto fail;
670
671 INIT_WORK(&mdp4_crtc->pageflip_work, pageflip_worker);
672
673 drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
674 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
675
676 mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);
677
678 return crtc;
679
680fail:
681 if (crtc)
682 mdp4_crtc_destroy(crtc);
683
684 return ERR_PTR(ret);
685}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
new file mode 100644
index 000000000000..5e0dcae70ab5
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
@@ -0,0 +1,305 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <mach/clk.h>
19
20#include "mdp4_kms.h"
21
22#include "drm_crtc.h"
23#include "drm_crtc_helper.h"
24
25
26struct mdp4_dtv_encoder {
27 struct drm_encoder base;
28 struct clk *src_clk;
29 struct clk *hdmi_clk;
30 struct clk *mdp_clk;
31 unsigned long pixclock;
32 bool enabled;
33 uint32_t bsc;
34};
35#define to_mdp4_dtv_encoder(x) container_of(x, struct mdp4_dtv_encoder, base)
36
37static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
38{
39 struct msm_drm_private *priv = encoder->dev->dev_private;
40 return to_mdp4_kms(priv->kms);
41}
42
43#ifdef CONFIG_MSM_BUS_SCALING
44#include <mach/board.h>
45/* not ironically named at all.. no, really.. */
46static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
47{
48 struct drm_device *dev = mdp4_dtv_encoder->base.dev;
49 struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
50
51 if (!dtv_pdata) {
52 dev_err(dev->dev, "could not find dtv pdata\n");
53 return;
54 }
55
56 if (dtv_pdata->bus_scale_table) {
57 mdp4_dtv_encoder->bsc = msm_bus_scale_register_client(
58 dtv_pdata->bus_scale_table);
59 DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc);
60 DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save);
61 if (dtv_pdata->lcdc_power_save)
62 dtv_pdata->lcdc_power_save(1);
63 }
64}
65
66static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
67{
68 if (mdp4_dtv_encoder->bsc) {
69 msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc);
70 mdp4_dtv_encoder->bsc = 0;
71 }
72}
73
74static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx)
75{
76 if (mdp4_dtv_encoder->bsc) {
77 DBG("set bus scaling: %d", idx);
78 msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx);
79 }
80}
81#else
82static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
83static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
84static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {}
85#endif
86
87static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
88{
89 struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
90 bs_fini(mdp4_dtv_encoder);
91 drm_encoder_cleanup(encoder);
92 kfree(mdp4_dtv_encoder);
93}
94
95static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = {
96 .destroy = mdp4_dtv_encoder_destroy,
97};
98
99static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode)
100{
101 struct drm_device *dev = encoder->dev;
102 struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
103 struct mdp4_kms *mdp4_kms = get_kms(encoder);
104 bool enabled = (mode == DRM_MODE_DPMS_ON);
105
106 DBG("mode=%d", mode);
107
108 if (enabled == mdp4_dtv_encoder->enabled)
109 return;
110
111 if (enabled) {
112 unsigned long pc = mdp4_dtv_encoder->pixclock;
113 int ret;
114
115 bs_set(mdp4_dtv_encoder, 1);
116
117 DBG("setting src_clk=%lu", pc);
118
119 ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc);
120 if (ret)
121 dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret);
122 clk_prepare_enable(mdp4_dtv_encoder->src_clk);
123 ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
124 if (ret)
125 dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
126 ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
127 if (ret)
128 dev_err(dev->dev, "failed to enable mdp_clk: %d\n", ret);
129
130 mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
131 } else {
132 mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
133
134 /*
135 * Wait for a vsync so we know ENABLE=0 has latched before
136 * the (connector) source of the vsyncs gets disabled;
137 * otherwise we end up in a funny state if we re-enable
138 * before the disable latches, with the result that some
139 * of the settings for the new modeset (like the new
140 * scanout buffer) don't latch properly..
141 */
142 mdp4_irq_wait(mdp4_kms, MDP4_IRQ_EXTERNAL_VSYNC);
143
144 clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
145 clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
146 clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
147
148 bs_set(mdp4_dtv_encoder, 0);
149 }
150
151 mdp4_dtv_encoder->enabled = enabled;
152}
153
154static bool mdp4_dtv_encoder_mode_fixup(struct drm_encoder *encoder,
155 const struct drm_display_mode *mode,
156 struct drm_display_mode *adjusted_mode)
157{
158 return true;
159}
160
161static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
162 struct drm_display_mode *mode,
163 struct drm_display_mode *adjusted_mode)
164{
165 struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
166 struct mdp4_kms *mdp4_kms = get_kms(encoder);
167 uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
168 uint32_t display_v_start, display_v_end;
169 uint32_t hsync_start_x, hsync_end_x;
170
171 mode = adjusted_mode;
172
173 DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
174 mode->base.id, mode->name,
175 mode->vrefresh, mode->clock,
176 mode->hdisplay, mode->hsync_start,
177 mode->hsync_end, mode->htotal,
178 mode->vdisplay, mode->vsync_start,
179 mode->vsync_end, mode->vtotal,
180 mode->type, mode->flags);
181
182 mdp4_dtv_encoder->pixclock = mode->clock * 1000;
183
184 DBG("pixclock=%lu", mdp4_dtv_encoder->pixclock);
185
186 ctrl_pol = 0;
187 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
188 ctrl_pol |= MDP4_DTV_CTRL_POLARITY_HSYNC_LOW;
189 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
190 ctrl_pol |= MDP4_DTV_CTRL_POLARITY_VSYNC_LOW;
191 /* probably need to get DATA_EN polarity from panel.. */
192
193 dtv_hsync_skew = 0; /* get this from panel? */
194
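/* The vertical timing registers appear to be programmed in units of pixel
 * clocks rather than lines, hence the multiplications by htotal below.
 */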
195 hsync_start_x = (mode->htotal - mode->hsync_start);
196 hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
197
198 vsync_period = mode->vtotal * mode->htotal;
199 vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
200 display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
201 display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
202
203 mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_CTRL,
204 MDP4_DTV_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
205 MDP4_DTV_HSYNC_CTRL_PERIOD(mode->htotal));
206 mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_PERIOD, vsync_period);
207 mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_LEN, vsync_len);
208 mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_HCTRL,
209 MDP4_DTV_DISPLAY_HCTRL_START(hsync_start_x) |
210 MDP4_DTV_DISPLAY_HCTRL_END(hsync_end_x));
211 mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VSTART, display_v_start);
212 mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VEND, display_v_end);
213 mdp4_write(mdp4_kms, REG_MDP4_DTV_BORDER_CLR, 0);
214 mdp4_write(mdp4_kms, REG_MDP4_DTV_UNDERFLOW_CLR,
215 MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY |
216 MDP4_DTV_UNDERFLOW_CLR_COLOR(0xff));
217 mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_SKEW, dtv_hsync_skew);
218 mdp4_write(mdp4_kms, REG_MDP4_DTV_CTRL_POLARITY, ctrl_pol);
219 mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_HCTL,
220 MDP4_DTV_ACTIVE_HCTL_START(0) |
221 MDP4_DTV_ACTIVE_HCTL_END(0));
222 mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VSTART, 0);
223 mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0);
224}
225
226static void mdp4_dtv_encoder_prepare(struct drm_encoder *encoder)
227{
228 mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
229}
230
231static void mdp4_dtv_encoder_commit(struct drm_encoder *encoder)
232{
233 mdp4_crtc_set_config(encoder->crtc,
234 MDP4_DMA_CONFIG_R_BPC(BPC8) |
235 MDP4_DMA_CONFIG_G_BPC(BPC8) |
236 MDP4_DMA_CONFIG_B_BPC(BPC8) |
237 MDP4_DMA_CONFIG_PACK(0x21));
238 mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV);
239 mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
240}
241
242static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
243 .dpms = mdp4_dtv_encoder_dpms,
244 .mode_fixup = mdp4_dtv_encoder_mode_fixup,
245 .mode_set = mdp4_dtv_encoder_mode_set,
246 .prepare = mdp4_dtv_encoder_prepare,
247 .commit = mdp4_dtv_encoder_commit,
248};
249
250long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
251{
252 struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
253 return clk_round_rate(mdp4_dtv_encoder->src_clk, rate);
254}
255
256/* initialize encoder */
257struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
258{
259 struct drm_encoder *encoder = NULL;
260 struct mdp4_dtv_encoder *mdp4_dtv_encoder;
261 int ret;
262
263 mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL);
264 if (!mdp4_dtv_encoder) {
265 ret = -ENOMEM;
266 goto fail;
267 }
268
269 encoder = &mdp4_dtv_encoder->base;
270
271 drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
272 DRM_MODE_ENCODER_TMDS);
273 drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
274
275 mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
276 if (IS_ERR(mdp4_dtv_encoder->src_clk)) {
277 dev_err(dev->dev, "failed to get src_clk\n");
278 ret = PTR_ERR(mdp4_dtv_encoder->src_clk);
279 goto fail;
280 }
281
282 mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
283 if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
284 dev_err(dev->dev, "failed to get hdmi_clk\n");
285 ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
286 goto fail;
287 }
288
289 mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk");
290 if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
291 dev_err(dev->dev, "failed to get mdp_clk\n");
292 ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
293 goto fail;
294 }
295
296 bs_init(mdp4_dtv_encoder);
297
298 return encoder;
299
300fail:
301 if (encoder)
302 mdp4_dtv_encoder_destroy(encoder);
303
304 return ERR_PTR(ret);
305}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_format.c b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
new file mode 100644
index 000000000000..7b645f2e837a
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19#include "msm_drv.h"
20#include "mdp4_kms.h"
21
22#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \
23 .base = { .pixel_format = DRM_FORMAT_ ## name }, \
24 .bpc_a = BPC ## a ## A, \
25 .bpc_r = BPC ## r, \
26 .bpc_g = BPC ## g, \
27 .bpc_b = BPC ## b, \
28 .unpack = { e0, e1, e2, e3 }, \
29 .alpha_enable = alpha, \
30 .unpack_tight = tight, \
31 .cpp = c, \
32 .unpack_count = cnt, \
33 }
34
35#define BPC0A 0
36
37static const struct mdp4_format formats[] = {
38 /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt */
39 FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4),
40 FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4),
41 FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3),
42 FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3),
43 FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3),
44 FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3),
45};
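/* The e0..e3 columns feed the .unpack[] array in FMT() and appear to give
 * the component order the pipe uses when unpacking each pixel (cf. the
 * MDP4_PIPE_SRC_UNPACK_ELEMn fields in mdp4.xml.h).
 */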
46
47const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format)
48{
49 int i;
50 for (i = 0; i < ARRAY_SIZE(formats); i++) {
51 const struct mdp4_format *f = &formats[i];
52 if (f->base.pixel_format == format)
53 return &f->base;
54 }
55 return NULL;
56}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
new file mode 100644
index 000000000000..5c6b7fca4edd
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
@@ -0,0 +1,203 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19#include "msm_drv.h"
20#include "mdp4_kms.h"
21
22
23struct mdp4_irq_wait {
24 struct mdp4_irq irq;
25 int count;
26};
27
28static DECLARE_WAIT_QUEUE_HEAD(wait_event);
29
30static DEFINE_SPINLOCK(list_lock);
31
32static void update_irq(struct mdp4_kms *mdp4_kms)
33{
34 struct mdp4_irq *irq;
35 uint32_t irqmask = mdp4_kms->vblank_mask;
36
37 assert_spin_locked(&list_lock);
38
39 list_for_each_entry(irq, &mdp4_kms->irq_list, node)
40 irqmask |= irq->irqmask;
41
42 mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask);
43}
44
45static void update_irq_unlocked(struct mdp4_kms *mdp4_kms)
46{
47 unsigned long flags;
48 spin_lock_irqsave(&list_lock, flags);
49 update_irq(mdp4_kms);
50 spin_unlock_irqrestore(&list_lock, flags);
51}
52
53static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus)
54{
55 DRM_ERROR("errors: %08x\n", irqstatus);
56}
57
58void mdp4_irq_preinstall(struct msm_kms *kms)
59{
60 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
61 mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
62}
63
64int mdp4_irq_postinstall(struct msm_kms *kms)
65{
66 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
67 struct mdp4_irq *error_handler = &mdp4_kms->error_handler;
68
69 INIT_LIST_HEAD(&mdp4_kms->irq_list);
70
71 error_handler->irq = mdp4_irq_error_handler;
72 error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
73 MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
74
75 mdp4_irq_register(mdp4_kms, error_handler);
76
77 return 0;
78}
79
80void mdp4_irq_uninstall(struct msm_kms *kms)
81{
82 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
83 mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
84}
85
86irqreturn_t mdp4_irq(struct msm_kms *kms)
87{
88 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
89 struct drm_device *dev = mdp4_kms->dev;
90 struct msm_drm_private *priv = dev->dev_private;
91 struct mdp4_irq *handler, *n;
92 unsigned long flags;
93 unsigned int id;
94 uint32_t status;
95
96 status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
97 mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
98
99 VERB("status=%08x", status);
100
101 for (id = 0; id < priv->num_crtcs; id++)
102 if (status & mdp4_crtc_vblank(priv->crtcs[id]))
103 drm_handle_vblank(dev, id);
104
105 spin_lock_irqsave(&list_lock, flags);
106 mdp4_kms->in_irq = true;
107 list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) {
108 if (handler->irqmask & status) {
109 spin_unlock_irqrestore(&list_lock, flags);
110 handler->irq(handler, handler->irqmask & status);
111 spin_lock_irqsave(&list_lock, flags);
112 }
113 }
114 mdp4_kms->in_irq = false;
115 update_irq(mdp4_kms);
116 spin_unlock_irqrestore(&list_lock, flags);
117
118 return IRQ_HANDLED;
119}
120
121int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
122{
123 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
124 unsigned long flags;
125
126 spin_lock_irqsave(&list_lock, flags);
127 mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc);
128 update_irq(mdp4_kms);
129 spin_unlock_irqrestore(&list_lock, flags);
130
131 return 0;
132}
133
134void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
135{
136 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
137 unsigned long flags;
138
139 spin_lock_irqsave(&list_lock, flags);
140 mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc);
141 update_irq(mdp4_kms);
142 spin_unlock_irqrestore(&list_lock, flags);
143}
144
145static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus)
146{
147 struct mdp4_irq_wait *wait =
148 container_of(irq, struct mdp4_irq_wait, irq);
149 wait->count--;
150 wake_up_all(&wait_event);
151}
152
153void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask)
154{
155 struct mdp4_irq_wait wait = {
156 .irq = {
157 .irq = wait_irq,
158 .irqmask = irqmask,
159 },
160 .count = 1,
161 };
162 mdp4_irq_register(mdp4_kms, &wait.irq);
163 wait_event(wait_event, (wait.count <= 0));
164 mdp4_irq_unregister(mdp4_kms, &wait.irq);
165}
166
167void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
168{
169 unsigned long flags;
170 bool needs_update = false;
171
172 spin_lock_irqsave(&list_lock, flags);
173
174 if (!irq->registered) {
175 irq->registered = true;
176 list_add(&irq->node, &mdp4_kms->irq_list);
177 needs_update = !mdp4_kms->in_irq;
178 }
179
180 spin_unlock_irqrestore(&list_lock, flags);
181
182 if (needs_update)
183 update_irq_unlocked(mdp4_kms);
184}
185
186void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
187{
188 unsigned long flags;
189 bool needs_update = false;
190
191 spin_lock_irqsave(&list_lock, flags);
192
193 if (irq->registered) {
194 irq->registered = false;
195 list_del(&irq->node);
196 needs_update = !mdp4_kms->in_irq;
197 }
198
199 spin_unlock_irqrestore(&list_lock, flags);
200
201 if (needs_update)
202 update_irq_unlocked(mdp4_kms);
203}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
new file mode 100644
index 000000000000..5db5bbaedae2
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
@@ -0,0 +1,365 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19#include "msm_drv.h"
20#include "mdp4_kms.h"
21
22#include <mach/iommu.h>
23
24static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
25
26static int mdp4_hw_init(struct msm_kms *kms)
27{
28 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
29 struct drm_device *dev = mdp4_kms->dev;
30 uint32_t version, major, minor, dmap_cfg, vg_cfg;
31 unsigned long clk;
32 int ret = 0;
33
34 pm_runtime_get_sync(dev->dev);
35
36 version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
37
38 major = FIELD(version, MDP4_VERSION_MAJOR);
39 minor = FIELD(version, MDP4_VERSION_MINOR);
40
41 DBG("found MDP version v%d.%d", major, minor);
42
43 if (major != 4) {
44 dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
45 major, minor);
46 ret = -ENXIO;
47 goto out;
48 }
49
50 mdp4_kms->rev = minor;
51
52 if (mdp4_kms->dsi_pll_vdda) {
53 if ((mdp4_kms->rev == 2) || (mdp4_kms->rev == 4)) {
54 ret = regulator_set_voltage(mdp4_kms->dsi_pll_vdda,
55 1200000, 1200000);
56 if (ret) {
57 dev_err(dev->dev,
58 "failed to set dsi_pll_vdda voltage: %d\n", ret);
59 goto out;
60 }
61 }
62 }
63
64 if (mdp4_kms->dsi_pll_vddio) {
65 if (mdp4_kms->rev == 2) {
66 ret = regulator_set_voltage(mdp4_kms->dsi_pll_vddio,
67 1800000, 1800000);
68 if (ret) {
69 dev_err(dev->dev,
70 "failed to set dsi_pll_vddio voltage: %d\n", ret);
71 goto out;
72 }
73 }
74 }
75
76 if (mdp4_kms->rev > 1) {
77 mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
78 mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
79 }
80
81 mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);
82
83 /* max read pending cmd config, 3 pending requests: */
84 mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);
85
86 clk = clk_get_rate(mdp4_kms->clk);
87
88 if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
89 dmap_cfg = 0x47; /* 16 bytes-burst x 8 req */
90 vg_cfg = 0x47; /* 16 bytes-burst x 8 req */
91 } else {
92 dmap_cfg = 0x27; /* 8 bytes-burst x 8 req */
93 vg_cfg = 0x43; /* 16 bytes-burst x 4 req */
94 }
95
96 DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);
97
98 mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
99 mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);
100
101 mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
102 mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
103 mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
104 mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);
105
106 if (mdp4_kms->rev >= 2)
107 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
108
109 /* disable CSC matrix / YUV by default: */
110 mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
111 mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
112 mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
113 mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
114 mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
115 mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);
116
117 if (mdp4_kms->rev > 1)
118 mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
119
120out:
121 pm_runtime_put_sync(dev->dev);
122
123 return ret;
124}
125
126static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
127 struct drm_encoder *encoder)
128{
129 /* if we had >1 encoder, we'd need something more clever: */
130 return mdp4_dtv_round_pixclk(encoder, rate);
131}
132
133static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
134{
135 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
136 struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
137 unsigned i;
138
139 for (i = 0; i < priv->num_crtcs; i++)
140 mdp4_crtc_cancel_pending_flip(priv->crtcs[i]);
141}
142
143static void mdp4_destroy(struct msm_kms *kms)
144{
145 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
146 kfree(mdp4_kms);
147}
148
149static const struct msm_kms_funcs kms_funcs = {
150 .hw_init = mdp4_hw_init,
151 .irq_preinstall = mdp4_irq_preinstall,
152 .irq_postinstall = mdp4_irq_postinstall,
153 .irq_uninstall = mdp4_irq_uninstall,
154 .irq = mdp4_irq,
155 .enable_vblank = mdp4_enable_vblank,
156 .disable_vblank = mdp4_disable_vblank,
157 .get_format = mdp4_get_format,
158 .round_pixclk = mdp4_round_pixclk,
159 .preclose = mdp4_preclose,
160 .destroy = mdp4_destroy,
161};
162
163int mdp4_disable(struct mdp4_kms *mdp4_kms)
164{
165 DBG("");
166
167 clk_disable_unprepare(mdp4_kms->clk);
168 if (mdp4_kms->pclk)
169 clk_disable_unprepare(mdp4_kms->pclk);
170 clk_disable_unprepare(mdp4_kms->lut_clk);
171
172 return 0;
173}
174
175int mdp4_enable(struct mdp4_kms *mdp4_kms)
176{
177 DBG("");
178
179 clk_prepare_enable(mdp4_kms->clk);
180 if (mdp4_kms->pclk)
181 clk_prepare_enable(mdp4_kms->pclk);
182 clk_prepare_enable(mdp4_kms->lut_clk);
183
184 return 0;
185}
186
187static int modeset_init(struct mdp4_kms *mdp4_kms)
188{
189 struct drm_device *dev = mdp4_kms->dev;
190 struct msm_drm_private *priv = dev->dev_private;
191 struct drm_plane *plane;
192 struct drm_crtc *crtc;
193 struct drm_encoder *encoder;
194 int ret;
195
196 /*
197 * NOTE: this is a bit simplistic until we add support
198 * for more than just RGB1->DMA_E->DTV->HDMI
199 */
200
201 /* the CRTCs get constructed with a private plane: */
202 plane = mdp4_plane_init(dev, RGB1, true);
203 if (IS_ERR(plane)) {
204 dev_err(dev->dev, "failed to construct plane for RGB1\n");
205 ret = PTR_ERR(plane);
206 goto fail;
207 }
208
209 crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 1, DMA_E);
210 if (IS_ERR(crtc)) {
211 dev_err(dev->dev, "failed to construct crtc for DMA_E\n");
212 ret = PTR_ERR(crtc);
213 goto fail;
214 }
215 priv->crtcs[priv->num_crtcs++] = crtc;
216
217 encoder = mdp4_dtv_encoder_init(dev);
218 if (IS_ERR(encoder)) {
219 dev_err(dev->dev, "failed to construct DTV encoder\n");
220 ret = PTR_ERR(encoder);
221 goto fail;
222 }
223 encoder->possible_crtcs = 0x1; /* DTV can be hooked to DMA_E */
224 priv->encoders[priv->num_encoders++] = encoder;
225
226 ret = hdmi_init(dev, encoder);
227 if (ret) {
228 dev_err(dev->dev, "failed to initialize HDMI\n");
229 goto fail;
230 }
231
232 return 0;
233
234fail:
235 return ret;
236}
237
238static const char *iommu_ports[] = {
239 "mdp_port0_cb0", "mdp_port1_cb0",
240};
241
242struct msm_kms *mdp4_kms_init(struct drm_device *dev)
243{
244 struct platform_device *pdev = dev->platformdev;
245 struct mdp4_platform_config *config = mdp4_get_config(pdev);
246 struct mdp4_kms *mdp4_kms;
247 struct msm_kms *kms = NULL;
248 int ret;
249
250 mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
251 if (!mdp4_kms) {
252 dev_err(dev->dev, "failed to allocate kms\n");
253 ret = -ENOMEM;
254 goto fail;
255 }
256
257 kms = &mdp4_kms->base;
258 kms->funcs = &kms_funcs;
259
260 mdp4_kms->dev = dev;
261
262 mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
263 if (IS_ERR(mdp4_kms->mmio)) {
264 ret = PTR_ERR(mdp4_kms->mmio);
265 goto fail;
266 }
267
268 mdp4_kms->dsi_pll_vdda = devm_regulator_get(&pdev->dev, "dsi_pll_vdda");
269 if (IS_ERR(mdp4_kms->dsi_pll_vdda))
270 mdp4_kms->dsi_pll_vdda = NULL;
271
272 mdp4_kms->dsi_pll_vddio = devm_regulator_get(&pdev->dev, "dsi_pll_vddio");
273 if (IS_ERR(mdp4_kms->dsi_pll_vddio))
274 mdp4_kms->dsi_pll_vddio = NULL;
275
276 mdp4_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
277 if (IS_ERR(mdp4_kms->vdd))
278 mdp4_kms->vdd = NULL;
279
280 if (mdp4_kms->vdd) {
281 ret = regulator_enable(mdp4_kms->vdd);
282 if (ret) {
283 dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
284 goto fail;
285 }
286 }
287
288 mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
289 if (IS_ERR(mdp4_kms->clk)) {
290 dev_err(dev->dev, "failed to get core_clk\n");
291 ret = PTR_ERR(mdp4_kms->clk);
292 goto fail;
293 }
294
295 mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
296 if (IS_ERR(mdp4_kms->pclk))
297 mdp4_kms->pclk = NULL;
298
299 // XXX if (rev >= MDP_REV_42) { ???
300 mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
301 if (IS_ERR(mdp4_kms->lut_clk)) {
302 dev_err(dev->dev, "failed to get lut_clk\n");
303 ret = PTR_ERR(mdp4_kms->lut_clk);
304 goto fail;
305 }
306
307 clk_set_rate(mdp4_kms->clk, config->max_clk);
308 clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
309
310 if (!config->iommu) {
311 dev_err(dev->dev, "no iommu\n");
312 ret = -ENXIO;
313 goto fail;
314 }
315
316 /* make sure things are off before attaching iommu (bootloader could
317 * have left things on, in which case we'll start getting faults if
318 * we don't disable):
319 */
320 mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
321 mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
322 mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
323 mdelay(16);
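	/* (assumption) 16ms is roughly one frame at 60Hz, so any in-flight
	 * scanout has time to drain before the iommu attach below */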
324
325 ret = msm_iommu_attach(dev, config->iommu,
326 iommu_ports, ARRAY_SIZE(iommu_ports));
327 if (ret)
328 goto fail;
329
330 mdp4_kms->id = msm_register_iommu(dev, config->iommu);
331 if (mdp4_kms->id < 0) {
332 ret = mdp4_kms->id;
333 dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
334 goto fail;
335 }
336
337 ret = modeset_init(mdp4_kms);
338 if (ret) {
339 dev_err(dev->dev, "modeset_init failed: %d\n", ret);
340 goto fail;
341 }
342
343 return kms;
344
345fail:
346 if (kms)
347 mdp4_destroy(kms);
348 return ERR_PTR(ret);
349}
350
351static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
352{
353 static struct mdp4_platform_config config = {};
354#ifdef CONFIG_OF
355 /* TODO */
356#else
357 if (cpu_is_apq8064())
358 config.max_clk = 266667000;
359 else
360 config.max_clk = 200000000;
361
362 config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
363#endif
364 return &config;
365}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
new file mode 100644
index 000000000000..1e83554955f3
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
@@ -0,0 +1,194 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MDP4_KMS_H__
19#define __MDP4_KMS_H__
20
21#include <linux/clk.h>
22#include <linux/platform_device.h>
23#include <linux/regulator/consumer.h>
24
25#include "msm_drv.h"
26#include "mdp4.xml.h"
27
28
29/* For transiently registering for different MDP4 irqs that various parts
 30 * of the KMS code need during setup/configuration. These are not
31 * necessarily the same as what drm_vblank_get/put() are requesting, and
32 * the hysteresis in drm_vblank_put() is not necessarily desirable for
33 * internal housekeeping related irq usage.
34 */
35struct mdp4_irq {
36 struct list_head node;
37 uint32_t irqmask;
38 bool registered;
39 void (*irq)(struct mdp4_irq *irq, uint32_t irqstatus);
40};
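The registration and wait helpers declared further down this header show the intended use; a minimal sketch, assuming the MDP4_IRQ_DMA_E_DONE bit from the generated mdp4.xml.h:

	/* block until the external-interface DMA signals completion */
	static void wait_dma_e_done(struct mdp4_kms *mdp4_kms)
	{
		mdp4_irq_wait(mdp4_kms, MDP4_IRQ_DMA_E_DONE);
	}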
41
42struct mdp4_kms {
43 struct msm_kms base;
44
45 struct drm_device *dev;
46
47 int rev;
48
49 /* mapper-id used to request GEM buffer mapped for scanout: */
50 int id;
51
52 void __iomem *mmio;
53
54 struct regulator *dsi_pll_vdda;
55 struct regulator *dsi_pll_vddio;
56 struct regulator *vdd;
57
58 struct clk *clk;
59 struct clk *pclk;
60 struct clk *lut_clk;
61
62 /* irq handling: */
63 bool in_irq;
64 struct list_head irq_list; /* list of mdp4_irq */
65 uint32_t vblank_mask; /* irq bits set for userspace vblank */
66 struct mdp4_irq error_handler;
67};
68#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
69
70/* platform config data (ie. from DT, or pdata) */
71struct mdp4_platform_config {
72 struct iommu_domain *iommu;
73 uint32_t max_clk;
74};
75
76struct mdp4_format {
77 struct msm_format base;
78 enum mpd4_bpc bpc_r, bpc_g, bpc_b;
79 enum mpd4_bpc_alpha bpc_a;
80 uint8_t unpack[4];
81 bool alpha_enable, unpack_tight;
82 uint8_t cpp, unpack_count;
83};
84#define to_mdp4_format(x) container_of(x, struct mdp4_format, base)
85
86static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
87{
88 msm_writel(data, mdp4_kms->mmio + reg);
89}
90
91static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg)
92{
93 return msm_readl(mdp4_kms->mmio + reg);
94}
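These two accessors are the only register I/O the rest of the driver uses; a read-modify-write helper built on them might look like the following sketch (not part of the original header):

	/* set bits in a register without disturbing the others */
	static inline void mdp4_set_bits(struct mdp4_kms *mdp4_kms, u32 reg, u32 bits)
	{
		mdp4_write(mdp4_kms, reg, mdp4_read(mdp4_kms, reg) | bits);
	}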
95
96static inline uint32_t pipe2flush(enum mpd4_pipe pipe)
97{
98 switch (pipe) {
99 case VG1: return MDP4_OVERLAY_FLUSH_VG1;
100 case VG2: return MDP4_OVERLAY_FLUSH_VG2;
101 case RGB1: return MDP4_OVERLAY_FLUSH_RGB1;
 102 case RGB2: return MDP4_OVERLAY_FLUSH_RGB2;
103 default: return 0;
104 }
105}
106
107static inline uint32_t ovlp2flush(int ovlp)
108{
109 switch (ovlp) {
110 case 0: return MDP4_OVERLAY_FLUSH_OVLP0;
111 case 1: return MDP4_OVERLAY_FLUSH_OVLP1;
112 default: return 0;
113 }
114}
115
116static inline uint32_t dma2irq(enum mdp4_dma dma)
117{
118 switch (dma) {
119 case DMA_P: return MDP4_IRQ_DMA_P_DONE;
120 case DMA_S: return MDP4_IRQ_DMA_S_DONE;
121 case DMA_E: return MDP4_IRQ_DMA_E_DONE;
122 default: return 0;
123 }
124}
125
126static inline uint32_t dma2err(enum mdp4_dma dma)
127{
128 switch (dma) {
129 case DMA_P: return MDP4_IRQ_PRIMARY_INTF_UDERRUN;
130 case DMA_S: return 0; // ???
131 case DMA_E: return MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
132 default: return 0;
133 }
134}
135
136int mdp4_disable(struct mdp4_kms *mdp4_kms);
137int mdp4_enable(struct mdp4_kms *mdp4_kms);
138
139void mdp4_irq_preinstall(struct msm_kms *kms);
140int mdp4_irq_postinstall(struct msm_kms *kms);
141void mdp4_irq_uninstall(struct msm_kms *kms);
142irqreturn_t mdp4_irq(struct msm_kms *kms);
143void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask);
144void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
145void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
146int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
147void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
148
149const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format);
150
151void mdp4_plane_install_properties(struct drm_plane *plane,
152 struct drm_mode_object *obj);
153void mdp4_plane_set_scanout(struct drm_plane *plane,
154 struct drm_framebuffer *fb);
155int mdp4_plane_mode_set(struct drm_plane *plane,
156 struct drm_crtc *crtc, struct drm_framebuffer *fb,
157 int crtc_x, int crtc_y,
158 unsigned int crtc_w, unsigned int crtc_h,
159 uint32_t src_x, uint32_t src_y,
160 uint32_t src_w, uint32_t src_h);
161enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane);
162struct drm_plane *mdp4_plane_init(struct drm_device *dev,
163 enum mpd4_pipe pipe_id, bool private_plane);
164
165uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
166void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc);
167void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
168void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf);
169struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
170 struct drm_plane *plane, int id, int ovlp_id,
171 enum mdp4_dma dma_id);
172
173long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
174struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
175
176#ifdef CONFIG_MSM_BUS_SCALING
177static inline int match_dev_name(struct device *dev, void *data)
178{
179 return !strcmp(dev_name(dev), data);
180}
181/* bus scaling data is associated with extra pointless platform devices,
182 * "dtv", etc.. this is a bit of a hack, but we need a way for encoders
183 * to find their pdata to make the bus-scaling stuff work.
184 */
185static inline void *mdp4_find_pdata(const char *devname)
186{
187 struct device *dev;
188 dev = bus_find_device(&platform_bus_type, NULL,
189 (void *)devname, match_dev_name);
190 return dev ? dev->platform_data : NULL;
191}
192#endif
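A sketch of how an encoder could use this helper, assuming the "dtv" device name from the comment above and leaving the pdata type opaque:

	/* hypothetical call site in the DTV encoder setup: */
	void *dtv_pdata = mdp4_find_pdata("dtv");
	if (!dtv_pdata)
		DBG("no bus-scaling pdata for dtv");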
193
194#endif /* __MDP4_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
new file mode 100644
index 000000000000..3468229d58b3
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
@@ -0,0 +1,243 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "mdp4_kms.h"
19
20
21struct mdp4_plane {
22 struct drm_plane base;
23 const char *name;
24
25 enum mpd4_pipe pipe;
26
27 uint32_t nformats;
28 uint32_t formats[32];
29
30 bool enabled;
31};
32#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
33
34static struct mdp4_kms *get_kms(struct drm_plane *plane)
35{
36 struct msm_drm_private *priv = plane->dev->dev_private;
37 return to_mdp4_kms(priv->kms);
38}
39
40static int mdp4_plane_update(struct drm_plane *plane,
41 struct drm_crtc *crtc, struct drm_framebuffer *fb,
42 int crtc_x, int crtc_y,
43 unsigned int crtc_w, unsigned int crtc_h,
44 uint32_t src_x, uint32_t src_y,
45 uint32_t src_w, uint32_t src_h)
46{
47 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
48
49 mdp4_plane->enabled = true;
50
51 if (plane->fb)
52 drm_framebuffer_unreference(plane->fb);
53
54 drm_framebuffer_reference(fb);
55
56 return mdp4_plane_mode_set(plane, crtc, fb,
57 crtc_x, crtc_y, crtc_w, crtc_h,
58 src_x, src_y, src_w, src_h);
59}
60
61static int mdp4_plane_disable(struct drm_plane *plane)
62{
63 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
64 DBG("%s: TODO", mdp4_plane->name); // XXX
65 return 0;
66}
67
68static void mdp4_plane_destroy(struct drm_plane *plane)
69{
70 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
71
72 mdp4_plane_disable(plane);
73 drm_plane_cleanup(plane);
74
75 kfree(mdp4_plane);
76}
77
78/* helper to install properties which are common to planes and crtcs */
79void mdp4_plane_install_properties(struct drm_plane *plane,
80 struct drm_mode_object *obj)
81{
82 // XXX
83}
84
85int mdp4_plane_set_property(struct drm_plane *plane,
86 struct drm_property *property, uint64_t val)
87{
88 // XXX
89 return -EINVAL;
90}
91
92static const struct drm_plane_funcs mdp4_plane_funcs = {
93 .update_plane = mdp4_plane_update,
94 .disable_plane = mdp4_plane_disable,
95 .destroy = mdp4_plane_destroy,
96 .set_property = mdp4_plane_set_property,
97};
98
99void mdp4_plane_set_scanout(struct drm_plane *plane,
100 struct drm_framebuffer *fb)
101{
102 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
103 struct mdp4_kms *mdp4_kms = get_kms(plane);
104 enum mpd4_pipe pipe = mdp4_plane->pipe;
105 uint32_t iova;
106
107 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
108 MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
109 MDP4_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
110
111 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_B(pipe),
112 MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
113 MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
114
115 msm_gem_get_iova(msm_framebuffer_bo(fb, 0), mdp4_kms->id, &iova);
116 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova);
117
118 plane->fb = fb;
119}
120
121#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000
122
123int mdp4_plane_mode_set(struct drm_plane *plane,
124 struct drm_crtc *crtc, struct drm_framebuffer *fb,
125 int crtc_x, int crtc_y,
126 unsigned int crtc_w, unsigned int crtc_h,
127 uint32_t src_x, uint32_t src_y,
128 uint32_t src_w, uint32_t src_h)
129{
130 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
131 struct mdp4_kms *mdp4_kms = get_kms(plane);
132 enum mpd4_pipe pipe = mdp4_plane->pipe;
133 const struct mdp4_format *format;
134 uint32_t op_mode = 0;
135 uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
136 uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
137
138 /* src values are in Q16 fixed point, convert to integer: */
139 src_x = src_x >> 16;
140 src_y = src_y >> 16;
141 src_w = src_w >> 16;
142 src_h = src_h >> 16;
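	/* e.g. a 1920-wide source arrives as 1920 << 16 == 0x07800000;
	 * shifting right by 16 recovers the integer 1920 */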
143
144 if (src_w != crtc_w) {
145 op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
146 /* TODO calc phasex_step */
147 }
148
149 if (src_h != crtc_h) {
150 op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN;
151 /* TODO calc phasey_step */
152 }
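	/*
	 * (assumption) the 0x20000000 default above reads as a Q29 ratio where
	 * 1 << 29 == 1.0, so a plausible computation for the TODOs would be
	 * phasex_step = ((uint64_t)src_w << 29) / crtc_w (and likewise for y);
	 * unverified against the hardware documentation.
	 */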
153
154 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe),
155 MDP4_PIPE_SRC_SIZE_WIDTH(src_w) |
156 MDP4_PIPE_SRC_SIZE_HEIGHT(src_h));
157
158 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_XY(pipe),
159 MDP4_PIPE_SRC_XY_X(src_x) |
160 MDP4_PIPE_SRC_XY_Y(src_y));
161
162 mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_SIZE(pipe),
163 MDP4_PIPE_DST_SIZE_WIDTH(crtc_w) |
164 MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
165
166 mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
 167 MDP4_PIPE_DST_XY_X(crtc_x) |
 168 MDP4_PIPE_DST_XY_Y(crtc_y));
169
170 mdp4_plane_set_scanout(plane, fb);
171
172 format = to_mdp4_format(msm_framebuffer_format(fb));
173
174 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
175 MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
176 MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
177 MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
178 MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
179 COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
180 MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
181 MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
182 COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT));
183
184 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe),
185 MDP4_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
186 MDP4_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
187 MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
188 MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
189
190 mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode);
191 mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
192 mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
193
194 plane->crtc = crtc;
195
196 return 0;
197}
198
199static const char *pipe_names[] = {
200 "VG1", "VG2",
201 "RGB1", "RGB2", "RGB3",
202 "VG3", "VG4",
203};
204
205enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane)
206{
207 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
208 return mdp4_plane->pipe;
209}
210
211/* initialize plane */
212struct drm_plane *mdp4_plane_init(struct drm_device *dev,
213 enum mpd4_pipe pipe_id, bool private_plane)
214{
215 struct msm_drm_private *priv = dev->dev_private;
216 struct drm_plane *plane = NULL;
217 struct mdp4_plane *mdp4_plane;
218 int ret;
219
220 mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL);
221 if (!mdp4_plane) {
222 ret = -ENOMEM;
223 goto fail;
224 }
225
226 plane = &mdp4_plane->base;
227
228 mdp4_plane->pipe = pipe_id;
229 mdp4_plane->name = pipe_names[pipe_id];
230
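	/* (1 << num_crtcs) - 1 sets one bit per CRTC constructed so far; with
	 * two CRTCs the mask is 0x3, so the plane may be placed on either one */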
231 drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &mdp4_plane_funcs,
232 mdp4_plane->formats, mdp4_plane->nformats, private_plane);
233
234 mdp4_plane_install_properties(plane, &plane->base);
235
236 return plane;
237
238fail:
239 if (plane)
240 mdp4_plane_destroy(plane);
241
242 return ERR_PTR(ret);
243}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
new file mode 100644
index 000000000000..864c9773636b
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -0,0 +1,776 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19#include "msm_gpu.h"
20
21#include <mach/iommu.h>
22
23static void msm_fb_output_poll_changed(struct drm_device *dev)
24{
25 struct msm_drm_private *priv = dev->dev_private;
26 if (priv->fbdev)
27 drm_fb_helper_hotplug_event(priv->fbdev);
28}
29
30static const struct drm_mode_config_funcs mode_config_funcs = {
31 .fb_create = msm_framebuffer_create,
32 .output_poll_changed = msm_fb_output_poll_changed,
33};
34
35static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
36 unsigned long iova, int flags, void *arg)
37{
38 DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
39 return 0;
40}
41
42int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu)
43{
44 struct msm_drm_private *priv = dev->dev_private;
45 int idx = priv->num_iommus++;
46
47 if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus)))
48 return -EINVAL;
49
50 priv->iommus[idx] = iommu;
51
52 iommu_set_fault_handler(iommu, msm_fault_handler, dev);
53
54 /* need to iommu_attach_device() somewhere?? on resume?? */
55
56 return idx;
57}
58
59int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
60 const char **names, int cnt)
61{
62 int i, ret;
63
64 for (i = 0; i < cnt; i++) {
65 struct device *ctx = msm_iommu_get_ctx(names[i]);
66 if (!ctx)
67 continue;
68 ret = iommu_attach_device(iommu, ctx);
69 if (ret) {
 70 dev_warn(dev->dev, "could not attach iommu to %s\n", names[i]);
71 return ret;
72 }
73 }
74 return 0;
75}
76
77#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
78static bool reglog = false;
79MODULE_PARM_DESC(reglog, "Enable register read/write logging");
80module_param(reglog, bool, 0600);
81#else
82#define reglog 0
83#endif
84
85void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
86 const char *dbgname)
87{
88 struct resource *res;
89 unsigned long size;
90 void __iomem *ptr;
91
92 if (name)
93 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
94 else
95 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
96
97 if (!res) {
98 dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
99 return ERR_PTR(-EINVAL);
100 }
101
102 size = resource_size(res);
103
104 ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
105 if (!ptr) {
106 dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
107 return ERR_PTR(-ENOMEM);
108 }
109
110 if (reglog)
111 printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size);
112
113 return ptr;
114}
115
116void msm_writel(u32 data, void __iomem *addr)
117{
118 if (reglog)
119 printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
120 writel(data, addr);
121}
122
123u32 msm_readl(const void __iomem *addr)
124{
125 u32 val = readl(addr);
126 if (reglog)
 127 printk(KERN_DEBUG "IO:R %08x %08x\n", (u32)addr, val);
128 return val;
129}
130
131/*
132 * DRM operations:
133 */
134
135static int msm_unload(struct drm_device *dev)
136{
137 struct msm_drm_private *priv = dev->dev_private;
138 struct msm_kms *kms = priv->kms;
139 struct msm_gpu *gpu = priv->gpu;
140
141 drm_kms_helper_poll_fini(dev);
142 drm_mode_config_cleanup(dev);
143 drm_vblank_cleanup(dev);
144
145 pm_runtime_get_sync(dev->dev);
146 drm_irq_uninstall(dev);
147 pm_runtime_put_sync(dev->dev);
148
149 flush_workqueue(priv->wq);
150 destroy_workqueue(priv->wq);
151
152 if (kms) {
153 pm_runtime_disable(dev->dev);
154 kms->funcs->destroy(kms);
155 }
156
157 if (gpu) {
158 mutex_lock(&dev->struct_mutex);
159 gpu->funcs->pm_suspend(gpu);
160 gpu->funcs->destroy(gpu);
161 mutex_unlock(&dev->struct_mutex);
162 }
163
164 dev->dev_private = NULL;
165
166 kfree(priv);
167
168 return 0;
169}
170
171static int msm_load(struct drm_device *dev, unsigned long flags)
172{
173 struct platform_device *pdev = dev->platformdev;
174 struct msm_drm_private *priv;
175 struct msm_kms *kms;
176 int ret;
177
178 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
179 if (!priv) {
180 dev_err(dev->dev, "failed to allocate private data\n");
181 return -ENOMEM;
182 }
183
184 dev->dev_private = priv;
185
186 priv->wq = alloc_ordered_workqueue("msm", 0);
187 init_waitqueue_head(&priv->fence_event);
188
189 INIT_LIST_HEAD(&priv->inactive_list);
190
191 drm_mode_config_init(dev);
192
193 kms = mdp4_kms_init(dev);
194 if (IS_ERR(kms)) {
195 /*
196 * NOTE: once we have GPU support, having no kms should not
197 * be considered fatal.. ideally we would still support gpu
198 * and (for example) use dmabuf/prime to share buffers with
199 * imx drm driver on iMX5
200 */
201 dev_err(dev->dev, "failed to load kms\n");
 202 ret = PTR_ERR(kms);
203 goto fail;
204 }
205
206 priv->kms = kms;
207
208 if (kms) {
209 pm_runtime_enable(dev->dev);
210 ret = kms->funcs->hw_init(kms);
211 if (ret) {
212 dev_err(dev->dev, "kms hw init failed: %d\n", ret);
213 goto fail;
214 }
215 }
216
217 dev->mode_config.min_width = 0;
218 dev->mode_config.min_height = 0;
219 dev->mode_config.max_width = 2048;
220 dev->mode_config.max_height = 2048;
221 dev->mode_config.funcs = &mode_config_funcs;
222
223 ret = drm_vblank_init(dev, 1);
224 if (ret < 0) {
225 dev_err(dev->dev, "failed to initialize vblank\n");
226 goto fail;
227 }
228
229 pm_runtime_get_sync(dev->dev);
230 ret = drm_irq_install(dev);
231 pm_runtime_put_sync(dev->dev);
232 if (ret < 0) {
233 dev_err(dev->dev, "failed to install IRQ handler\n");
234 goto fail;
235 }
236
237 platform_set_drvdata(pdev, dev);
238
239#ifdef CONFIG_DRM_MSM_FBDEV
240 priv->fbdev = msm_fbdev_init(dev);
241#endif
242
243 drm_kms_helper_poll_init(dev);
244
245 return 0;
246
247fail:
248 msm_unload(dev);
249 return ret;
250}
251
252static void load_gpu(struct drm_device *dev)
253{
254 struct msm_drm_private *priv = dev->dev_private;
255 struct msm_gpu *gpu;
256
257 if (priv->gpu)
258 return;
259
260 mutex_lock(&dev->struct_mutex);
261 gpu = a3xx_gpu_init(dev);
262 if (IS_ERR(gpu)) {
263 dev_warn(dev->dev, "failed to load a3xx gpu\n");
264 gpu = NULL;
265 /* not fatal */
266 }
267 mutex_unlock(&dev->struct_mutex);
268
269 if (gpu) {
270 int ret;
271 gpu->funcs->pm_resume(gpu);
272 ret = gpu->funcs->hw_init(gpu);
273 if (ret) {
274 dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
275 gpu->funcs->destroy(gpu);
276 gpu = NULL;
277 }
278 }
279
280 priv->gpu = gpu;
281}
282
283static int msm_open(struct drm_device *dev, struct drm_file *file)
284{
285 struct msm_file_private *ctx;
286
287 /* For now, load gpu on open.. to avoid the requirement of having
288 * firmware in the initrd.
289 */
290 load_gpu(dev);
291
292 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
293 if (!ctx)
294 return -ENOMEM;
295
296 file->driver_priv = ctx;
297
298 return 0;
299}
300
301static void msm_preclose(struct drm_device *dev, struct drm_file *file)
302{
303 struct msm_drm_private *priv = dev->dev_private;
304 struct msm_file_private *ctx = file->driver_priv;
305 struct msm_kms *kms = priv->kms;
306
307 if (kms)
308 kms->funcs->preclose(kms, file);
309
310 mutex_lock(&dev->struct_mutex);
311 if (ctx == priv->lastctx)
312 priv->lastctx = NULL;
313 mutex_unlock(&dev->struct_mutex);
314
315 kfree(ctx);
316}
317
318static void msm_lastclose(struct drm_device *dev)
319{
320 struct msm_drm_private *priv = dev->dev_private;
321 if (priv->fbdev) {
322 drm_modeset_lock_all(dev);
323 drm_fb_helper_restore_fbdev_mode(priv->fbdev);
324 drm_modeset_unlock_all(dev);
325 }
326}
327
328static irqreturn_t msm_irq(DRM_IRQ_ARGS)
329{
330 struct drm_device *dev = arg;
331 struct msm_drm_private *priv = dev->dev_private;
332 struct msm_kms *kms = priv->kms;
333 BUG_ON(!kms);
334 return kms->funcs->irq(kms);
335}
336
337static void msm_irq_preinstall(struct drm_device *dev)
338{
339 struct msm_drm_private *priv = dev->dev_private;
340 struct msm_kms *kms = priv->kms;
341 BUG_ON(!kms);
342 kms->funcs->irq_preinstall(kms);
343}
344
345static int msm_irq_postinstall(struct drm_device *dev)
346{
347 struct msm_drm_private *priv = dev->dev_private;
348 struct msm_kms *kms = priv->kms;
349 BUG_ON(!kms);
350 return kms->funcs->irq_postinstall(kms);
351}
352
353static void msm_irq_uninstall(struct drm_device *dev)
354{
355 struct msm_drm_private *priv = dev->dev_private;
356 struct msm_kms *kms = priv->kms;
357 BUG_ON(!kms);
358 kms->funcs->irq_uninstall(kms);
359}
360
361static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
362{
363 struct msm_drm_private *priv = dev->dev_private;
364 struct msm_kms *kms = priv->kms;
365 if (!kms)
366 return -ENXIO;
367 DBG("dev=%p, crtc=%d", dev, crtc_id);
368 return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
369}
370
371static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
372{
373 struct msm_drm_private *priv = dev->dev_private;
374 struct msm_kms *kms = priv->kms;
375 if (!kms)
376 return;
377 DBG("dev=%p, crtc=%d", dev, crtc_id);
378 kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
379}
380
381/*
382 * DRM debugfs:
383 */
384
385#ifdef CONFIG_DEBUG_FS
386static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
387{
388 struct msm_drm_private *priv = dev->dev_private;
389 struct msm_gpu *gpu = priv->gpu;
390
391 if (gpu) {
392 seq_printf(m, "%s Status:\n", gpu->name);
393 gpu->funcs->show(gpu, m);
394 }
395
396 return 0;
397}
398
399static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
400{
401 struct msm_drm_private *priv = dev->dev_private;
402 struct msm_gpu *gpu = priv->gpu;
403
404 if (gpu) {
405 seq_printf(m, "Active Objects (%s):\n", gpu->name);
406 msm_gem_describe_objects(&gpu->active_list, m);
407 }
408
409 seq_printf(m, "Inactive Objects:\n");
410 msm_gem_describe_objects(&priv->inactive_list, m);
411
412 return 0;
413}
414
415static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
416{
417 return drm_mm_dump_table(m, dev->mm_private);
418}
419
420static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
421{
422 struct msm_drm_private *priv = dev->dev_private;
423 struct drm_framebuffer *fb, *fbdev_fb = NULL;
424
425 if (priv->fbdev) {
426 seq_printf(m, "fbcon ");
427 fbdev_fb = priv->fbdev->fb;
428 msm_framebuffer_describe(fbdev_fb, m);
429 }
430
431 mutex_lock(&dev->mode_config.fb_lock);
432 list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
433 if (fb == fbdev_fb)
434 continue;
435
436 seq_printf(m, "user ");
437 msm_framebuffer_describe(fb, m);
438 }
439 mutex_unlock(&dev->mode_config.fb_lock);
440
441 return 0;
442}
443
444static int show_locked(struct seq_file *m, void *arg)
445{
446 struct drm_info_node *node = (struct drm_info_node *) m->private;
447 struct drm_device *dev = node->minor->dev;
448 int (*show)(struct drm_device *dev, struct seq_file *m) =
449 node->info_ent->data;
450 int ret;
451
452 ret = mutex_lock_interruptible(&dev->struct_mutex);
453 if (ret)
454 return ret;
455
456 ret = show(dev, m);
457
458 mutex_unlock(&dev->struct_mutex);
459
460 return ret;
461}
462
463static struct drm_info_list msm_debugfs_list[] = {
464 {"gpu", show_locked, 0, msm_gpu_show},
465 {"gem", show_locked, 0, msm_gem_show},
466 { "mm", show_locked, 0, msm_mm_show },
467 { "fb", show_locked, 0, msm_fb_show },
468};
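Once registered, these nodes appear under DRM's debugfs directory, typically /sys/kernel/debug/dri/<minor>/gpu, gem, mm and fb (assuming debugfs is mounted in the conventional place).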
469
470static int msm_debugfs_init(struct drm_minor *minor)
471{
472 struct drm_device *dev = minor->dev;
473 int ret;
474
475 ret = drm_debugfs_create_files(msm_debugfs_list,
476 ARRAY_SIZE(msm_debugfs_list),
477 minor->debugfs_root, minor);
478
479 if (ret) {
480 dev_err(dev->dev, "could not install msm_debugfs_list\n");
481 return ret;
482 }
483
484 return ret;
485}
486
487static void msm_debugfs_cleanup(struct drm_minor *minor)
488{
489 drm_debugfs_remove_files(msm_debugfs_list,
490 ARRAY_SIZE(msm_debugfs_list), minor);
491}
492#endif
493
494/*
495 * Fences:
496 */
497
498int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
499 struct timespec *timeout)
500{
501 struct msm_drm_private *priv = dev->dev_private;
502 unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
503 unsigned long start_jiffies = jiffies;
504 unsigned long remaining_jiffies;
505 int ret;
506
507 if (time_after(start_jiffies, timeout_jiffies))
508 remaining_jiffies = 0;
509 else
510 remaining_jiffies = timeout_jiffies - start_jiffies;
511
512 ret = wait_event_interruptible_timeout(priv->fence_event,
513 priv->completed_fence >= fence,
514 remaining_jiffies);
515 if (ret == 0) {
516 DBG("timeout waiting for fence: %u (completed: %u)",
517 fence, priv->completed_fence);
518 ret = -ETIMEDOUT;
519 } else if (ret != -ERESTARTSYS) {
520 ret = 0;
521 }
522
523 return ret;
524}
525
526/* call under struct_mutex */
527void msm_update_fence(struct drm_device *dev, uint32_t fence)
528{
529 struct msm_drm_private *priv = dev->dev_private;
530
531 if (fence > priv->completed_fence) {
532 priv->completed_fence = fence;
533 wake_up_all(&priv->fence_event);
534 }
535}
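The two halves pair up: a retire path publishes the newest completed fence under struct_mutex, and msm_wait_fence_interruptable() sleeps on fence_event until it catches up. A minimal sketch of the producer side (the retire function itself is hypothetical; only msm_update_fence() is real):

	static void example_retire(struct drm_device *dev, uint32_t fence)
	{
		mutex_lock(&dev->struct_mutex);
		msm_update_fence(dev, fence);	/* wakes all waiters at or below fence */
		mutex_unlock(&dev->struct_mutex);
	}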
536
537/*
538 * DRM ioctls:
539 */
540
541static int msm_ioctl_get_param(struct drm_device *dev, void *data,
542 struct drm_file *file)
543{
544 struct msm_drm_private *priv = dev->dev_private;
545 struct drm_msm_param *args = data;
546 struct msm_gpu *gpu;
547
548 /* for now, we just have 3d pipe.. eventually this would need to
549 * be more clever to dispatch to appropriate gpu module:
550 */
551 if (args->pipe != MSM_PIPE_3D0)
552 return -EINVAL;
553
554 gpu = priv->gpu;
555
556 if (!gpu)
557 return -ENXIO;
558
559 return gpu->funcs->get_param(gpu, args->param, &args->value);
560}
561
562static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
563 struct drm_file *file)
564{
565 struct drm_msm_gem_new *args = data;
566 return msm_gem_new_handle(dev, file, args->size,
567 args->flags, &args->handle);
568}
569
570#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })
571
572static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
573 struct drm_file *file)
574{
575 struct drm_msm_gem_cpu_prep *args = data;
576 struct drm_gem_object *obj;
577 int ret;
578
579 obj = drm_gem_object_lookup(dev, file, args->handle);
580 if (!obj)
581 return -ENOENT;
582
583 ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout));
584
585 drm_gem_object_unreference_unlocked(obj);
586
587 return ret;
588}
589
590static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
591 struct drm_file *file)
592{
593 struct drm_msm_gem_cpu_fini *args = data;
594 struct drm_gem_object *obj;
595 int ret;
596
597 obj = drm_gem_object_lookup(dev, file, args->handle);
598 if (!obj)
599 return -ENOENT;
600
601 ret = msm_gem_cpu_fini(obj);
602
603 drm_gem_object_unreference_unlocked(obj);
604
605 return ret;
606}
607
608static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
609 struct drm_file *file)
610{
611 struct drm_msm_gem_info *args = data;
612 struct drm_gem_object *obj;
613 int ret = 0;
614
615 if (args->pad)
616 return -EINVAL;
617
618 obj = drm_gem_object_lookup(dev, file, args->handle);
619 if (!obj)
620 return -ENOENT;
621
622 args->offset = msm_gem_mmap_offset(obj);
623
624 drm_gem_object_unreference_unlocked(obj);
625
626 return ret;
627}
628
629static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
630 struct drm_file *file)
631{
632 struct drm_msm_wait_fence *args = data;
633 return msm_wait_fence_interruptable(dev, args->fence, &TS(args->timeout));
634}
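From userspace the same ioctl is reachable through libdrm; a hedged sketch, with the struct layout inferred from the handler above (the fence number and timeout are arbitrary):

	#include <xf86drm.h>
	#include <drm/msm_drm.h>

	int wait_for_fence(int fd, uint32_t fence)
	{
		struct drm_msm_wait_fence req = {
			.fence = fence,
			.timeout = { .tv_sec = 1, .tv_nsec = 0 },
		};
		/* 0 once the fence retires, -ETIMEDOUT on timeout */
		return drmCommandWrite(fd, DRM_MSM_WAIT_FENCE, &req, sizeof(req));
	}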
635
636static const struct drm_ioctl_desc msm_ioctls[] = {
637 DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
638 DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
639 DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
640 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
641 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
642 DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH),
643 DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH),
644};
645
646static const struct vm_operations_struct vm_ops = {
647 .fault = msm_gem_fault,
648 .open = drm_gem_vm_open,
649 .close = drm_gem_vm_close,
650};
651
652static const struct file_operations fops = {
653 .owner = THIS_MODULE,
654 .open = drm_open,
655 .release = drm_release,
656 .unlocked_ioctl = drm_ioctl,
657#ifdef CONFIG_COMPAT
658 .compat_ioctl = drm_compat_ioctl,
659#endif
660 .poll = drm_poll,
661 .read = drm_read,
662 .llseek = no_llseek,
663 .mmap = msm_gem_mmap,
664};
665
666static struct drm_driver msm_driver = {
667 .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
668 .load = msm_load,
669 .unload = msm_unload,
670 .open = msm_open,
671 .preclose = msm_preclose,
672 .lastclose = msm_lastclose,
673 .irq_handler = msm_irq,
674 .irq_preinstall = msm_irq_preinstall,
675 .irq_postinstall = msm_irq_postinstall,
676 .irq_uninstall = msm_irq_uninstall,
677 .get_vblank_counter = drm_vblank_count,
678 .enable_vblank = msm_enable_vblank,
679 .disable_vblank = msm_disable_vblank,
680 .gem_free_object = msm_gem_free_object,
681 .gem_vm_ops = &vm_ops,
682 .dumb_create = msm_gem_dumb_create,
683 .dumb_map_offset = msm_gem_dumb_map_offset,
684 .dumb_destroy = msm_gem_dumb_destroy,
685#ifdef CONFIG_DEBUG_FS
686 .debugfs_init = msm_debugfs_init,
687 .debugfs_cleanup = msm_debugfs_cleanup,
688#endif
689 .ioctls = msm_ioctls,
690 .num_ioctls = DRM_MSM_NUM_IOCTLS,
691 .fops = &fops,
692 .name = "msm",
693 .desc = "MSM Snapdragon DRM",
694 .date = "20130625",
695 .major = 1,
696 .minor = 0,
697};
698
699#ifdef CONFIG_PM_SLEEP
700static int msm_pm_suspend(struct device *dev)
701{
702 struct drm_device *ddev = dev_get_drvdata(dev);
703
704 drm_kms_helper_poll_disable(ddev);
705
706 return 0;
707}
708
709static int msm_pm_resume(struct device *dev)
710{
711 struct drm_device *ddev = dev_get_drvdata(dev);
712
713 drm_kms_helper_poll_enable(ddev);
714
715 return 0;
716}
717#endif
718
719static const struct dev_pm_ops msm_pm_ops = {
720 SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
721};
722
723/*
724 * Platform driver:
725 */
726
727static int msm_pdev_probe(struct platform_device *pdev)
728{
729 return drm_platform_init(&msm_driver, pdev);
730}
731
732static int msm_pdev_remove(struct platform_device *pdev)
733{
734 drm_platform_exit(&msm_driver, pdev);
735
736 return 0;
737}
738
739static const struct platform_device_id msm_id[] = {
740 { "mdp", 0 },
741 { }
742};
743
744static struct platform_driver msm_platform_driver = {
745 .probe = msm_pdev_probe,
746 .remove = msm_pdev_remove,
747 .driver = {
748 .owner = THIS_MODULE,
749 .name = "msm",
750 .pm = &msm_pm_ops,
751 },
752 .id_table = msm_id,
753};
754
755static int __init msm_drm_register(void)
756{
757 DBG("init");
758 hdmi_register();
759 a3xx_register();
760 return platform_driver_register(&msm_platform_driver);
761}
762
763static void __exit msm_drm_unregister(void)
764{
765 DBG("fini");
766 platform_driver_unregister(&msm_platform_driver);
767 hdmi_unregister();
768 a3xx_unregister();
769}
770
771module_init(msm_drm_register);
772module_exit(msm_drm_unregister);
773
 774MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
775MODULE_DESCRIPTION("MSM DRM Driver");
776MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
new file mode 100644
index 000000000000..80d75094bf0a
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -0,0 +1,213 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_DRV_H__
19#define __MSM_DRV_H__
20
21#include <linux/kernel.h>
22#include <linux/clk.h>
23#include <linux/cpufreq.h>
24#include <linux/module.h>
25#include <linux/platform_device.h>
26#include <linux/pm.h>
27#include <linux/pm_runtime.h>
28#include <linux/slab.h>
29#include <linux/list.h>
30#include <linux/iommu.h>
31#include <linux/types.h>
32#include <asm/sizes.h>
33
34#ifndef CONFIG_OF
35#include <mach/board.h>
36#include <mach/socinfo.h>
37#include <mach/iommu_domains.h>
38#endif
39
40#include <drm/drmP.h>
41#include <drm/drm_crtc_helper.h>
42#include <drm/drm_fb_helper.h>
43#include <drm/msm_drm.h>
44
45struct msm_kms;
46struct msm_gpu;
47
48#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
49
50struct msm_file_private {
51 /* currently we don't do anything useful with this.. but when
52 * per-context address spaces are supported we'd keep track of
53 * the context's page-tables here.
54 */
55 int dummy;
56};
57
58struct msm_drm_private {
59
60 struct msm_kms *kms;
61
62 /* when we have more than one 'msm_gpu' these need to be an array: */
63 struct msm_gpu *gpu;
64 struct msm_file_private *lastctx;
65
66 struct drm_fb_helper *fbdev;
67
68 uint32_t next_fence, completed_fence;
69 wait_queue_head_t fence_event;
70
71 /* list of GEM objects: */
72 struct list_head inactive_list;
73
74 struct workqueue_struct *wq;
75
76 /* registered IOMMU domains: */
77 unsigned int num_iommus;
78 struct iommu_domain *iommus[NUM_DOMAINS];
79
80 unsigned int num_crtcs;
81 struct drm_crtc *crtcs[8];
82
83 unsigned int num_encoders;
84 struct drm_encoder *encoders[8];
85
86 unsigned int num_bridges;
87 struct drm_bridge *bridges[8];
88
89 unsigned int num_connectors;
90 struct drm_connector *connectors[8];
91};
92
93struct msm_format {
94 uint32_t pixel_format;
95};
96
97/* As there are different display controller blocks depending on the
98 * snapdragon version, the kms support is split out and the appropriate
99 * implementation is loaded at runtime. The kms module is responsible
100 * for constructing the appropriate planes/crtcs/encoders/connectors.
101 */
102struct msm_kms_funcs {
103 /* hw initialization: */
104 int (*hw_init)(struct msm_kms *kms);
105 /* irq handling: */
106 void (*irq_preinstall)(struct msm_kms *kms);
107 int (*irq_postinstall)(struct msm_kms *kms);
108 void (*irq_uninstall)(struct msm_kms *kms);
109 irqreturn_t (*irq)(struct msm_kms *kms);
110 int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
111 void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
112 /* misc: */
113 const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
114 long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
115 struct drm_encoder *encoder);
116 /* cleanup: */
117 void (*preclose)(struct msm_kms *kms, struct drm_file *file);
118 void (*destroy)(struct msm_kms *kms);
119};
120
121struct msm_kms {
122 const struct msm_kms_funcs *funcs;
123};
124
125struct msm_kms *mdp4_kms_init(struct drm_device *dev);
126
127int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu);
128int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
129 const char **names, int cnt);
130
131int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
132 struct timespec *timeout);
133void msm_update_fence(struct drm_device *dev, uint32_t fence);
134
135int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
136 struct drm_file *file);
137
138int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
139int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
140uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
141int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
142 uint32_t *iova);
143int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
144void msm_gem_put_iova(struct drm_gem_object *obj, int id);
145int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
146 struct drm_mode_create_dumb *args);
147int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
148 uint32_t handle);
149int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
150 uint32_t handle, uint64_t *offset);
151void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
152void *msm_gem_vaddr(struct drm_gem_object *obj);
153int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
154 struct work_struct *work);
155void msm_gem_move_to_active(struct drm_gem_object *obj,
156 struct msm_gpu *gpu, uint32_t fence);
157void msm_gem_move_to_inactive(struct drm_gem_object *obj);
158int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
159 struct timespec *timeout);
160int msm_gem_cpu_fini(struct drm_gem_object *obj);
161void msm_gem_free_object(struct drm_gem_object *obj);
162int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
163 uint32_t size, uint32_t flags, uint32_t *handle);
164struct drm_gem_object *msm_gem_new(struct drm_device *dev,
165 uint32_t size, uint32_t flags);
166
167struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
168const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
169struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
170 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
171struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
172 struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd);
173
174struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
175
176int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
177void __init hdmi_register(void);
178void __exit hdmi_unregister(void);
179
180#ifdef CONFIG_DEBUG_FS
181void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
182void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
183void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
184#endif
185
186void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
187 const char *dbgname);
188void msm_writel(u32 data, void __iomem *addr);
189u32 msm_readl(const void __iomem *addr);
190
191#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
192#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
193
194static inline int align_pitch(int width, int bpp)
195{
196 int bytespp = (bpp + 7) / 8;
197 /* adreno needs pitch aligned to 32 pixels: */
198 return bytespp * ALIGN(width, 32);
199}
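/* e.g. align_pitch(1080, 32): bytespp = 4 and ALIGN(1080, 32) = 1088,
 * giving a pitch of 4352 bytes instead of the tightly packed 4320 */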
200
201/* for the generated headers: */
202#define INVALID_IDX(idx) ({BUG(); 0;})
203#define fui(x) ({BUG(); 0;})
204#define util_float_to_half(x) ({BUG(); 0;})
205
206
207#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)
208
209/* for conditionally setting boolean flag(s): */
210#define COND(bool, val) ((bool) ? (val) : 0)
211
212
213#endif /* __MSM_DRV_H__ */
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
new file mode 100644
index 000000000000..0286c0eeb10c
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -0,0 +1,202 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19
20#include "drm_crtc.h"
21#include "drm_crtc_helper.h"
22
23struct msm_framebuffer {
24 struct drm_framebuffer base;
25 const struct msm_format *format;
26 struct drm_gem_object *planes[2];
27};
28#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
29
30
31static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
32 struct drm_file *file_priv,
33 unsigned int *handle)
34{
35 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
36 return drm_gem_handle_create(file_priv,
37 msm_fb->planes[0], handle);
38}
39
40static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
41{
42 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
43 int i, n = drm_format_num_planes(fb->pixel_format);
44
45 DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
46
47 drm_framebuffer_cleanup(fb);
48
49 for (i = 0; i < n; i++) {
50 struct drm_gem_object *bo = msm_fb->planes[i];
51 if (bo)
52 drm_gem_object_unreference_unlocked(bo);
53 }
54
55 kfree(msm_fb);
56}
57
58static int msm_framebuffer_dirty(struct drm_framebuffer *fb,
59 struct drm_file *file_priv, unsigned flags, unsigned color,
60 struct drm_clip_rect *clips, unsigned num_clips)
61{
62 return 0;
63}
64
65static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
66 .create_handle = msm_framebuffer_create_handle,
67 .destroy = msm_framebuffer_destroy,
68 .dirty = msm_framebuffer_dirty,
69};
70
71#ifdef CONFIG_DEBUG_FS
72void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
73{
74 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
75 int i, n = drm_format_num_planes(fb->pixel_format);
76
77 seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
78 fb->width, fb->height, (char *)&fb->pixel_format,
79 fb->refcount.refcount.counter, fb->base.id);
80
81 for (i = 0; i < n; i++) {
82 seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
83 i, fb->offsets[i], fb->pitches[i]);
84 msm_gem_describe(msm_fb->planes[i], m);
85 }
86}
87#endif
88
89struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
90{
91 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
92 return msm_fb->planes[plane];
93}
94
95const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
96{
97 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
98 return msm_fb->format;
99}
100
101struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
102 struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
103{
104 struct drm_gem_object *bos[4] = {0};
105 struct drm_framebuffer *fb;
106 int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
107
108 for (i = 0; i < n; i++) {
109 bos[i] = drm_gem_object_lookup(dev, file,
110 mode_cmd->handles[i]);
111 if (!bos[i]) {
112 ret = -ENXIO;
113 goto out_unref;
114 }
115 }
116
117 fb = msm_framebuffer_init(dev, mode_cmd, bos);
118 if (IS_ERR(fb)) {
119 ret = PTR_ERR(fb);
120 goto out_unref;
121 }
122
123 return fb;
124
125out_unref:
126 for (i = 0; i < n; i++)
127 drm_gem_object_unreference_unlocked(bos[i]);
128 return ERR_PTR(ret);
129}
130
131struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
132 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
133{
134 struct msm_drm_private *priv = dev->dev_private;
135 struct msm_kms *kms = priv->kms;
136 struct msm_framebuffer *msm_fb;
137 struct drm_framebuffer *fb = NULL;
138 const struct msm_format *format;
139 int ret, i, n;
140 unsigned int hsub, vsub;
141
142 DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
143 dev, mode_cmd, mode_cmd->width, mode_cmd->height,
144 (char *)&mode_cmd->pixel_format);
145
146 n = drm_format_num_planes(mode_cmd->pixel_format);
147 hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
148 vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
149
150 format = kms->funcs->get_format(kms, mode_cmd->pixel_format);
151 if (!format) {
152 dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
153 (char *)&mode_cmd->pixel_format);
154 ret = -EINVAL;
155 goto fail;
156 }
157
158 msm_fb = kzalloc(sizeof(*msm_fb), GFP_KERNEL);
159 if (!msm_fb) {
160 ret = -ENOMEM;
161 goto fail;
162 }
163
164 fb = &msm_fb->base;
165
166 msm_fb->format = format;
167
168 for (i = 0; i < n; i++) {
169 unsigned int width = mode_cmd->width / (i ? hsub : 1);
170 unsigned int height = mode_cmd->height / (i ? vsub : 1);
171 unsigned int min_size;
172
173 min_size = (height - 1) * mode_cmd->pitches[i]
174 + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
175 + mode_cmd->offsets[i];
176
177 if (bos[i]->size < min_size) {
178 ret = -EINVAL;
179 goto fail;
180 }
181
182 msm_fb->planes[i] = bos[i];
183 }
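	/* worked example: single-plane 1920x1080 XRGB8888 (cpp = 4, pitch = 7680,
	 * offset = 0) needs (1080 - 1) * 7680 + 1920 * 4 = 8294400 bytes, i.e.
	 * exactly pitch * height, so a tightly sized BO passes the check above */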
184
185 drm_helper_mode_fill_fb_struct(fb, mode_cmd);
186
187 ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
188 if (ret) {
189 dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
190 goto fail;
191 }
192
193 DBG("create: FB ID: %d (%p)", fb->base.id, fb);
194
195 return fb;
196
197fail:
198 if (fb)
199 msm_framebuffer_destroy(fb);
200
201 return ERR_PTR(ret);
202}
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
new file mode 100644
index 000000000000..6c6d7d4c9b4e
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -0,0 +1,258 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19
20#include "drm_crtc.h"
21#include "drm_fb_helper.h"
22
23/*
24 * fbdev funcs, to implement legacy fbdev interface on top of drm driver
25 */
26
27#define to_msm_fbdev(x) container_of(x, struct msm_fbdev, base)
28
29struct msm_fbdev {
30 struct drm_fb_helper base;
31 struct drm_framebuffer *fb;
32 struct drm_gem_object *bo;
33};
34
35static struct fb_ops msm_fb_ops = {
36 .owner = THIS_MODULE,
37
38 /* Note: to properly handle manual update displays, we wrap the
39 * basic fbdev ops which write to the framebuffer
40 */
41 .fb_read = fb_sys_read,
42 .fb_write = fb_sys_write,
43 .fb_fillrect = sys_fillrect,
44 .fb_copyarea = sys_copyarea,
45 .fb_imageblit = sys_imageblit,
46
47 .fb_check_var = drm_fb_helper_check_var,
48 .fb_set_par = drm_fb_helper_set_par,
49 .fb_pan_display = drm_fb_helper_pan_display,
50 .fb_blank = drm_fb_helper_blank,
51 .fb_setcmap = drm_fb_helper_setcmap,
52};
53
54static int msm_fbdev_create(struct drm_fb_helper *helper,
55 struct drm_fb_helper_surface_size *sizes)
56{
57 struct msm_fbdev *fbdev = to_msm_fbdev(helper);
58 struct drm_device *dev = helper->dev;
59 struct drm_framebuffer *fb = NULL;
60 struct fb_info *fbi = NULL;
61 struct drm_mode_fb_cmd2 mode_cmd = {0};
62 dma_addr_t paddr;
63 int ret, size;
64
65 /* only doing ARGB32 since this is what is needed to alpha-blend
66 * with video overlays:
67 */
68 sizes->surface_bpp = 32;
69 sizes->surface_depth = 32;
70
71 DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
72 sizes->surface_height, sizes->surface_bpp,
73 sizes->fb_width, sizes->fb_height);
74
75 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
76 sizes->surface_depth);
77
78 mode_cmd.width = sizes->surface_width;
79 mode_cmd.height = sizes->surface_height;
80
81 mode_cmd.pitches[0] = align_pitch(
82 mode_cmd.width, sizes->surface_bpp);
83
84 /* allocate backing bo */
85 size = mode_cmd.pitches[0] * mode_cmd.height;
86 DBG("allocating %d bytes for fb %d", size, dev->primary->index);
87 mutex_lock(&dev->struct_mutex);
88 fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
89 mutex_unlock(&dev->struct_mutex);
90 if (IS_ERR(fbdev->bo)) {
91 ret = PTR_ERR(fbdev->bo);
92 fbdev->bo = NULL;
93 dev_err(dev->dev, "failed to allocate buffer object: %d\n", ret);
94 goto fail;
95 }
96
97 fb = msm_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
98 if (IS_ERR(fb)) {
99 dev_err(dev->dev, "failed to allocate fb\n");
100 /* note: if fb creation failed, we can't rely on fb destroy
101 * to unref the bo:
102 */
103 drm_gem_object_unreference(fbdev->bo);
104 ret = PTR_ERR(fb);
105 goto fail;
106 }
107
108 mutex_lock(&dev->struct_mutex);
109
110 /* TODO implement our own fb_mmap so we don't need this: */
111 msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
112
113 fbi = framebuffer_alloc(0, dev->dev);
114 if (!fbi) {
115 dev_err(dev->dev, "failed to allocate fb info\n");
116 ret = -ENOMEM;
117 goto fail_unlock;
118 }
119
120 DBG("fbi=%p, dev=%p", fbi, dev);
121
122 fbdev->fb = fb;
123 helper->fb = fb;
124 helper->fbdev = fbi;
125
126 fbi->par = helper;
127 fbi->flags = FBINFO_DEFAULT;
128 fbi->fbops = &msm_fb_ops;
129
130 strcpy(fbi->fix.id, "msm");
131
132 ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
133 if (ret) {
134 ret = -ENOMEM;
135 goto fail_unlock;
136 }
137
138 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
139 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
140
141 dev->mode_config.fb_base = paddr;
142
143 fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
144 fbi->screen_size = fbdev->bo->size;
145 fbi->fix.smem_start = paddr;
146 fbi->fix.smem_len = fbdev->bo->size;
147
148 DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
149 DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
150
151 mutex_unlock(&dev->struct_mutex);
152
153 return 0;
154
155fail_unlock:
156 mutex_unlock(&dev->struct_mutex);
157fail:
158
159 if (ret) {
160 if (fbi)
161 framebuffer_release(fbi);
162 if (fb) {
163 drm_framebuffer_unregister_private(fb);
164 drm_framebuffer_remove(fb);
165 }
166 }
167
168 return ret;
169}
170
171static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc,
172 u16 red, u16 green, u16 blue, int regno)
173{
174 DBG("fbdev: set gamma");
175}
176
177static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
178 u16 *red, u16 *green, u16 *blue, int regno)
179{
180 DBG("fbdev: get gamma");
181}
182
183static struct drm_fb_helper_funcs msm_fb_helper_funcs = {
184 .gamma_set = msm_crtc_fb_gamma_set,
185 .gamma_get = msm_crtc_fb_gamma_get,
186 .fb_probe = msm_fbdev_create,
187};
188
189/* initialize fbdev helper */
190struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
191{
192 struct msm_drm_private *priv = dev->dev_private;
193 struct msm_fbdev *fbdev = NULL;
194 struct drm_fb_helper *helper;
195 int ret = 0;
196
197 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
198 if (!fbdev)
199 goto fail;
200
201 helper = &fbdev->base;
202
203 helper->funcs = &msm_fb_helper_funcs;
204
205 ret = drm_fb_helper_init(dev, helper,
206 priv->num_crtcs, priv->num_connectors);
207 if (ret) {
208 dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
209 goto fail;
210 }
211
212 drm_fb_helper_single_add_all_connectors(helper);
213
214 /* disable all the possible outputs/crtcs before entering KMS mode */
215 drm_helper_disable_unused_functions(dev);
216
217 drm_fb_helper_initial_config(helper, 32);
218
219 priv->fbdev = helper;
220
221 return helper;
222
223fail:
224 kfree(fbdev);
225 return NULL;
226}
227
228void msm_fbdev_free(struct drm_device *dev)
229{
230 struct msm_drm_private *priv = dev->dev_private;
231 struct drm_fb_helper *helper = priv->fbdev;
232 struct msm_fbdev *fbdev;
233 struct fb_info *fbi;
234
235 DBG();
236
237 fbi = helper->fbdev;
238
239 /* only cleanup framebuffer if it is present */
240 if (fbi) {
241 unregister_framebuffer(fbi);
242 framebuffer_release(fbi);
243 }
244
245 drm_fb_helper_fini(helper);
246
247 fbdev = to_msm_fbdev(priv->fbdev);
248
249 /* this will free the backing object */
250 if (fbdev->fb) {
251 drm_framebuffer_unregister_private(fbdev->fb);
252 drm_framebuffer_remove(fbdev->fb);
253 }
254
255 kfree(fbdev);
256
257 priv->fbdev = NULL;
258}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
new file mode 100644
index 000000000000..6b5a6c8c7658
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -0,0 +1,597 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/spinlock.h>
19#include <linux/shmem_fs.h>
20
21#include "msm_drv.h"
22#include "msm_gem.h"
23#include "msm_gpu.h"
24
25
26/* called with dev->struct_mutex held */
27static struct page **get_pages(struct drm_gem_object *obj)
28{
29 struct msm_gem_object *msm_obj = to_msm_bo(obj);
30
31 if (!msm_obj->pages) {
32 struct drm_device *dev = obj->dev;
33 struct page **p = drm_gem_get_pages(obj, 0);
34 int npages = obj->size >> PAGE_SHIFT;
35
36 if (IS_ERR(p)) {
37 dev_err(dev->dev, "could not get pages: %ld\n",
38 PTR_ERR(p));
39 return p;
40 }
41
42 msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
43 if (!msm_obj->sgt) {
44 dev_err(dev->dev, "failed to allocate sgt\n");
45 return ERR_PTR(-ENOMEM);
46 }
47
48 msm_obj->pages = p;
49
50 /* For non-cached buffers, ensure the new pages are clean
51 * because display controller, GPU, etc. are not coherent:
52 */
53 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
54 dma_map_sg(dev->dev, msm_obj->sgt->sgl,
55 msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
56 }
57
58 return msm_obj->pages;
59}
60
61static void put_pages(struct drm_gem_object *obj)
62{
63 struct msm_gem_object *msm_obj = to_msm_bo(obj);
64
65 if (msm_obj->pages) {
66		/* For non-cached buffers, undo the device mapping created in
67		 * get_pages(), since display controller, GPU, etc. are not coherent:
68		 */
69 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
70 dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
71 msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
72 sg_free_table(msm_obj->sgt);
73 kfree(msm_obj->sgt);
74
75 drm_gem_put_pages(obj, msm_obj->pages, true, false);
76 msm_obj->pages = NULL;
77 }
78}
79
80int msm_gem_mmap_obj(struct drm_gem_object *obj,
81 struct vm_area_struct *vma)
82{
83 struct msm_gem_object *msm_obj = to_msm_bo(obj);
84
85 vma->vm_flags &= ~VM_PFNMAP;
86 vma->vm_flags |= VM_MIXEDMAP;
87
88 if (msm_obj->flags & MSM_BO_WC) {
89 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
90 } else if (msm_obj->flags & MSM_BO_UNCACHED) {
91 vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
92 } else {
93 /*
94 * Shunt off cached objs to shmem file so they have their own
95 * address_space (so unmap_mapping_range does what we want,
96 * in particular in the case of mmap'd dmabufs)
97 */
98 fput(vma->vm_file);
99 get_file(obj->filp);
100 vma->vm_pgoff = 0;
101 vma->vm_file = obj->filp;
102
103 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
104 }
105
106 return 0;
107}
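
The caching behavior chosen above is fixed at allocation time by the MSM_BO_* flags. A hypothetical userspace sketch of requesting a write-combined buffer through the driver's GEM_NEW ioctl; the struct/ioctl names are assumed from the msm uapi header, and error handling is elided:

	/* Hypothetical userspace sketch; assumes an open drm fd and the
	 * msm uapi header.  MSM_BO_WC selects the pgprot_writecombine()
	 * branch of msm_gem_mmap_obj() above. */
	struct drm_msm_gem_new req = {
		.size  = 4096,
		.flags = MSM_BO_WC,
	};

	if (ioctl(fd, DRM_IOCTL_MSM_GEM_NEW, &req) == 0)
		printf("bo handle: %u\n", req.handle);
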
108
109int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
110{
111 int ret;
112
113 ret = drm_gem_mmap(filp, vma);
114 if (ret) {
115 DBG("mmap failed: %d", ret);
116 return ret;
117 }
118
119 return msm_gem_mmap_obj(vma->vm_private_data, vma);
120}
121
122int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
123{
124 struct drm_gem_object *obj = vma->vm_private_data;
125 struct msm_gem_object *msm_obj = to_msm_bo(obj);
126 struct drm_device *dev = obj->dev;
127 struct page **pages;
128 unsigned long pfn;
129 pgoff_t pgoff;
130 int ret;
131
132	/* Make sure nothing can be updated in parallel on a fault, and that
133	 * nothing is moved or removed from beneath our feet
134	 */
135 ret = mutex_lock_interruptible(&dev->struct_mutex);
136 if (ret)
137 goto out;
138
139 /* make sure we have pages attached now */
140 pages = get_pages(obj);
141 if (IS_ERR(pages)) {
142 ret = PTR_ERR(pages);
143 goto out_unlock;
144 }
145
146 /* We don't use vmf->pgoff since that has the fake offset: */
147 pgoff = ((unsigned long)vmf->virtual_address -
148 vma->vm_start) >> PAGE_SHIFT;
149
150 pfn = page_to_pfn(msm_obj->pages[pgoff]);
151
152 VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
153 pfn, pfn << PAGE_SHIFT);
154
155 ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
156
157out_unlock:
158 mutex_unlock(&dev->struct_mutex);
159out:
160 switch (ret) {
161 case -EAGAIN:
162 set_need_resched();
163 case 0:
164 case -ERESTARTSYS:
165 case -EINTR:
166 return VM_FAULT_NOPAGE;
167 case -ENOMEM:
168 return VM_FAULT_OOM;
169 default:
170 return VM_FAULT_SIGBUS;
171 }
172}
173
174/** get mmap offset */
175static uint64_t mmap_offset(struct drm_gem_object *obj)
176{
177 struct drm_device *dev = obj->dev;
178 int ret;
179
180 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
181
182 /* Make it mmapable */
183 ret = drm_gem_create_mmap_offset(obj);
184
185 if (ret) {
186 dev_err(dev->dev, "could not allocate mmap offset\n");
187 return 0;
188 }
189
190 return drm_vma_node_offset_addr(&obj->vma_node);
191}
192
193uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
194{
195 uint64_t offset;
196 mutex_lock(&obj->dev->struct_mutex);
197 offset = mmap_offset(obj);
198 mutex_unlock(&obj->dev->struct_mutex);
199 return offset;
200}
201
202/* helpers for dealing w/ iommu: */
203static int map_range(struct iommu_domain *domain, unsigned int iova,
204 struct sg_table *sgt, unsigned int len, int prot)
205{
206 struct scatterlist *sg;
207 unsigned int da = iova;
208 unsigned int i, j;
209 int ret;
210
211 if (!domain || !sgt)
212 return -EINVAL;
213
214 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
215 u32 pa = sg_phys(sg) - sg->offset;
216 size_t bytes = sg->length + sg->offset;
217
218 VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
219
220 ret = iommu_map(domain, da, pa, bytes, prot);
221 if (ret)
222 goto fail;
223
224 da += bytes;
225 }
226
227 return 0;
228
229fail:
230 da = iova;
231
232 for_each_sg(sgt->sgl, sg, i, j) {
233 size_t bytes = sg->length + sg->offset;
234 iommu_unmap(domain, da, bytes);
235 da += bytes;
236 }
237 return ret;
238}
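
To make the address arithmetic concrete, a worked example with hypothetical values:

	/* With iova = 0x1000 and an sg_table of two entries,
	 * {offset = 0, length = 0x2000} and {offset = 0, length = 0x1000},
	 * the loop above issues:
	 *
	 *	iommu_map(domain, 0x1000, pa0, 0x2000, prot);
	 *	iommu_map(domain, 0x3000, pa1, 0x1000, prot);
	 *
	 * 'da' walks the IOVA range entry by entry, and a failure at
	 * entry i unwinds exactly the [iova, da) range mapped so far.
	 */
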
239
240static void unmap_range(struct iommu_domain *domain, unsigned int iova,
241 struct sg_table *sgt, unsigned int len)
242{
243 struct scatterlist *sg;
244 unsigned int da = iova;
245 int i;
246
247 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
248 size_t bytes = sg->length + sg->offset;
249 size_t unmapped;
250
251 unmapped = iommu_unmap(domain, da, bytes);
252 if (unmapped < bytes)
253 break;
254
255 VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
256
257 BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
258
259 da += bytes;
260 }
261}
262
263/* Should be called under struct_mutex.. although it can be called
264 * from atomic context without struct_mutex to take an extra
265 * iova reference if you know one is already held.
266 *
267 * That means that when support for unpinning is eventually added,
268 * the refcnt counter will need to be atomic_t.
269 */
270int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
271 uint32_t *iova)
272{
273 struct msm_gem_object *msm_obj = to_msm_bo(obj);
274 int ret = 0;
275
276 if (!msm_obj->domain[id].iova) {
277 struct msm_drm_private *priv = obj->dev->dev_private;
278 uint32_t offset = (uint32_t)mmap_offset(obj);
279 struct page **pages;
280 pages = get_pages(obj);
281 if (IS_ERR(pages))
282 return PTR_ERR(pages);
283 // XXX ideally we would not map buffers writable when not needed...
284 ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
285 obj->size, IOMMU_READ | IOMMU_WRITE);
286 msm_obj->domain[id].iova = offset;
287 }
288
289 if (!ret)
290 *iova = msm_obj->domain[id].iova;
291
292 return ret;
293}
294
295int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
296{
297 int ret;
298 mutex_lock(&obj->dev->struct_mutex);
299 ret = msm_gem_get_iova_locked(obj, id, iova);
300 mutex_unlock(&obj->dev->struct_mutex);
301 return ret;
302}
303
304void msm_gem_put_iova(struct drm_gem_object *obj, int id)
305{
306 // XXX TODO ..
307 // NOTE: probably don't need a _locked() version.. we wouldn't
308 // normally unmap here, but instead just mark that it could be
309 // unmapped (if the iova refcnt drops to zero), but then later
310 // if another _get_iova_locked() fails we can start unmapping
311 // things that are no longer needed..
312}
313
314int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
315 struct drm_mode_create_dumb *args)
316{
317 args->pitch = align_pitch(args->width, args->bpp);
318 args->size = PAGE_ALIGN(args->pitch * args->height);
319 return msm_gem_new_handle(dev, file, args->size,
320 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
321}
322
323int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
324 uint32_t handle)
325{
326 /* No special work needed, drop the reference and see what falls out */
327 return drm_gem_handle_delete(file, handle);
328}
329
330int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
331 uint32_t handle, uint64_t *offset)
332{
333 struct drm_gem_object *obj;
334 int ret = 0;
335
336 /* GEM does all our handle to object mapping */
337 obj = drm_gem_object_lookup(dev, file, handle);
338 if (obj == NULL) {
339 ret = -ENOENT;
340 goto fail;
341 }
342
343 *offset = msm_gem_mmap_offset(obj);
344
345 drm_gem_object_unreference_unlocked(obj);
346
347fail:
348 return ret;
349}
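
For context, the fake offset returned here is what a KMS client hands back to mmap(). A minimal userspace sketch using the generic dumb-buffer uapi (error handling elided):

	/* Sketch: create a dumb bo, look up its fake mmap offset, map it. */
	struct drm_mode_create_dumb create = {
		.width = 640, .height = 480, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	void *ptr;

	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
	map.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);	/* fills map.offset */
	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, map.offset);
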
350
351void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
352{
353 struct msm_gem_object *msm_obj = to_msm_bo(obj);
354 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
355 if (!msm_obj->vaddr) {
356 struct page **pages = get_pages(obj);
357 if (IS_ERR(pages))
358 return ERR_CAST(pages);
359 msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
360 VM_MAP, pgprot_writecombine(PAGE_KERNEL));
361 }
362 return msm_obj->vaddr;
363}
364
365void *msm_gem_vaddr(struct drm_gem_object *obj)
366{
367 void *ret;
368 mutex_lock(&obj->dev->struct_mutex);
369 ret = msm_gem_vaddr_locked(obj);
370 mutex_unlock(&obj->dev->struct_mutex);
371 return ret;
372}
373
374int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
375 struct work_struct *work)
376{
377 struct drm_device *dev = obj->dev;
378 struct msm_drm_private *priv = dev->dev_private;
379 struct msm_gem_object *msm_obj = to_msm_bo(obj);
380 int ret = 0;
381
382 mutex_lock(&dev->struct_mutex);
383 if (!list_empty(&work->entry)) {
384 ret = -EINVAL;
385 } else if (is_active(msm_obj)) {
386 list_add_tail(&work->entry, &msm_obj->inactive_work);
387 } else {
388 queue_work(priv->wq, work);
389 }
390 mutex_unlock(&dev->struct_mutex);
391
392 return ret;
393}
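
A minimal sketch of the intended usage, deferring cleanup until the GPU is done with a buffer (hypothetical callback; the work item must stay allocated until it runs):

	static void my_release_worker(struct work_struct *work)
	{
		/* runs on priv->wq once the bo has gone inactive,
		 * or immediately if it already was */
	}

	/* ... */
	INIT_WORK(&my_work, my_release_worker);
	ret = msm_gem_queue_inactive_work(obj, &my_work);
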
394
395void msm_gem_move_to_active(struct drm_gem_object *obj,
396 struct msm_gpu *gpu, uint32_t fence)
397{
398 struct msm_gem_object *msm_obj = to_msm_bo(obj);
399 msm_obj->gpu = gpu;
400 msm_obj->fence = fence;
401 list_del_init(&msm_obj->mm_list);
402 list_add_tail(&msm_obj->mm_list, &gpu->active_list);
403}
404
405void msm_gem_move_to_inactive(struct drm_gem_object *obj)
406{
407 struct drm_device *dev = obj->dev;
408 struct msm_drm_private *priv = dev->dev_private;
409 struct msm_gem_object *msm_obj = to_msm_bo(obj);
410
411 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
412
413 msm_obj->gpu = NULL;
414 msm_obj->fence = 0;
415 list_del_init(&msm_obj->mm_list);
416 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
417
418 while (!list_empty(&msm_obj->inactive_work)) {
419 struct work_struct *work;
420
421 work = list_first_entry(&msm_obj->inactive_work,
422 struct work_struct, entry);
423
424 list_del_init(&work->entry);
425 queue_work(priv->wq, work);
426 }
427}
428
429int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
430 struct timespec *timeout)
431{
432 struct drm_device *dev = obj->dev;
433 struct msm_gem_object *msm_obj = to_msm_bo(obj);
434 int ret = 0;
435
436 if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC))
437 ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout);
438
439 /* TODO cache maintenance */
440
441 return ret;
442}
443
444int msm_gem_cpu_fini(struct drm_gem_object *obj)
445{
446 /* TODO cache maintenance */
447 return 0;
448}
449
450#ifdef CONFIG_DEBUG_FS
451void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
452{
453 struct drm_device *dev = obj->dev;
454 struct msm_gem_object *msm_obj = to_msm_bo(obj);
455 uint64_t off = drm_vma_node_start(&obj->vma_node);
456
457 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
458 seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n",
459 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
460 msm_obj->fence, obj->name, obj->refcount.refcount.counter,
461 off, msm_obj->vaddr, obj->size);
462}
463
464void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
465{
466 struct msm_gem_object *msm_obj;
467 int count = 0;
468 size_t size = 0;
469
470 list_for_each_entry(msm_obj, list, mm_list) {
471 struct drm_gem_object *obj = &msm_obj->base;
472 seq_printf(m, " ");
473 msm_gem_describe(obj, m);
474 count++;
475 size += obj->size;
476 }
477
478 seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
479}
480#endif
481
482void msm_gem_free_object(struct drm_gem_object *obj)
483{
484 struct drm_device *dev = obj->dev;
485 struct msm_gem_object *msm_obj = to_msm_bo(obj);
486 int id;
487
488 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
489
490 /* object should not be on active list: */
491 WARN_ON(is_active(msm_obj));
492
493 list_del(&msm_obj->mm_list);
494
495 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
496 if (msm_obj->domain[id].iova) {
497 struct msm_drm_private *priv = obj->dev->dev_private;
498 uint32_t offset = (uint32_t)mmap_offset(obj);
499 unmap_range(priv->iommus[id], offset,
500 msm_obj->sgt, obj->size);
501 }
502 }
503
504 drm_gem_free_mmap_offset(obj);
505
506 if (msm_obj->vaddr)
507 vunmap(msm_obj->vaddr);
508
509 put_pages(obj);
510
511 if (msm_obj->resv == &msm_obj->_resv)
512 reservation_object_fini(msm_obj->resv);
513
514 drm_gem_object_release(obj);
515
516 kfree(msm_obj);
517}
518
519/* convenience method to construct a GEM buffer object, and userspace handle */
520int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
521 uint32_t size, uint32_t flags, uint32_t *handle)
522{
523 struct drm_gem_object *obj;
524 int ret;
525
526 ret = mutex_lock_interruptible(&dev->struct_mutex);
527 if (ret)
528 return ret;
529
530 obj = msm_gem_new(dev, size, flags);
531
532 mutex_unlock(&dev->struct_mutex);
533
534 if (IS_ERR(obj))
535 return PTR_ERR(obj);
536
537 ret = drm_gem_handle_create(file, obj, handle);
538
539 /* drop reference from allocate - handle holds it now */
540 drm_gem_object_unreference_unlocked(obj);
541
542 return ret;
543}
544
545struct drm_gem_object *msm_gem_new(struct drm_device *dev,
546 uint32_t size, uint32_t flags)
547{
548 struct msm_drm_private *priv = dev->dev_private;
549 struct msm_gem_object *msm_obj;
550 struct drm_gem_object *obj = NULL;
551 int ret;
552
553 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
554
555 size = PAGE_ALIGN(size);
556
557 switch (flags & MSM_BO_CACHE_MASK) {
558 case MSM_BO_UNCACHED:
559 case MSM_BO_CACHED:
560 case MSM_BO_WC:
561 break;
562 default:
563 dev_err(dev->dev, "invalid cache flag: %x\n",
564 (flags & MSM_BO_CACHE_MASK));
565 ret = -EINVAL;
566 goto fail;
567 }
568
569 msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
570 if (!msm_obj) {
571 ret = -ENOMEM;
572 goto fail;
573 }
574
575 obj = &msm_obj->base;
576
577 ret = drm_gem_object_init(dev, obj, size);
578 if (ret)
579 goto fail;
580
581 msm_obj->flags = flags;
582
583 msm_obj->resv = &msm_obj->_resv;
584 reservation_object_init(msm_obj->resv);
585
586 INIT_LIST_HEAD(&msm_obj->submit_entry);
587 INIT_LIST_HEAD(&msm_obj->inactive_work);
588 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
589
590 return obj;
591
592fail:
593 if (obj)
594 drm_gem_object_unreference_unlocked(obj);
595
596 return ERR_PTR(ret);
597}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
new file mode 100644
index 000000000000..d746f13d283c
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_GEM_H__
19#define __MSM_GEM_H__
20
21#include <linux/reservation.h>
22#include "msm_drv.h"
23
24struct msm_gem_object {
25 struct drm_gem_object base;
26
27 uint32_t flags;
28
29	/* An object is either:
30	 *  inactive - on priv->inactive_list
31	 *  active   - on one of the gpu's active_lists.. well, at
32	 *     least for now we don't have (I don't think) hw sync between
33	 *     2d and 3d on devices which have both, meaning we need to
34	 *     block on submit if a bo is already on the other ring
35	 *
36	 */
37 struct list_head mm_list;
38 struct msm_gpu *gpu; /* non-null if active */
39 uint32_t fence;
40
41 /* Transiently in the process of submit ioctl, objects associated
42 * with the submit are on submit->bo_list.. this only lasts for
43 * the duration of the ioctl, so one bo can never be on multiple
44 * submit lists.
45 */
46 struct list_head submit_entry;
47
48	/* work deferred until bo is inactive: */
49 struct list_head inactive_work;
50
51 struct page **pages;
52 struct sg_table *sgt;
53 void *vaddr;
54
55 struct {
56 // XXX
57 uint32_t iova;
58 } domain[NUM_DOMAINS];
59
60 /* normally (resv == &_resv) except for imported bo's */
61 struct reservation_object *resv;
62 struct reservation_object _resv;
63};
64#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
65
66static inline bool is_active(struct msm_gem_object *msm_obj)
67{
68 return msm_obj->gpu != NULL;
69}
70
71#define MAX_CMDS 4
72
73/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
74 * associated with the cmdstream submission for synchronization (and
75 * make it easier to unwind when things go wrong, etc). This only
76 * lasts for the duration of the submit-ioctl.
77 */
78struct msm_gem_submit {
79 struct drm_device *dev;
80 struct msm_gpu *gpu;
81 struct list_head bo_list;
82 struct ww_acquire_ctx ticket;
83 uint32_t fence;
84 bool valid;
85 unsigned int nr_cmds;
86 unsigned int nr_bos;
87 struct {
88 uint32_t type;
89 uint32_t size; /* in dwords */
90 uint32_t iova;
91 } cmd[MAX_CMDS];
92 struct {
93 uint32_t flags;
94 struct msm_gem_object *obj;
95 uint32_t iova;
96 } bos[0];
97};
98
99#endif /* __MSM_GEM_H__ */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
new file mode 100644
index 000000000000..3e1ef3a00f60
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -0,0 +1,412 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19#include "msm_gpu.h"
20#include "msm_gem.h"
21
22/*
23 * Cmdstream submission:
24 */
25
26#define BO_INVALID_FLAGS ~(MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
27/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
28#define BO_VALID 0x8000
29#define BO_LOCKED 0x4000
30#define BO_PINNED 0x2000
31
32static inline void __user *to_user_ptr(u64 address)
33{
34 return (void __user *)(uintptr_t)address;
35}
36
37static struct msm_gem_submit *submit_create(struct drm_device *dev,
38 struct msm_gpu *gpu, int nr)
39{
40 struct msm_gem_submit *submit;
41 int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
42
43 submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
44 if (submit) {
45 submit->dev = dev;
46 submit->gpu = gpu;
47
48 /* initially, until copy_from_user() and bo lookup succeeds: */
49 submit->nr_bos = 0;
50 submit->nr_cmds = 0;
51
52 INIT_LIST_HEAD(&submit->bo_list);
53 ww_acquire_init(&submit->ticket, &reservation_ww_class);
54 }
55
56 return submit;
57}
58
59static int submit_lookup_objects(struct msm_gem_submit *submit,
60 struct drm_msm_gem_submit *args, struct drm_file *file)
61{
62 unsigned i;
63 int ret = 0;
64
65 spin_lock(&file->table_lock);
66
67 for (i = 0; i < args->nr_bos; i++) {
68 struct drm_msm_gem_submit_bo submit_bo;
69 struct drm_gem_object *obj;
70 struct msm_gem_object *msm_obj;
71 void __user *userptr =
72 to_user_ptr(args->bos + (i * sizeof(submit_bo)));
73
74 ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
75 if (ret) {
76 ret = -EFAULT;
77 goto out_unlock;
78 }
79
80 if (submit_bo.flags & BO_INVALID_FLAGS) {
81 DBG("invalid flags: %x", submit_bo.flags);
82 ret = -EINVAL;
83 goto out_unlock;
84 }
85
86 submit->bos[i].flags = submit_bo.flags;
87 /* in validate_objects() we figure out if this is true: */
88 submit->bos[i].iova = submit_bo.presumed;
89
90 /* normally use drm_gem_object_lookup(), but for bulk lookup
91 * all under single table_lock just hit object_idr directly:
92 */
93 obj = idr_find(&file->object_idr, submit_bo.handle);
94 if (!obj) {
95 DBG("invalid handle %u at index %u", submit_bo.handle, i);
96 ret = -EINVAL;
97 goto out_unlock;
98 }
99
100 msm_obj = to_msm_bo(obj);
101
102 if (!list_empty(&msm_obj->submit_entry)) {
103 DBG("handle %u at index %u already on submit list",
104 submit_bo.handle, i);
105 ret = -EINVAL;
106 goto out_unlock;
107 }
108
109 drm_gem_object_reference(obj);
110
111 submit->bos[i].obj = msm_obj;
112
113 list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
114 }
115
116out_unlock:
117 submit->nr_bos = i;
118 spin_unlock(&file->table_lock);
119
120 return ret;
121}
122
123static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
124{
125 struct msm_gem_object *msm_obj = submit->bos[i].obj;
126
127 if (submit->bos[i].flags & BO_PINNED)
128 msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
129
130 if (submit->bos[i].flags & BO_LOCKED)
131 ww_mutex_unlock(&msm_obj->resv->lock);
132
133 if (!(submit->bos[i].flags & BO_VALID))
134 submit->bos[i].iova = 0;
135
136 submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
137}
138
139/* This is where we make sure all the bo's are reserved and pin'd: */
140static int submit_validate_objects(struct msm_gem_submit *submit)
141{
142 int contended, slow_locked = -1, i, ret = 0;
143
144retry:
145 submit->valid = true;
146
147 for (i = 0; i < submit->nr_bos; i++) {
148 struct msm_gem_object *msm_obj = submit->bos[i].obj;
149 uint32_t iova;
150
151 if (slow_locked == i)
152 slow_locked = -1;
153
154 contended = i;
155
156 if (!(submit->bos[i].flags & BO_LOCKED)) {
157 ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
158 &submit->ticket);
159 if (ret)
160 goto fail;
161 submit->bos[i].flags |= BO_LOCKED;
162 }
163
164
165 /* if locking succeeded, pin bo: */
166 ret = msm_gem_get_iova(&msm_obj->base,
167 submit->gpu->id, &iova);
168
169 /* this would break the logic in the fail path.. there is no
170 * reason for this to happen, but just to be on the safe side
171 * let's notice if this starts happening in the future:
172 */
173 WARN_ON(ret == -EDEADLK);
174
175 if (ret)
176 goto fail;
177
178 submit->bos[i].flags |= BO_PINNED;
179
180 if (iova == submit->bos[i].iova) {
181 submit->bos[i].flags |= BO_VALID;
182 } else {
183 submit->bos[i].iova = iova;
184 submit->bos[i].flags &= ~BO_VALID;
185 submit->valid = false;
186 }
187 }
188
189 ww_acquire_done(&submit->ticket);
190
191 return 0;
192
193fail:
194 for (; i >= 0; i--)
195 submit_unlock_unpin_bo(submit, i);
196
197 if (slow_locked > 0)
198 submit_unlock_unpin_bo(submit, slow_locked);
199
200 if (ret == -EDEADLK) {
201 struct msm_gem_object *msm_obj = submit->bos[contended].obj;
202 /* we lost out in a seqno race, lock and retry.. */
203 ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
204 &submit->ticket);
205 if (!ret) {
206 submit->bos[contended].flags |= BO_LOCKED;
207 slow_locked = contended;
208 goto retry;
209 }
210 }
211
212 return ret;
213}
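
The retry dance above is the standard wound/wait mutex protocol; stripped of the driver specifics it looks roughly like this (a generic sketch, not msm code):

	static DEFINE_WW_CLASS(example_ww_class);

	/* Sketch: acquire two ww_mutexes deadlock-free.  On -EDEADLK we
	 * drop everything, sleep on the contended lock, and retry with
	 * it pre-locked -- the same shape as submit_validate_objects(). */
	static void lock_both(struct ww_mutex *a, struct ww_mutex *b)
	{
		struct ww_acquire_ctx ctx;
		struct ww_mutex *held = NULL, *contended;

		ww_acquire_init(&ctx, &example_ww_class);
	retry:
		if (held != a && ww_mutex_lock(a, &ctx) == -EDEADLK) {
			contended = a;
			goto backoff;
		}
		if (held != b && ww_mutex_lock(b, &ctx) == -EDEADLK) {
			ww_mutex_unlock(a);
			held = NULL;
			contended = b;
			goto backoff;
		}
		ww_acquire_done(&ctx);
		return;	/* caller unlocks both, then ww_acquire_fini() */

	backoff:
		if (held)
			ww_mutex_unlock(held);
		ww_mutex_lock_slow(contended, &ctx);	/* sleeps until ours */
		held = contended;
		goto retry;
	}
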
214
215static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
216 struct msm_gem_object **obj, uint32_t *iova, bool *valid)
217{
218 if (idx >= submit->nr_bos) {
219 DBG("invalid buffer index: %u (out of %u)", idx, submit->nr_bos);
220		return -EINVAL;
221 }
222
223 if (obj)
224 *obj = submit->bos[idx].obj;
225 if (iova)
226 *iova = submit->bos[idx].iova;
227 if (valid)
228 *valid = !!(submit->bos[idx].flags & BO_VALID);
229
230 return 0;
231}
232
233/* process the reloc's and patch up the cmdstream as needed: */
234static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
235 uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
236{
237 uint32_t i, last_offset = 0;
238 uint32_t *ptr;
239 int ret;
240
241 if (offset % 4) {
242 DBG("non-aligned cmdstream buffer: %u", offset);
243 return -EINVAL;
244 }
245
246	/* For now, just map the entire thing.  Eventually we probably
247	 * want to do it page-by-page, w/ kmap() if not vmap()d..
248	 */
249 ptr = msm_gem_vaddr(&obj->base);
250
251 if (IS_ERR(ptr)) {
252 ret = PTR_ERR(ptr);
253 DBG("failed to map: %d", ret);
254 return ret;
255 }
256
257 for (i = 0; i < nr_relocs; i++) {
258 struct drm_msm_gem_submit_reloc submit_reloc;
259 void __user *userptr =
260 to_user_ptr(relocs + (i * sizeof(submit_reloc)));
261 uint32_t iova, off;
262 bool valid;
263
264 ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
265 if (ret)
266 return -EFAULT;
267
268 if (submit_reloc.submit_offset % 4) {
269 DBG("non-aligned reloc offset: %u",
270 submit_reloc.submit_offset);
271 return -EINVAL;
272 }
273
274 /* offset in dwords: */
275 off = submit_reloc.submit_offset / 4;
276
277 if ((off >= (obj->base.size / 4)) ||
278 (off < last_offset)) {
279 DBG("invalid offset %u at reloc %u", off, i);
280 return -EINVAL;
281 }
282
283 ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
284 if (ret)
285 return ret;
286
287 if (valid)
288 continue;
289
290 iova += submit_reloc.reloc_offset;
291
292 if (submit_reloc.shift < 0)
293 iova >>= -submit_reloc.shift;
294 else
295 iova <<= submit_reloc.shift;
296
297 ptr[off] = iova | submit_reloc.or;
298
299 last_offset = off;
300 }
301
302 return 0;
303}
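
A worked example of the patch-up arithmetic with hypothetical values:

	/* A reloc with reloc_idx = 2, reloc_offset = 0x100, shift = 0,
	 * or = 0 and submit_offset = 0x40 patches cmdstream dword
	 * 0x40 / 4 = 16 with the bo's resolved address:
	 *
	 *	ptr[16] = (bos[2].iova + 0x100) | 0;
	 *
	 * A negative shift right-shifts instead, for packets that encode
	 * addresses in units coarser than bytes.
	 */
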
304
305static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
306{
307 unsigned i;
308
309 mutex_lock(&submit->dev->struct_mutex);
310 for (i = 0; i < submit->nr_bos; i++) {
311 struct msm_gem_object *msm_obj = submit->bos[i].obj;
312 submit_unlock_unpin_bo(submit, i);
313 list_del_init(&msm_obj->submit_entry);
314 drm_gem_object_unreference(&msm_obj->base);
315 }
316 mutex_unlock(&submit->dev->struct_mutex);
317
318 ww_acquire_fini(&submit->ticket);
319 kfree(submit);
320}
321
322int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
323 struct drm_file *file)
324{
325 struct msm_drm_private *priv = dev->dev_private;
326 struct drm_msm_gem_submit *args = data;
327 struct msm_file_private *ctx = file->driver_priv;
328 struct msm_gem_submit *submit;
329 struct msm_gpu *gpu;
330 unsigned i;
331 int ret;
332
333	/* for now, we just have the 3d pipe.. eventually this would need to
334	 * be more clever and dispatch to the appropriate gpu module:
335	 */
336 if (args->pipe != MSM_PIPE_3D0)
337 return -EINVAL;
338
339 gpu = priv->gpu;
340
341 if (args->nr_cmds > MAX_CMDS)
342 return -EINVAL;
343
344 submit = submit_create(dev, gpu, args->nr_bos);
345 if (!submit) {
346 ret = -ENOMEM;
347 goto out;
348 }
349
350 ret = submit_lookup_objects(submit, args, file);
351 if (ret)
352 goto out;
353
354 ret = submit_validate_objects(submit);
355 if (ret)
356 goto out;
357
358 for (i = 0; i < args->nr_cmds; i++) {
359 struct drm_msm_gem_submit_cmd submit_cmd;
360 void __user *userptr =
361 to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
362 struct msm_gem_object *msm_obj;
363 uint32_t iova;
364
365 ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
366 if (ret) {
367 ret = -EFAULT;
368 goto out;
369 }
370
371 ret = submit_bo(submit, submit_cmd.submit_idx,
372 &msm_obj, &iova, NULL);
373 if (ret)
374 goto out;
375
376 if (submit_cmd.size % 4) {
377 DBG("non-aligned cmdstream buffer size: %u",
378 submit_cmd.size);
379 ret = -EINVAL;
380 goto out;
381 }
382
383 if (submit_cmd.size >= msm_obj->base.size) {
384 DBG("invalid cmdstream size: %u", submit_cmd.size);
385 ret = -EINVAL;
386 goto out;
387 }
388
389 submit->cmd[i].type = submit_cmd.type;
390 submit->cmd[i].size = submit_cmd.size / 4;
391 submit->cmd[i].iova = iova + submit_cmd.submit_offset;
392
393 if (submit->valid)
394 continue;
395
396 ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
397 submit_cmd.nr_relocs, submit_cmd.relocs);
398 if (ret)
399 goto out;
400 }
401
402 submit->nr_cmds = i;
403
404 ret = msm_gpu_submit(gpu, submit, ctx);
405
406 args->fence = submit->fence;
407
408out:
409 if (submit)
410 submit_cleanup(submit, !!ret);
411 return ret;
412}
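
From userspace the flow above is driven with arrays of bo and cmd descriptors. A hypothetical one-bo, one-cmd sketch; DRM_IOCTL_MSM_GEM_SUBMIT and MSM_SUBMIT_CMD_BUF are assumed from the msm uapi header, error handling elided:

	struct drm_msm_gem_submit_bo bo = {
		.handle   = bo_handle,
		.flags    = MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE,
		.presumed = last_iova,	/* relocs are skipped if still valid */
	};
	struct drm_msm_gem_submit_cmd cmd = {
		.type       = MSM_SUBMIT_CMD_BUF,
		.submit_idx = 0,		/* index of 'bo' above */
		.size       = cmdstream_bytes,	/* multiple of 4 */
	};
	struct drm_msm_gem_submit req = {
		.pipe    = MSM_PIPE_3D0,
		.nr_bos  = 1, .bos  = (uintptr_t)&bo,
		.nr_cmds = 1, .cmds = (uintptr_t)&cmd,
	};

	ioctl(fd, DRM_IOCTL_MSM_GEM_SUBMIT, &req);
	/* req.fence can then be handed to the wait-fence ioctl */
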
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
new file mode 100644
index 000000000000..e1e1ec9321ff
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -0,0 +1,463 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_gpu.h"
19#include "msm_gem.h"
20
21
22/*
23 * Power Management:
24 */
25
26#ifdef CONFIG_MSM_BUS_SCALING
27#include <mach/board.h>
28#include <mach/kgsl.h>
29static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
30{
31 struct drm_device *dev = gpu->dev;
32 struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
33
34 if (!pdev) {
35 dev_err(dev->dev, "could not find dtv pdata\n");
36 return;
37 }
38
39 if (pdata->bus_scale_table) {
40 gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
41 DBG("bus scale client: %08x", gpu->bsc);
42 }
43}
44
45static void bs_fini(struct msm_gpu *gpu)
46{
47 if (gpu->bsc) {
48 msm_bus_scale_unregister_client(gpu->bsc);
49 gpu->bsc = 0;
50 }
51}
52
53static void bs_set(struct msm_gpu *gpu, int idx)
54{
55 if (gpu->bsc) {
56 DBG("set bus scaling: %d", idx);
57 msm_bus_scale_client_update_request(gpu->bsc, idx);
58 }
59}
60#else
61static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) {}
62static void bs_fini(struct msm_gpu *gpu) {}
63static void bs_set(struct msm_gpu *gpu, int idx) {}
64#endif
65
66static int enable_pwrrail(struct msm_gpu *gpu)
67{
68 struct drm_device *dev = gpu->dev;
69 int ret = 0;
70
71 if (gpu->gpu_reg) {
72 ret = regulator_enable(gpu->gpu_reg);
73 if (ret) {
74 dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
75 return ret;
76 }
77 }
78
79 if (gpu->gpu_cx) {
80 ret = regulator_enable(gpu->gpu_cx);
81 if (ret) {
82 dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
83 return ret;
84 }
85 }
86
87 return 0;
88}
89
90static int disable_pwrrail(struct msm_gpu *gpu)
91{
92 if (gpu->gpu_cx)
93 regulator_disable(gpu->gpu_cx);
94 if (gpu->gpu_reg)
95 regulator_disable(gpu->gpu_reg);
96 return 0;
97}
98
99static int enable_clk(struct msm_gpu *gpu)
100{
101 struct clk *rate_clk = NULL;
102 int i;
103
104 /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
105 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
106 if (gpu->grp_clks[i]) {
107 clk_prepare(gpu->grp_clks[i]);
108 rate_clk = gpu->grp_clks[i];
109 }
110 }
111
112 if (rate_clk && gpu->fast_rate)
113 clk_set_rate(rate_clk, gpu->fast_rate);
114
115 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
116 if (gpu->grp_clks[i])
117 clk_enable(gpu->grp_clks[i]);
118
119 return 0;
120}
121
122static int disable_clk(struct msm_gpu *gpu)
123{
124 struct clk *rate_clk = NULL;
125 int i;
126
127 /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
128 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
129 if (gpu->grp_clks[i]) {
130 clk_disable(gpu->grp_clks[i]);
131 rate_clk = gpu->grp_clks[i];
132 }
133 }
134
135 if (rate_clk && gpu->slow_rate)
136 clk_set_rate(rate_clk, gpu->slow_rate);
137
138 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
139 if (gpu->grp_clks[i])
140 clk_unprepare(gpu->grp_clks[i]);
141
142 return 0;
143}
144
145static int enable_axi(struct msm_gpu *gpu)
146{
147 if (gpu->ebi1_clk)
148 clk_prepare_enable(gpu->ebi1_clk);
149 if (gpu->bus_freq)
150 bs_set(gpu, gpu->bus_freq);
151 return 0;
152}
153
154static int disable_axi(struct msm_gpu *gpu)
155{
156 if (gpu->ebi1_clk)
157 clk_disable_unprepare(gpu->ebi1_clk);
158 if (gpu->bus_freq)
159 bs_set(gpu, 0);
160 return 0;
161}
162
163int msm_gpu_pm_resume(struct msm_gpu *gpu)
164{
165 int ret;
166
167 DBG("%s", gpu->name);
168
169 ret = enable_pwrrail(gpu);
170 if (ret)
171 return ret;
172
173 ret = enable_clk(gpu);
174 if (ret)
175 return ret;
176
177 ret = enable_axi(gpu);
178 if (ret)
179 return ret;
180
181 return 0;
182}
183
184int msm_gpu_pm_suspend(struct msm_gpu *gpu)
185{
186 int ret;
187
188 DBG("%s", gpu->name);
189
190 ret = disable_axi(gpu);
191 if (ret)
192 return ret;
193
194 ret = disable_clk(gpu);
195 if (ret)
196 return ret;
197
198 ret = disable_pwrrail(gpu);
199 if (ret)
200 return ret;
201
202 return 0;
203}
204
205/*
206 * Hangcheck detection for locked gpu:
207 */
208
209static void recover_worker(struct work_struct *work)
210{
211 struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
212 struct drm_device *dev = gpu->dev;
213
214 dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
215
216 mutex_lock(&dev->struct_mutex);
217 gpu->funcs->recover(gpu);
218 mutex_unlock(&dev->struct_mutex);
219
220 msm_gpu_retire(gpu);
221}
222
223static void hangcheck_timer_reset(struct msm_gpu *gpu)
224{
225 DBG("%s", gpu->name);
226 mod_timer(&gpu->hangcheck_timer,
227 round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
228}
229
230static void hangcheck_handler(unsigned long data)
231{
232 struct msm_gpu *gpu = (struct msm_gpu *)data;
233 uint32_t fence = gpu->funcs->last_fence(gpu);
234
235 if (fence != gpu->hangcheck_fence) {
236 /* some progress has been made.. ya! */
237 gpu->hangcheck_fence = fence;
238 } else if (fence < gpu->submitted_fence) {
239 /* no progress and not done.. hung! */
240 struct msm_drm_private *priv = gpu->dev->dev_private;
241 gpu->hangcheck_fence = fence;
242 queue_work(priv->wq, &gpu->recover_work);
243 }
244
245 /* if still more pending work, reset the hangcheck timer: */
246 if (gpu->submitted_fence > gpu->hangcheck_fence)
247 hangcheck_timer_reset(gpu);
248}
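
Concretely, a hang is only declared after a full period with no fence progress; for example:

	/* Hypothetical timeline: submitted_fence = 42 and the GPU wedges
	 * after writing fence 40.  First tick: 40 != hangcheck_fence, so
	 * it is recorded as progress and the timer is re-armed.  Second
	 * tick: still 40, and 40 < 42, so recover_work is queued. */
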
249
250/*
251 * Cmdstream submission/retirement:
252 */
253
254static void retire_worker(struct work_struct *work)
255{
256 struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
257 struct drm_device *dev = gpu->dev;
258 uint32_t fence = gpu->funcs->last_fence(gpu);
259
260 mutex_lock(&dev->struct_mutex);
261
262 while (!list_empty(&gpu->active_list)) {
263 struct msm_gem_object *obj;
264
265 obj = list_first_entry(&gpu->active_list,
266 struct msm_gem_object, mm_list);
267
268 if (obj->fence <= fence) {
269 /* move to inactive: */
270 msm_gem_move_to_inactive(&obj->base);
271 msm_gem_put_iova(&obj->base, gpu->id);
272 drm_gem_object_unreference(&obj->base);
273 } else {
274 break;
275 }
276 }
277
278 msm_update_fence(gpu->dev, fence);
279
280 mutex_unlock(&dev->struct_mutex);
281}
282
283/* call from irq handler to schedule work to retire bo's */
284void msm_gpu_retire(struct msm_gpu *gpu)
285{
286 struct msm_drm_private *priv = gpu->dev->dev_private;
287 queue_work(priv->wq, &gpu->retire_work);
288}
289
290/* add bo's to gpu's ring, and kick gpu: */
291int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
292 struct msm_file_private *ctx)
293{
294 struct drm_device *dev = gpu->dev;
295 struct msm_drm_private *priv = dev->dev_private;
296 int i, ret;
297
298 mutex_lock(&dev->struct_mutex);
299
300 submit->fence = ++priv->next_fence;
301
302 gpu->submitted_fence = submit->fence;
303
304 ret = gpu->funcs->submit(gpu, submit, ctx);
305 priv->lastctx = ctx;
306
307 for (i = 0; i < submit->nr_bos; i++) {
308 struct msm_gem_object *msm_obj = submit->bos[i].obj;
309
310 /* can't happen yet.. but when we add 2d support we'll have
311 * to deal w/ cross-ring synchronization:
312 */
313 WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
314
315 if (!is_active(msm_obj)) {
316 uint32_t iova;
317
318 /* ring takes a reference to the bo and iova: */
319 drm_gem_object_reference(&msm_obj->base);
320 msm_gem_get_iova_locked(&msm_obj->base,
321 submit->gpu->id, &iova);
322 }
323
324 msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence);
325 }
326 hangcheck_timer_reset(gpu);
327 mutex_unlock(&dev->struct_mutex);
328
329 return ret;
330}
331
332/*
333 * Init/Cleanup:
334 */
335
336static irqreturn_t irq_handler(int irq, void *data)
337{
338 struct msm_gpu *gpu = data;
339 return gpu->funcs->irq(gpu);
340}
341
342static const char *clk_names[] = {
343 "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
344};
345
346int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
347 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
348 const char *name, const char *ioname, const char *irqname, int ringsz)
349{
350 int i, ret;
351
352 gpu->dev = drm;
353 gpu->funcs = funcs;
354 gpu->name = name;
355
356 INIT_LIST_HEAD(&gpu->active_list);
357 INIT_WORK(&gpu->retire_work, retire_worker);
358 INIT_WORK(&gpu->recover_work, recover_worker);
359
360 setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
361 (unsigned long)gpu);
362
363 BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
364
365 /* Map registers: */
366 gpu->mmio = msm_ioremap(pdev, ioname, name);
367 if (IS_ERR(gpu->mmio)) {
368 ret = PTR_ERR(gpu->mmio);
369 goto fail;
370 }
371
372 /* Get Interrupt: */
373 gpu->irq = platform_get_irq_byname(pdev, irqname);
374 if (gpu->irq < 0) {
375 ret = gpu->irq;
376 dev_err(drm->dev, "failed to get irq: %d\n", ret);
377 goto fail;
378 }
379
380 ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
381 IRQF_TRIGGER_HIGH, gpu->name, gpu);
382 if (ret) {
383 dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
384 goto fail;
385 }
386
387 /* Acquire clocks: */
388 for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
389 gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
390 DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
391 if (IS_ERR(gpu->grp_clks[i]))
392 gpu->grp_clks[i] = NULL;
393 }
394
395 gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
396 DBG("ebi1_clk: %p", gpu->ebi1_clk);
397 if (IS_ERR(gpu->ebi1_clk))
398 gpu->ebi1_clk = NULL;
399
400 /* Acquire regulators: */
401 gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
402 DBG("gpu_reg: %p", gpu->gpu_reg);
403 if (IS_ERR(gpu->gpu_reg))
404 gpu->gpu_reg = NULL;
405
406 gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
407 DBG("gpu_cx: %p", gpu->gpu_cx);
408 if (IS_ERR(gpu->gpu_cx))
409 gpu->gpu_cx = NULL;
410
411 /* Setup IOMMU.. eventually we will (I think) do this once per context
412 * and have separate page tables per context. For now, to keep things
413 * simple and to get something working, just use a single address space:
414 */
415 gpu->iommu = iommu_domain_alloc(&platform_bus_type);
416 if (!gpu->iommu) {
417 dev_err(drm->dev, "failed to allocate IOMMU\n");
418 ret = -ENOMEM;
419 goto fail;
420 }
421 gpu->id = msm_register_iommu(drm, gpu->iommu);
422
423 /* Create ringbuffer: */
424 gpu->rb = msm_ringbuffer_new(gpu, ringsz);
425 if (IS_ERR(gpu->rb)) {
426 ret = PTR_ERR(gpu->rb);
427 gpu->rb = NULL;
428 dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
429 goto fail;
430 }
431
432 ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
433 if (ret) {
434 gpu->rb_iova = 0;
435 dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
436 goto fail;
437 }
438
439 bs_init(gpu, pdev);
440
441 return 0;
442
443fail:
444 return ret;
445}
446
447void msm_gpu_cleanup(struct msm_gpu *gpu)
448{
449 DBG("%s", gpu->name);
450
451 WARN_ON(!list_empty(&gpu->active_list));
452
453 bs_fini(gpu);
454
455 if (gpu->rb) {
456 if (gpu->rb_iova)
457 msm_gem_put_iova(gpu->rb->bo, gpu->id);
458 msm_ringbuffer_destroy(gpu->rb);
459 }
460
461 if (gpu->iommu)
462 iommu_domain_free(gpu->iommu);
463}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
new file mode 100644
index 000000000000..8cd829e520bb
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -0,0 +1,124 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_GPU_H__
19#define __MSM_GPU_H__
20
21#include <linux/clk.h>
22#include <linux/regulator/consumer.h>
23
24#include "msm_drv.h"
25#include "msm_ringbuffer.h"
26
27struct msm_gem_submit;
28
29/* So far, with hardware that I've seen to date, we can have:
30 * + zero, one, or two z180 2d cores
31 * + a3xx or a2xx 3d core, which share a common CP (the firmware
32 * for the CP seems to implement some different PM4 packet types
33 * but the basics of cmdstream submission are the same)
34 *
35 * Which means that the eventual complete "class" hierarchy, once
36 * support for all past and present hw is in place, becomes:
37 * + msm_gpu
38 * + adreno_gpu
39 * + a3xx_gpu
40 * + a2xx_gpu
41 * + z180_gpu
42 */
43struct msm_gpu_funcs {
44 int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
45 int (*hw_init)(struct msm_gpu *gpu);
46 int (*pm_suspend)(struct msm_gpu *gpu);
47 int (*pm_resume)(struct msm_gpu *gpu);
48 int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
49 struct msm_file_private *ctx);
50 void (*flush)(struct msm_gpu *gpu);
51 void (*idle)(struct msm_gpu *gpu);
52 irqreturn_t (*irq)(struct msm_gpu *irq);
53 uint32_t (*last_fence)(struct msm_gpu *gpu);
54 void (*recover)(struct msm_gpu *gpu);
55 void (*destroy)(struct msm_gpu *gpu);
56#ifdef CONFIG_DEBUG_FS
57 /* show GPU status in debugfs: */
58 void (*show)(struct msm_gpu *gpu, struct seq_file *m);
59#endif
60};
61
62struct msm_gpu {
63 const char *name;
64 struct drm_device *dev;
65 const struct msm_gpu_funcs *funcs;
66
67 struct msm_ringbuffer *rb;
68 uint32_t rb_iova;
69
70 /* list of GEM active objects: */
71 struct list_head active_list;
72
73 uint32_t submitted_fence;
74
75 /* worker for handling active-list retiring: */
76 struct work_struct retire_work;
77
78 void __iomem *mmio;
79 int irq;
80
81 struct iommu_domain *iommu;
82 int id;
83
84 /* Power Control: */
85 struct regulator *gpu_reg, *gpu_cx;
86 struct clk *ebi1_clk, *grp_clks[5];
87 uint32_t fast_rate, slow_rate, bus_freq;
88 uint32_t bsc;
89
90	/* Hang Detection: */
91#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
92#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
93 struct timer_list hangcheck_timer;
94 uint32_t hangcheck_fence;
95 struct work_struct recover_work;
96};
97
98static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
99{
100 msm_writel(data, gpu->mmio + (reg << 2));
101}
102
103static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
104{
105 return msm_readl(gpu->mmio + (reg << 2));
106}
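
These helpers take register offsets in dword units, hence the << 2. A trivial usage sketch with a hypothetical register index:

	#define REG_EXAMPLE_STATUS 0x0045	/* hypothetical, dword units */

	if (gpu_read(gpu, REG_EXAMPLE_STATUS) & 0x1)	/* reads mmio + 0x114 */
		gpu_write(gpu, REG_EXAMPLE_STATUS, 0);
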
107
108int msm_gpu_pm_suspend(struct msm_gpu *gpu);
109int msm_gpu_pm_resume(struct msm_gpu *gpu);
110
111void msm_gpu_retire(struct msm_gpu *gpu);
112int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
113 struct msm_file_private *ctx);
114
115int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
116 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
117 const char *name, const char *ioname, const char *irqname, int ringsz);
118void msm_gpu_cleanup(struct msm_gpu *gpu);
119
120struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
121void __init a3xx_register(void);
122void __exit a3xx_unregister(void);
123
124#endif /* __MSM_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
new file mode 100644
index 000000000000..8171537dd7d1
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -0,0 +1,61 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_ringbuffer.h"
19#include "msm_gpu.h"
20
21struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
22{
23 struct msm_ringbuffer *ring;
24 int ret;
25
26 size = ALIGN(size, 4); /* size should be dword aligned */
27
28 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
29 if (!ring) {
30 ret = -ENOMEM;
31 goto fail;
32 }
33
34 ring->gpu = gpu;
35 ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
36 if (IS_ERR(ring->bo)) {
37 ret = PTR_ERR(ring->bo);
38 ring->bo = NULL;
39 goto fail;
40 }
41
42 ring->start = msm_gem_vaddr_locked(ring->bo);
43 ring->end = ring->start + (size / 4);
44 ring->cur = ring->start;
45
46 ring->size = size;
47
48 return ring;
49
50fail:
51 if (ring)
52 msm_ringbuffer_destroy(ring);
53 return ERR_PTR(ret);
54}
55
56void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
57{
58 if (ring->bo)
59 drm_gem_object_unreference(ring->bo);
60 kfree(ring);
61}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
new file mode 100644
index 000000000000..6e0e1049fa4f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_RINGBUFFER_H__
19#define __MSM_RINGBUFFER_H__
20
21#include "msm_drv.h"
22
23struct msm_ringbuffer {
24 struct msm_gpu *gpu;
25 int size;
26 struct drm_gem_object *bo;
27 uint32_t *start, *end, *cur;
28};
29
30struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size);
31void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
32
33/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
34
35static inline void
36OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
37{
38 if (ring->cur == ring->end)
39 ring->cur = ring->start;
40 *(ring->cur++) = data;
41}
42
43#endif /* __MSM_RINGBUFFER_H__ */
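
A sketch of the emit pattern OUT_RING is meant for (hypothetical dword values); note that it silently wraps cur back to start at the end, so callers must know the GPU has consumed the dwords about to be overwritten:

	/* Sketch: emit a three-dword packet (hypothetical values). */
	static void example_emit(struct msm_ringbuffer *ring)
	{
		OUT_RING(ring, 0xc0012300);	/* hypothetical packet header */
		OUT_RING(ring, 0x00000001);	/* payload */
		OUT_RING(ring, 0x00000002);
		/* gpu->funcs->flush() then kicks the GPU to consume them */
	}
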
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index d8291724dbd4..7a4e0891c5f8 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -98,6 +98,8 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
 	u32 splitoff;
 	u32 s, e;
 
+	BUG_ON(!type);
+
 	list_for_each_entry(this, &mm->free, fl_entry) {
 		e = this->offset + this->length;
 		s = this->offset;
@@ -162,6 +164,8 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
 	struct nouveau_mm_node *prev, *this, *next;
 	u32 mask = align - 1;
 
+	BUG_ON(!type);
+
 	list_for_each_entry_reverse(this, &mm->free, fl_entry) {
 		u32 e = this->offset + this->length;
 		u32 s = this->offset;
diff --git a/drivers/gpu/drm/nouveau/core/core/printk.c b/drivers/gpu/drm/nouveau/core/core/printk.c
index 6161eaf5447c..52fb2aa129e8 100644
--- a/drivers/gpu/drm/nouveau/core/core/printk.c
+++ b/drivers/gpu/drm/nouveau/core/core/printk.c
@@ -27,6 +27,8 @@
 #include <core/subdev.h>
 #include <core/printk.h>
 
+int nv_printk_suspend_level = NV_DBG_DEBUG;
+
 void
 nv_printk_(struct nouveau_object *object, const char *pfx, int level,
 	   const char *fmt, ...)
@@ -72,3 +74,20 @@ nv_printk_(struct nouveau_object *object, const char *pfx, int level,
 	vprintk(mfmt, args);
 	va_end(args);
 }
+
+#define CONV_LEVEL(x) case NV_DBG_##x: return NV_PRINTK_##x
+
+const char *nv_printk_level_to_pfx(int level)
+{
+	switch (level) {
+	CONV_LEVEL(FATAL);
+	CONV_LEVEL(ERROR);
+	CONV_LEVEL(WARN);
+	CONV_LEVEL(INFO);
+	CONV_LEVEL(DEBUG);
+	CONV_LEVEL(PARANOIA);
+	CONV_LEVEL(TRACE);
+	CONV_LEVEL(SPAM);
+	}
+	return NV_PRINTK_DEBUG;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
index 262c9f5f5f60..ce860de43e61 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
@@ -90,6 +90,7 @@ nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	nv_subdev(priv)->unit = 0x00008000;
+	nv_subdev(priv)->intr = nouveau_falcon_intr;
 	nv_engine(priv)->cclass = &nvc0_bsp_cclass;
 	nv_engine(priv)->sclass = nvc0_bsp_sclass;
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
index c46882c83982..ba6aeca0285e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
@@ -90,6 +90,7 @@ nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	nv_subdev(priv)->unit = 0x00008000;
+	nv_subdev(priv)->intr = nouveau_falcon_intr;
 	nv_engine(priv)->cclass = &nve0_bsp_cclass;
 	nv_engine(priv)->sclass = nve0_bsp_sclass;
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 31cc8fe8e7f0..054d9cff4f53 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -150,7 +150,7 @@ dp_link_train_update(struct dp_state *dp, u32 delay)
 	if (ret)
 		return ret;
 
-	DBG("status %*ph\n", 6, dp->stat);
+	DBG("status %6ph\n", dp->stat);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
index 373dbcc523b2..a19e7d79b847 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -36,6 +36,8 @@ nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
 	if (data && data[0]) {
 		for (i = 0; i < size; i++)
 			nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
+		for (; i < 0x60; i++)
+			nv_wr32(priv, 0x61c440 + soff, (i << 8));
 		nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
 	} else
 	if (data) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
index dc57e24fc1df..717639386ced 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -41,6 +41,8 @@ nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
 	if (data && data[0]) {
 		for (i = 0; i < size; i++)
 			nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
+		for (; i < 0x60; i++)
+			nv_wr32(priv, 0x10ec00 + soff, (i << 8));
 		nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
 	} else
 	if (data) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index ab1e918469a8..526b75242899 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -47,14 +47,8 @@ int
 nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
 {
 	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nouveau_bios *bios = nouveau_bios(priv);
-	const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
 	const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
-	const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
 	const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR);
-	const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
-	struct dcb_output outp;
-	u8 ver, hdr;
 	u32 data;
 	int ret = -EINVAL;
 
@@ -62,8 +56,6 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
 		return -EINVAL;
 	data = *(u32 *)args;
 
-	if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp))
-		return -ENODEV;
 
 	switch (mthd & ~0x3f) {
 	case NV50_DISP_SOR_PWR:
diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/core/engine/falcon.c
index 3c7a31f7590e..e03fc8e4dc1d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/core/engine/falcon.c
@@ -23,6 +23,25 @@
 #include <engine/falcon.h>
 #include <subdev/timer.h>
 
+void
+nouveau_falcon_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_falcon *falcon = (void *)subdev;
+	u32 dispatch = nv_ro32(falcon, 0x01c);
+	u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
+
+	if (intr & 0x00000010) {
+		nv_debug(falcon, "ucode halted\n");
+		nv_wo32(falcon, 0x004, 0x00000010);
+		intr &= ~0x00000010;
+	}
+
+	if (intr) {
+		nv_error(falcon, "unhandled intr 0x%08x\n", intr);
+		nv_wo32(falcon, 0x004, intr);
+	}
+}
+
 u32
 _nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
 {
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 49ecbb859b25..c19004301309 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -265,8 +265,8 @@ nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 int
 nv31_mpeg_init(struct nouveau_object *object)
 {
-	struct nouveau_engine *engine = nv_engine(object->engine);
-	struct nv31_mpeg_priv *priv = (void *)engine;
+	struct nouveau_engine *engine = nv_engine(object);
+	struct nv31_mpeg_priv *priv = (void *)object;
 	struct nouveau_fb *pfb = nouveau_fb(object);
 	int ret, i;
 
@@ -284,7 +284,10 @@ nv31_mpeg_init(struct nouveau_object *object)
 	/* PMPEG init */
 	nv_wr32(priv, 0x00b32c, 0x00000000);
 	nv_wr32(priv, 0x00b314, 0x00000100);
-	nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 0x00000044 : 0x00000031);
+	if (nv_device(priv)->chipset >= 0x40 && nv44_graph_class(priv))
+		nv_wr32(priv, 0x00b220, 0x00000044);
+	else
+		nv_wr32(priv, 0x00b220, 0x00000031);
 	nv_wr32(priv, 0x00b300, 0x02001ec1);
 	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
index f7c581ad1991..dd6196072e9c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -61,6 +61,7 @@ nv40_mpeg_context_ctor(struct nouveau_object *parent,
 	if (ret)
 		return ret;
 
+	nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
index 98072c1ff360..73719aaa62d6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
@@ -90,6 +90,7 @@ nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	nv_subdev(priv)->unit = 0x00000002;
+	nv_subdev(priv)->intr = nouveau_falcon_intr;
 	nv_engine(priv)->cclass = &nvc0_ppp_cclass;
 	nv_engine(priv)->sclass = nvc0_ppp_sclass;
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
index 1879229b60eb..ac1f62aace72 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
@@ -90,6 +90,7 @@ nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	nv_subdev(priv)->unit = 0x00020000;
+	nv_subdev(priv)->intr = nouveau_falcon_intr;
 	nv_engine(priv)->cclass = &nvc0_vp_cclass;
 	nv_engine(priv)->sclass = nvc0_vp_sclass;
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
index d28ecbf7bc49..d4c3108479c9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
@@ -90,6 +90,7 @@ nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	nv_subdev(priv)->unit = 0x00020000;
+	nv_subdev(priv)->intr = nouveau_falcon_intr;
 	nv_engine(priv)->cclass = &nve0_vp_cclass;
 	nv_engine(priv)->sclass = nve0_vp_sclass;
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/xtensa.c b/drivers/gpu/drm/nouveau/core/engine/xtensa.c
index 0639bc59d0a5..5f6ede7c4892 100644
--- a/drivers/gpu/drm/nouveau/core/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/core/engine/xtensa.c
@@ -118,7 +118,13 @@ _nouveau_xtensa_init(struct nouveau_object *object)
 		return ret;
 	}
 
-	ret = nouveau_gpuobj_new(object, NULL, fw->size, 0x1000, 0,
+	if (fw->size > 0x40000) {
+		nv_warn(xtensa, "firmware %s too large\n", name);
+		release_firmware(fw);
+		return -EINVAL;
+	}
+
+	ret = nouveau_gpuobj_new(object, NULL, 0x40000, 0x1000, 0,
 				 &xtensa->gpu_fw);
 	if (ret) {
 		release_firmware(fw);
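The xtensa hunk above stops sizing the GPU object from the incoming firmware image and instead allocates a fixed 0x40000-byte buffer, rejecting oversized images before touching the allocator. A minimal userspace sketch of that validate-before-allocate pattern (the blob type and upload helper are illustrative stand-ins, not the driver's API):

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#define FW_BUF_SIZE 0x40000	/* fixed upper bound, as in the hunk */

/* hypothetical firmware blob, standing in for struct firmware */
struct fw_blob { size_t size; const unsigned char *data; };

static int fw_upload(const struct fw_blob *fw, unsigned char **out)
{
	if (fw->size > FW_BUF_SIZE)
		return -EINVAL;	/* reject before allocating anything */

	*out = malloc(FW_BUF_SIZE);	/* always the fixed size */
	if (!*out)
		return -ENOMEM;
	memcpy(*out, fw->data, fw->size);
	return 0;
}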
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
index febed2ea5c80..d87836e3a704 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/printk.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -15,6 +15,12 @@ struct nouveau_object;
 #define NV_PRINTK_TRACE KERN_DEBUG
 #define NV_PRINTK_SPAM KERN_DEBUG
 
+extern int nv_printk_suspend_level;
+
+#define NV_DBG_SUSPEND (nv_printk_suspend_level)
+#define NV_PRINTK_SUSPEND (nv_printk_level_to_pfx(nv_printk_suspend_level))
+
+const char *nv_printk_level_to_pfx(int level);
 void __printf(4, 5)
 nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
 
@@ -31,6 +37,13 @@ nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
 #define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
 #define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
 
+#define nv_suspend(o,f,a...) nv_printk((o), SUSPEND, f, ##a)
+
+static inline void nv_suspend_set_printk_level(int level)
+{
+	nv_printk_suspend_level = level;
+}
+
 #define nv_assert(f,a...) do { \
 	if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \
 		nv_printk_(NULL, NV_PRINTK_FATAL, NV_DBG_FATAL, f "\n", ##a); \
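The new nv_suspend() macro routes through a mutable level rather than a hard-coded one, so the same call sites can print loudly during a one-off suspend but be demoted during frequent runtime-PM cycles. A small sketch of that indirection, with stdio standing in for the kernel's printk machinery (all names here are illustrative):

#include <stdarg.h>
#include <stdio.h>

enum { LVL_ERROR, LVL_INFO, LVL_DEBUG };

static int suspend_level = LVL_INFO;	/* mutable, like nv_printk_suspend_level */
static int console_level = LVL_INFO;	/* messages above this are dropped */

static void log_at(int level, const char *fmt, ...)
{
	va_list ap;

	if (level > console_level)
		return;
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

/* call sites never name a fixed level */
#define log_suspend(fmt, ...) log_at(suspend_level, fmt, ##__VA_ARGS__)

static void set_suspend_log_level(int level)
{
	suspend_level = level;	/* e.g. LVL_DEBUG around runtime suspend */
}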
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
index 1edec386ab36..181aa7da524d 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
@@ -72,6 +72,8 @@ int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *,
 			   struct nouveau_oclass *, u32, bool, const char *,
 			   const char *, int, void **);
 
+void nouveau_falcon_intr(struct nouveau_subdev *subdev);
+
 #define _nouveau_falcon_dtor _nouveau_engine_dtor
 int _nouveau_falcon_init(struct nouveau_object *);
 int _nouveau_falcon_fini(struct nouveau_object *, bool);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index d5502267c30f..9d2cd2006250 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -20,8 +20,8 @@ nouveau_mc(void *obj)
 	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
 }
 
-#define nouveau_mc_create(p,e,o,d) \
-	nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mc_create(p,e,o,m,d) \
+	nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
 #define nouveau_mc_destroy(p) ({ \
 	struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
 })
@@ -33,7 +33,8 @@ nouveau_mc(void *obj)
 })
 
 int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
-		       struct nouveau_oclass *, int, void **);
+		       struct nouveau_oclass *, const struct nouveau_mc_intr *,
+		       int, void **);
 void _nouveau_mc_dtor(struct nouveau_object *);
 int _nouveau_mc_init(struct nouveau_object *);
 int _nouveau_mc_fini(struct nouveau_object *, bool);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
index f2e87b105666..fcf57fa309bf 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
@@ -55,7 +55,7 @@ struct nouveau_vma {
 struct nouveau_vm {
 	struct nouveau_vmmgr *vmm;
 	struct nouveau_mm mm;
-	int refcount;
+	struct kref refcount;
 
 	struct list_head pgd_list;
 	atomic_t engref[NVDEV_SUBDEV_NR];
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 0687e6481438..2e11ea02cf87 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -2165,7 +2165,7 @@ nvbios_init(struct nouveau_subdev *subdev, bool execute)
 	u16 data;
 
 	if (execute)
-		nv_info(bios, "running init tables\n");
+		nv_suspend(bios, "running init tables\n");
 	while (!ret && (data = (init_script(bios, ++i)))) {
 		struct nvbios_init init = {
 			.subdev = subdev,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index 6c974dd83e8b..db9d6ddde52c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -81,7 +81,7 @@ void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
 void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
 		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
 
-void nv50_ram_put(struct nouveau_fb *, struct nouveau_mem **);
+void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *);
 extern int nv50_fb_memtype[0x80];
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
index 19e3a9a63a02..ab7ef0ac9e34 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
@@ -40,15 +40,15 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	switch (pfb914 & 0x00000003) {
-	case 0x00000000: pfb->ram->type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000001: pfb->ram->type = NV_MEM_TYPE_DDR2; break;
-	case 0x00000002: pfb->ram->type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break;
+	case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break;
 	case 0x00000003: break;
 	}
 
-	pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	pfb->ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	pfb->ram->tags = nv_rd32(pfb, 0x100320);
+	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	ram->tags = nv_rd32(pfb, 0x100320);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
index 7192aa6e5577..63a6aab86028 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
@@ -38,8 +38,8 @@ nv4e_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	pfb->ram->type = NV_MEM_TYPE_STOLEN;
+	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->type = NV_MEM_TYPE_STOLEN;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index af5aa7ee8ad9..903baff77fdd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -27,17 +27,10 @@
27#include "priv.h" 27#include "priv.h"
28 28
29void 29void
30nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) 30__nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem *mem)
31{ 31{
32 struct nouveau_mm_node *this; 32 struct nouveau_mm_node *this;
33 struct nouveau_mem *mem;
34 33
35 mem = *pmem;
36 *pmem = NULL;
37 if (unlikely(mem == NULL))
38 return;
39
40 mutex_lock(&pfb->base.mutex);
41 while (!list_empty(&mem->regions)) { 34 while (!list_empty(&mem->regions)) {
42 this = list_first_entry(&mem->regions, typeof(*this), rl_entry); 35 this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
43 36
@@ -46,6 +39,19 @@ nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
46 } 39 }
47 40
48 nouveau_mm_free(&pfb->tags, &mem->tag); 41 nouveau_mm_free(&pfb->tags, &mem->tag);
42}
43
44void
45nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
46{
47 struct nouveau_mem *mem = *pmem;
48
49 *pmem = NULL;
50 if (unlikely(mem == NULL))
51 return;
52
53 mutex_lock(&pfb->base.mutex);
54 __nv50_ram_put(pfb, mem);
49 mutex_unlock(&pfb->base.mutex); 55 mutex_unlock(&pfb->base.mutex);
50 56
51 kfree(mem); 57 kfree(mem);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index 9c3634acbb9d..cf97c4de4a6b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -33,11 +33,19 @@ void
 nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
 {
 	struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb);
+	struct nouveau_mem *mem = *pmem;
 
-	if ((*pmem)->tag)
-		ltcg->tags_free(ltcg, &(*pmem)->tag);
+	*pmem = NULL;
+	if (unlikely(mem == NULL))
+		return;
 
-	nv50_ram_put(pfb, pmem);
+	mutex_lock(&pfb->base.mutex);
+	if (mem->tag)
+		ltcg->tags_free(ltcg, &mem->tag);
+	__nv50_ram_put(pfb, mem);
+	mutex_unlock(&pfb->base.mutex);
+
+	kfree(mem);
 }
 
 int
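Both ram_put paths above now funnel through __nv50_ram_put(), an unlocked helper whose callers take the mutex themselves; that lets nvc0_ram_put() free its compression tags under the same lock instead of re-entering the nv50 version. A sketch of that lock-split convention under pthreads (the pool type and its free-list details are illustrative):

#include <pthread.h>
#include <stdlib.h>

struct pool {
	pthread_mutex_t lock;
	/* ... region and tag free lists ... */
};

/* double-underscore helper: caller must hold pool->lock */
static void __pool_put(struct pool *pool, void *obj)
{
	/* return the object's regions/tags to the free lists */
	(void)pool; (void)obj;
}

static void pool_put(struct pool *pool, void **pobj)
{
	void *obj = *pobj;

	*pobj = NULL;
	if (!obj)
		return;

	pthread_mutex_lock(&pool->lock);
	__pool_put(pool, obj);	/* variant-specific work goes here too */
	pthread_mutex_unlock(&pool->lock);
	free(obj);
}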
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index bf489dcf46e2..c4c1d415e7fe 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -103,7 +103,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev)
 	int i;
 
 	intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050);
-	if (nv_device(priv)->chipset >= 0x90)
+	if (nv_device(priv)->chipset > 0x92)
 		intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070);
 
 	hi = (intr0 & 0x0000ffff) | (intr1 << 16);
@@ -115,7 +115,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev)
 	}
 
 	nv_wr32(priv, 0xe054, intr0);
-	if (nv_device(priv)->chipset >= 0x90)
+	if (nv_device(priv)->chipset > 0x92)
 		nv_wr32(priv, 0xe074, intr1);
 }
 
@@ -146,7 +146,7 @@ nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	int ret;
 
 	ret = nouveau_gpio_create(parent, engine, oclass,
-				  nv_device(parent)->chipset >= 0x90 ? 32 : 16,
+				  nv_device(parent)->chipset > 0x92 ? 32 : 16,
 				  &priv);
 	*pobject = nv_object(priv);
 	if (ret)
@@ -182,7 +182,7 @@ nv50_gpio_init(struct nouveau_object *object)
 	/* disable, and ack any pending gpio interrupts */
 	nv_wr32(priv, 0xe050, 0x00000000);
 	nv_wr32(priv, 0xe054, 0xffffffff);
-	if (nv_device(priv)->chipset >= 0x90) {
+	if (nv_device(priv)->chipset > 0x92) {
 		nv_wr32(priv, 0xe070, 0x00000000);
 		nv_wr32(priv, 0xe074, 0xffffffff);
 	}
@@ -195,7 +195,7 @@ nv50_gpio_fini(struct nouveau_object *object, bool suspend)
 {
 	struct nv50_gpio_priv *priv = (void *)object;
 	nv_wr32(priv, 0xe050, 0x00000000);
-	if (nv_device(priv)->chipset >= 0x90)
+	if (nv_device(priv)->chipset > 0x92)
 		nv_wr32(priv, 0xe070, 0x00000000);
 	return nouveau_gpio_fini(&priv->base, suspend);
 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
index bcca883018f4..cce65cc56514 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
@@ -30,8 +30,9 @@ struct nvc0_ltcg_priv {
 	struct nouveau_ltcg base;
 	u32 part_nr;
 	u32 subp_nr;
-	struct nouveau_mm tags;
 	u32 num_tags;
+	u32 tag_base;
+	struct nouveau_mm tags;
 	struct nouveau_mm_node *tag_ram;
 };
 
@@ -117,10 +118,6 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
 	u32 tag_size, tag_margin, tag_align;
 	int ret;
 
-	nv_wr32(priv, 0x17e8d8, priv->part_nr);
-	if (nv_device(pfb)->card_type >= NV_E0)
-		nv_wr32(priv, 0x17e000, priv->part_nr);
-
 	/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
 	priv->num_tags = (pfb->ram->size >> 17) / 4;
 	if (priv->num_tags > (1 << 17))
@@ -142,7 +139,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
 		tag_size += tag_align;
 		tag_size = (tag_size + 0xfff) >> 12; /* round up */
 
-		ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1,
+		ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1,
 				      &priv->tag_ram);
 		if (ret) {
 			priv->num_tags = 0;
@@ -152,7 +149,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
 			tag_base += tag_align - 1;
 			ret = do_div(tag_base, tag_align);
 
-			nv_wr32(priv, 0x17e8d4, tag_base);
+			priv->tag_base = tag_base;
 		}
 	ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
 
@@ -182,8 +179,6 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	}
 	priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
 
-	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
-
 	ret = nvc0_ltcg_init_tag_ram(pfb, priv);
 	if (ret)
 		return ret;
@@ -209,13 +204,32 @@ nvc0_ltcg_dtor(struct nouveau_object *object)
 	nouveau_ltcg_destroy(ltcg);
 }
 
+static int
+nvc0_ltcg_init(struct nouveau_object *object)
+{
+	struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
+	struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+	int ret;
+
+	ret = nouveau_ltcg_init(ltcg);
+	if (ret)
+		return ret;
+
+	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
+	nv_wr32(priv, 0x17e8d8, priv->part_nr);
+	if (nv_device(ltcg)->card_type >= NV_E0)
+		nv_wr32(priv, 0x17e000, priv->part_nr);
+	nv_wr32(priv, 0x17e8d4, priv->tag_base);
+	return 0;
+}
+
 struct nouveau_oclass
 nvc0_ltcg_oclass = {
 	.handle = NV_SUBDEV(LTCG, 0xc0),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nvc0_ltcg_ctor,
 		.dtor = nvc0_ltcg_dtor,
-		.init = _nouveau_ltcg_init,
+		.init = nvc0_ltcg_init,
 		.fini = _nouveau_ltcg_fini,
 	},
 };
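The LTCG hunks move every register write out of the constructor into a new init() hook, keeping only the computed values (part_nr, tag_base) in the private struct; since init() also runs after resume, the tag-RAM setup survives a suspend cycle. A sketch of that ctor/init split (reg_write() is a stand-in for nv_wr32(), and the values are placeholders):

struct ltc {
	unsigned int part_nr;
	unsigned int tag_base;	/* computed once at construction */
};

static void reg_write(unsigned int reg, unsigned int val)
{
	(void)reg; (void)val;	/* stand-in for an MMIO write */
}

static void ltc_ctor(struct ltc *ltc)
{
	ltc->part_nr = 4;	/* placeholder for the probed value */
	ltc->tag_base = 0x1000;	/* placeholder for the tag-RAM math */
}

static void ltc_init(struct ltc *ltc)
{
	/* replayed on every init, including after resume */
	reg_write(0x17e8d8, ltc->part_nr);
	reg_write(0x17e8d4, ltc->tag_base);
}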
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 1c0330b8c9a4..20f9a538746e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -23,16 +23,20 @@
  */
 
 #include <subdev/mc.h>
+#include <linux/pm_runtime.h>
 
 static irqreturn_t
 nouveau_mc_intr(int irq, void *arg)
 {
 	struct nouveau_mc *pmc = arg;
 	const struct nouveau_mc_intr *map = pmc->intr_map;
+	struct nouveau_device *device = nv_device(pmc);
 	struct nouveau_subdev *unit;
 	u32 stat, intr;
 
 	intr = stat = nv_rd32(pmc, 0x000100);
+	if (intr == 0xffffffff)
+		return IRQ_NONE;
 	while (stat && map->stat) {
 		if (stat & map->stat) {
 			unit = nouveau_subdev(pmc, map->unit);
@@ -47,6 +51,8 @@ nouveau_mc_intr(int irq, void *arg)
 		nv_error(pmc, "unknown intr 0x%08x\n", stat);
 	}
 
+	if (stat == IRQ_HANDLED)
+		pm_runtime_mark_last_busy(&device->pdev->dev);
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -80,7 +86,9 @@ _nouveau_mc_dtor(struct nouveau_object *object)
 
 int
 nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
-		   struct nouveau_oclass *oclass, int length, void **pobject)
+		   struct nouveau_oclass *oclass,
+		   const struct nouveau_mc_intr *intr_map,
+		   int length, void **pobject)
 {
 	struct nouveau_device *device = nv_device(parent);
 	struct nouveau_mc *pmc;
@@ -92,6 +100,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
+	pmc->intr_map = intr_map;
+
 	ret = request_irq(device->pdev->irq, nouveau_mc_intr,
 			  IRQF_SHARED, "nouveau", pmc);
 	if (ret < 0)
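Two defensive patterns in the MC hunks are worth noting: a read of 0xffffffff from the interrupt status register is treated as "device absent or powered down" and the shared IRQ line is declined, and the dispatch table now arrives through the constructor so it is in place before request_irq() can deliver the first interrupt. A compact sketch under those assumptions (types and the rd32 callback are illustrative):

#include <stdint.h>

struct intr_map { uint32_t stat; int unit; };

struct mc {
	const struct intr_map *map;	/* set before the IRQ is requested */
};

/* returns 1 for IRQ_HANDLED, 0 for IRQ_NONE */
static int mc_isr(struct mc *mc, uint32_t (*rd32)(void))
{
	uint32_t stat = rd32();
	const struct intr_map *m;

	if (stat == 0xffffffff)		/* all-ones: device gone or off */
		return 0;
	if (!stat)			/* not our interrupt */
		return 0;
	for (m = mc->map; m->stat; m++)
		if (stat & m->stat)
			stat &= ~m->stat;	/* dispatch to m->unit here */
	return stat ? 0 : 1;		/* handled only if fully dispatched */
}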
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 8c769715227b..64aa4edb0d9d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -50,12 +50,11 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv04_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv04_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 51919371810f..d9891782bf28 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -36,12 +36,11 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv44_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv04_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index 0cb322a5e72c..2b1afe225db8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -41,7 +41,7 @@ nv50_mc_intr[] = {
 	{ 0x04000000, NVDEV_ENGINE_DISP },
 	{ 0x10000000, NVDEV_SUBDEV_BUS },
 	{ 0x80000000, NVDEV_ENGINE_SW },
-	{ 0x0000d101, NVDEV_SUBDEV_FB },
+	{ 0x0002d101, NVDEV_SUBDEV_FB },
 	{},
 };
 
@@ -53,12 +53,11 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv50_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv50_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index e82fd21b5041..0d57b4d3e001 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -54,12 +54,11 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv98_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv98_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index c5da3babbc62..104175c5a2dd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -57,12 +57,11 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nvc0_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nvc0_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 67fcb6c852ac..ef3133e7575c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -361,7 +361,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
 
 	INIT_LIST_HEAD(&vm->pgd_list);
 	vm->vmm = vmm;
-	vm->refcount = 1;
+	kref_init(&vm->refcount);
 	vm->fpde = offset >> (vmm->pgt_bits + 12);
 	vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);
 
@@ -441,8 +441,9 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
 }
 
 static void
-nouveau_vm_del(struct nouveau_vm *vm)
+nouveau_vm_del(struct kref *kref)
 {
+	struct nouveau_vm *vm = container_of(kref, typeof(*vm), refcount);
 	struct nouveau_vm_pgd *vpgd, *tmp;
 
 	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
@@ -458,27 +459,19 @@ int
 nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
 	       struct nouveau_gpuobj *pgd)
 {
-	struct nouveau_vm *vm;
-	int ret;
-
-	vm = ref;
-	if (vm) {
-		ret = nouveau_vm_link(vm, pgd);
+	if (ref) {
+		int ret = nouveau_vm_link(ref, pgd);
 		if (ret)
 			return ret;
 
-		vm->refcount++;
+		kref_get(&ref->refcount);
 	}
 
-	vm = *ptr;
-	*ptr = ref;
-
-	if (vm) {
-		nouveau_vm_unlink(vm, pgd);
-
-		if (--vm->refcount == 0)
-			nouveau_vm_del(vm);
+	if (*ptr) {
+		nouveau_vm_unlink(*ptr, pgd);
+		kref_put(&(*ptr)->refcount, nouveau_vm_del);
 	}
 
+	*ptr = ref;
 	return 0;
 }
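The open-coded `int refcount` becomes a struct kref above, which pushes the decrement-and-test into kref_put() and hands the release callback the embedded kref; container_of() then recovers the owning object. A self-contained userspace model of that pattern, using C11 atomics in place of the kernel's implementation:

#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kref { atomic_int count; };

static void kref_init(struct kref *k) { atomic_init(&k->count, 1); }
static void kref_get(struct kref *k) { atomic_fetch_add(&k->count, 1); }

static void kref_put(struct kref *k, void (*release)(struct kref *))
{
	if (atomic_fetch_sub(&k->count, 1) == 1)
		release(k);	/* last reference dropped exactly once */
}

struct vm {
	struct kref refcount;
	/* ... page directory list, address-space state ... */
};

static void vm_del(struct kref *kref)
{
	struct vm *vm = container_of(kref, struct vm, refcount);
	free(vm);
}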
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 0782bd2f1e04..d4fbf11360fe 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -22,6 +22,7 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
+#include <linux/pm_runtime.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
@@ -606,6 +607,24 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
 	regp->ramdac_a34 = 0x1;
 }
 
+static int
+nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+	struct nv04_display *disp = nv04_display(crtc->dev);
+	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	int ret;
+
+	ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+	if (ret == 0) {
+		if (disp->image[nv_crtc->index])
+			nouveau_bo_unpin(disp->image[nv_crtc->index]);
+		nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]);
+	}
+
+	return ret;
+}
+
 /**
  * Sets up registers for the given mode/adjusted_mode pair.
  *
@@ -622,10 +641,15 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 	struct drm_device *dev = crtc->dev;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	int ret;
 
 	NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index);
 	drm_mode_debug_printmodeline(adjusted_mode);
 
+	ret = nv_crtc_swap_fbs(crtc, old_fb);
+	if (ret)
+		return ret;
+
 	/* unlock must come after turning off FP_TG_CONTROL in output_prepare */
 	nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
 
@@ -722,6 +746,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
 
 static void nv_crtc_destroy(struct drm_crtc *crtc)
 {
+	struct nv04_display *disp = nv04_display(crtc->dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 
 	if (!nv_crtc)
@@ -729,6 +754,10 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
 
 	drm_crtc_cleanup(crtc);
 
+	if (disp->image[nv_crtc->index])
+		nouveau_bo_unpin(disp->image[nv_crtc->index]);
+	nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+
 	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
 	nouveau_bo_unpin(nv_crtc->cursor.nvbo);
 	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
@@ -754,6 +783,16 @@ nv_crtc_gamma_load(struct drm_crtc *crtc)
 }
 
 static void
+nv_crtc_disable(struct drm_crtc *crtc)
+{
+	struct nv04_display *disp = nv04_display(crtc->dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	if (disp->image[nv_crtc->index])
+		nouveau_bo_unpin(disp->image[nv_crtc->index]);
+	nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+}
+
+static void
 nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
 		  uint32_t size)
 {
@@ -791,7 +830,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 	struct drm_framebuffer *drm_fb;
 	struct nouveau_framebuffer *fb;
 	int arb_burst, arb_lwm;
-	int ret;
 
 	NV_DEBUG(drm, "index %d\n", nv_crtc->index);
 
@@ -801,10 +839,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 		return 0;
 	}
 
-
 	/* If atomic, we want to switch to the fb we were passed, so
-	 * now we update pointers to do that. (We don't pin; just
-	 * assume we're already pinned and update the base address.)
+	 * now we update pointers to do that.
 	 */
 	if (atomic) {
 		drm_fb = passed_fb;
@@ -812,17 +848,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 	} else {
 		drm_fb = crtc->fb;
 		fb = nouveau_framebuffer(crtc->fb);
-		/* If not atomic, we can go ahead and pin, and unpin the
-		 * old fb we were passed.
-		 */
-		ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
-		if (ret)
-			return ret;
-
-		if (passed_fb) {
-			struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
-			nouveau_bo_unpin(ofb->nvbo);
-		}
 	}
 
 	nv_crtc->fb.offset = fb->nvbo->bo.offset;
@@ -877,6 +902,9 @@ static int
 nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 			struct drm_framebuffer *old_fb)
 {
+	int ret = nv_crtc_swap_fbs(crtc, old_fb);
+	if (ret)
+		return ret;
 	return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
 }
 
@@ -1007,13 +1035,59 @@ nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 	return 0;
 }
 
+int
+nouveau_crtc_set_config(struct drm_mode_set *set)
+{
+	struct drm_device *dev;
+	struct nouveau_drm *drm;
+	int ret;
+	struct drm_crtc *crtc;
+	bool active = false;
+	if (!set || !set->crtc)
+		return -EINVAL;
+
+	dev = set->crtc->dev;
+
+	/* get a pm reference here */
+	ret = pm_runtime_get_sync(dev->dev);
+	if (ret < 0)
+		return ret;
+
+	ret = drm_crtc_helper_set_config(set);
+
+	drm = nouveau_drm(dev);
+
+	/* if we get here with no crtcs active then we can drop a reference */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc->enabled)
+			active = true;
+	}
+
+	pm_runtime_mark_last_busy(dev->dev);
+	/* if we have active crtcs and we don't have a power ref,
+	   take the current one */
+	if (active && !drm->have_disp_power_ref) {
+		drm->have_disp_power_ref = true;
+		return ret;
+	}
+	/* if we have no active crtcs, then drop the power ref
+	   we got before */
+	if (!active && drm->have_disp_power_ref) {
+		pm_runtime_put_autosuspend(dev->dev);
+		drm->have_disp_power_ref = false;
+	}
+	/* drop the power reference we got coming in here */
+	pm_runtime_put_autosuspend(dev->dev);
+	return ret;
+}
+
 static const struct drm_crtc_funcs nv04_crtc_funcs = {
 	.save = nv_crtc_save,
 	.restore = nv_crtc_restore,
 	.cursor_set = nv04_crtc_cursor_set,
 	.cursor_move = nv04_crtc_cursor_move,
 	.gamma_set = nv_crtc_gamma_set,
-	.set_config = drm_crtc_helper_set_config,
+	.set_config = nouveau_crtc_set_config,
 	.page_flip = nouveau_crtc_page_flip,
 	.destroy = nv_crtc_destroy,
 };
@@ -1027,6 +1101,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
 	.mode_set_base = nv04_crtc_mode_set_base,
 	.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
 	.load_lut = nv_crtc_gamma_load,
+	.disable = nv_crtc_disable,
 };
 
 int
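The nouveau_crtc_set_config() wrapper above brackets the modeset in a temporary runtime-PM reference and then converts it into, or drops, one long-lived "display active" reference depending on whether any CRTC ends up enabled. A sketch of that bookkeeping, with get()/put() modelling pm_runtime_get_sync()/pm_runtime_put_autosuspend() and the other helpers purely illustrative:

#include <stdbool.h>

struct dev { bool have_disp_power_ref; };

static int get(struct dev *d) { (void)d; return 0; }
static void put(struct dev *d) { (void)d; }
static bool any_crtc_active(struct dev *d) { (void)d; return true; }
static int apply_config(struct dev *d) { (void)d; return 0; }

static int set_config(struct dev *dev)
{
	int ret;

	if (get(dev) < 0)		/* temporary ref for the modeset */
		return -1;
	ret = apply_config(dev);

	if (any_crtc_active(dev) && !dev->have_disp_power_ref) {
		dev->have_disp_power_ref = true;
		return ret;		/* keep the ref: display is up */
	}
	if (!any_crtc_active(dev) && dev->have_disp_power_ref) {
		put(dev);		/* drop the long-lived ref */
		dev->have_disp_power_ref = false;
	}
	put(dev);			/* drop the temporary ref */
	return ret;
}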
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index a0a031dad13f..9928187f0a7d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -81,6 +81,7 @@ struct nv04_display {
 	uint32_t saved_vga_font[4][16384];
 	uint32_t dac_users[4];
 	struct nouveau_object *core;
+	struct nouveau_bo *image[2];
 };
 
 static inline struct nv04_display *
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index d97f20069d3e..dd7d2e182719 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -25,8 +25,27 @@
 #define NOUVEAU_DSM_POWER_SPEED 0x01
 #define NOUVEAU_DSM_POWER_STAMINA 0x02
 
-#define NOUVEAU_DSM_OPTIMUS_FN 0x1A
-#define NOUVEAU_DSM_OPTIMUS_ARGS 0x03000001
+#define NOUVEAU_DSM_OPTIMUS_CAPS 0x1A
+#define NOUVEAU_DSM_OPTIMUS_FLAGS 0x1B
+
+#define NOUVEAU_DSM_OPTIMUS_POWERDOWN_PS3 (3 << 24)
+#define NOUVEAU_DSM_OPTIMUS_NO_POWERDOWN_PS3 (2 << 24)
+#define NOUVEAU_DSM_OPTIMUS_FLAGS_CHANGED (1)
+
+#define NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN (NOUVEAU_DSM_OPTIMUS_POWERDOWN_PS3 | NOUVEAU_DSM_OPTIMUS_FLAGS_CHANGED)
+
+/* result of the optimus caps function */
+#define OPTIMUS_ENABLED (1 << 0)
+#define OPTIMUS_STATUS_MASK (3 << 3)
+#define OPTIMUS_STATUS_OFF (0 << 3)
+#define OPTIMUS_STATUS_ON_ENABLED (1 << 3)
+#define OPTIMUS_STATUS_PWR_STABLE (3 << 3)
+#define OPTIMUS_DISPLAY_HOTPLUG (1 << 6)
+#define OPTIMUS_CAPS_MASK (7 << 24)
+#define OPTIMUS_DYNAMIC_PWR_CAP (1 << 24)
+
+#define OPTIMUS_AUDIO_CAPS_MASK (3 << 27)
+#define OPTIMUS_HDA_CODEC_MASK (2 << 27) /* hda bios control */
 
 static struct nouveau_dsm_priv {
 	bool dsm_detected;
@@ -251,9 +270,18 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
 		retval |= NOUVEAU_DSM_HAS_MUX;
 
 	if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm,
-			     NOUVEAU_DSM_OPTIMUS_FN))
+			     NOUVEAU_DSM_OPTIMUS_CAPS))
 		retval |= NOUVEAU_DSM_HAS_OPT;
 
+	if (retval & NOUVEAU_DSM_HAS_OPT) {
+		uint32_t result;
+		nouveau_optimus_dsm(dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, 0,
+				    &result);
+		dev_info(&pdev->dev, "optimus capabilities: %s, status %s%s\n",
+			 (result & OPTIMUS_ENABLED) ? "enabled" : "disabled",
+			 (result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "",
+			 (result & OPTIMUS_HDA_CODEC_MASK) ? "hda bios codec supported" : "");
+	}
 	if (retval)
 		nouveau_dsm_priv.dhandle = dhandle;
 
@@ -328,8 +356,12 @@ void nouveau_switcheroo_optimus_dsm(void)
 	if (!nouveau_dsm_priv.optimus_detected)
 		return;
 
-	nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FN,
-			    NOUVEAU_DSM_OPTIMUS_ARGS, &result);
+	nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS,
+			    0x3, &result);
+
+	nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_CAPS,
+			    NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN, &result);
+
 }
 
 void nouveau_unregister_dsm_handler(void)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4b1afb131380..755c38d06271 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -148,6 +148,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 
 	if (unlikely(nvbo->gem))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
+	WARN_ON(nvbo->pin_refcnt > 0);
 	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
 	kfree(nvbo);
 }
@@ -197,6 +198,17 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	size_t acc_size;
 	int ret;
 	int type = ttm_bo_type_device;
+	int lpg_shift = 12;
+	int max_size;
+
+	if (drm->client.base.vm)
+		lpg_shift = drm->client.base.vm->vmm->lpg_shift;
+	max_size = INT_MAX & ~((1 << lpg_shift) - 1);
+
+	if (size <= 0 || size > max_size) {
+		nv_warn(drm, "skipped size %x\n", (u32)size);
+		return -EINVAL;
+	}
 
 	if (sg)
 		type = ttm_bo_type_sg;
@@ -340,13 +352,15 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_buffer_object *bo = &nvbo->bo;
-	int ret;
+	int ret, ref;
 
 	ret = ttm_bo_reserve(bo, false, false, false, 0);
 	if (ret)
 		return ret;
 
-	if (--nvbo->pin_refcnt)
+	ref = --nvbo->pin_refcnt;
+	WARN_ON_ONCE(ref < 0);
+	if (ref)
 		goto out;
 
 	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
@@ -578,7 +592,7 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
 	int ret = RING_SPACE(chan, 2);
 	if (ret == 0) {
 		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
-		OUT_RING (chan, handle);
+		OUT_RING (chan, handle & 0x0000ffff);
 		FIRE_RING (chan);
 	}
 	return ret;
@@ -973,7 +987,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	int ret;
 
-	mutex_lock(&chan->cli->mutex);
+	mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
 
 	/* create temporary vmas for the transfer and attach them to the
 	 * old nouveau_mem node, these will get cleaned up after ttm has
@@ -1014,7 +1028,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 			    struct ttm_mem_reg *, struct ttm_mem_reg *);
 		int (*init)(struct nouveau_channel *, u32 handle);
 	} _methods[] = {
-		{ "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{ "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
 		{ "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
 		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
 		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
@@ -1034,7 +1048,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 		struct nouveau_channel *chan;
 		u32 handle = (mthd->engine << 16) | mthd->oclass;
 
-		if (mthd->init == nve0_bo_move_init)
+		if (mthd->engine)
 			chan = drm->cechan;
 		else
 			chan = drm->channel;
@@ -1251,7 +1265,9 @@ out:
 static int
 nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
-	return 0;
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	return drm_vma_node_verify_access(&nvbo->gem->vma_node, filp);
 }
 
 static int
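The size clamp added to nouveau_bo_new() caps objects at INT_MAX rounded down to the VM's large-page boundary. Working the arithmetic for two shifts (12 is the default in the hunk; 17 is an assumed large-page shift used purely for illustration):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int shifts[] = { 12, 17 };
	int i;

	for (i = 0; i < 2; i++) {
		int max_size = INT_MAX & ~((1 << shifts[i]) - 1);
		/* prints 0x7ffff000 for 12 and 0x7ffe0000 for 17 */
		printf("lpg_shift %2d -> max_size 0x%x\n",
		       shifts[i], max_size);
	}
	return 0;
}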
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 4da776f344d7..c5b36f9e9a10 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -26,6 +26,8 @@
 
 #include <acpi/button.h>
 
+#include <linux/pm_runtime.h>
+
 #include <drm/drmP.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_crtc_helper.h>
@@ -240,6 +242,8 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
 	struct nouveau_encoder *nv_partner;
 	struct nouveau_i2c_port *i2c;
 	int type;
+	int ret;
+	enum drm_connector_status conn_status = connector_status_disconnected;
 
 	/* Cleanup the previous EDID block. */
 	if (nv_connector->edid) {
@@ -248,6 +252,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
 		nv_connector->edid = NULL;
 	}
 
+	ret = pm_runtime_get_sync(connector->dev->dev);
+	if (ret < 0)
+		return conn_status;
+
 	i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
 	if (i2c) {
 		nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
@@ -263,7 +271,8 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
 		    !nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
 			NV_ERROR(drm, "Detected %s, but failed init\n",
 				 drm_get_connector_name(connector));
-			return connector_status_disconnected;
+			conn_status = connector_status_disconnected;
+			goto out;
 		}
 
 		/* Override encoder type for DVI-I based on whether EDID
@@ -290,13 +299,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
 		}
 
 		nouveau_connector_set_encoder(connector, nv_encoder);
-		return connector_status_connected;
+		conn_status = connector_status_connected;
+		goto out;
 	}
 
 	nv_encoder = nouveau_connector_of_detect(connector);
 	if (nv_encoder) {
 		nouveau_connector_set_encoder(connector, nv_encoder);
-		return connector_status_connected;
+		conn_status = connector_status_connected;
+		goto out;
 	}
 
 detect_analog:
@@ -311,12 +322,18 @@ detect_analog:
 		if (helper->detect(encoder, connector) ==
 		    connector_status_connected) {
 			nouveau_connector_set_encoder(connector, nv_encoder);
-			return connector_status_connected;
+			conn_status = connector_status_connected;
+			goto out;
 		}
 
 	}
 
-	return connector_status_disconnected;
+ out:
+
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
+
+	return conn_status;
 }
 
 static enum drm_connector_status
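nouveau_connector_detect() is rewritten above from multiple early returns to a single exit label, so the runtime-PM reference taken on entry is always dropped no matter which probe path decides the status. The shape of that conversion, reduced to a sketch (probe_ddc() and the status values are illustrative):

#include <stdbool.h>

enum status { DISCONNECTED, CONNECTED };

struct conn { int id; };

static int runtime_get(struct conn *c) { (void)c; return 0; }
static void runtime_put(struct conn *c) { (void)c; }
static bool probe_ddc(struct conn *c) { (void)c; return false; }

static enum status detect(struct conn *conn)
{
	enum status status = DISCONNECTED;

	if (runtime_get(conn) < 0)	/* wake the GPU for the probe */
		return status;

	if (!probe_ddc(conn))
		goto out;		/* no EDID: stay disconnected */

	status = CONNECTED;
out:
	runtime_put(conn);		/* balanced on every path */
	return status;
}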
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 708b2d1c0037..77ffded68837 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -138,7 +138,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
 {
 	struct nouveau_framebuffer *nouveau_fb;
 	struct drm_gem_object *gem;
-	int ret;
+	int ret = -ENOMEM;
 
 	gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
 	if (!gem)
@@ -146,15 +146,19 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
 
 	nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
 	if (!nouveau_fb)
-		return ERR_PTR(-ENOMEM);
+		goto err_unref;
 
 	ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
-	if (ret) {
-		drm_gem_object_unreference(gem);
-		return ERR_PTR(ret);
-	}
+	if (ret)
+		goto err;
 
 	return &nouveau_fb->base;
+
+err:
+	kfree(nouveau_fb);
+err_unref:
+	drm_gem_object_unreference(gem);
+	return ERR_PTR(ret);
 }
 
@@ -390,7 +394,7 @@ nouveau_display_suspend(struct drm_device *dev)
 
 	nouveau_display_fini(dev);
 
-	NV_INFO(drm, "unpinning framebuffer(s)...\n");
+	NV_SUSPEND(drm, "unpinning framebuffer(s)...\n");
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_framebuffer *nouveau_fb;
 
@@ -412,7 +416,7 @@ nouveau_display_suspend(struct drm_device *dev)
 }
 
 void
-nouveau_display_resume(struct drm_device *dev)
+nouveau_display_repin(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_crtc *crtc;
@@ -437,10 +441,12 @@ nouveau_display_resume(struct drm_device *dev)
 		if (ret)
 			NV_ERROR(drm, "Could not pin/map cursor.\n");
 	}
+}
 
-	nouveau_fbcon_set_suspend(dev, 0);
-	nouveau_fbcon_zfill_all(dev);
-
+void
+nouveau_display_resume(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
 	nouveau_display_init(dev);
 
 	/* Force CLUT to get re-loaded during modeset */
@@ -515,7 +521,8 @@ fail:
 
 int
 nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-		       struct drm_pending_vblank_event *event)
+		       struct drm_pending_vblank_event *event,
+		       uint32_t page_flip_flags)
 {
 	struct drm_device *dev = crtc->dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
@@ -524,9 +531,12 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	struct nouveau_page_flip_state *s;
 	struct nouveau_channel *chan = NULL;
 	struct nouveau_fence *fence;
-	struct list_head res;
-	struct ttm_validate_buffer res_val[2];
+	struct ttm_validate_buffer resv[2] = {
+		{ .bo = &old_bo->bo },
+		{ .bo = &new_bo->bo },
+	};
 	struct ww_acquire_ctx ticket;
+	LIST_HEAD(res);
 	int ret;
 
 	if (!drm->channel)
@@ -545,27 +555,19 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	chan = drm->channel;
 	spin_unlock(&old_bo->bo.bdev->fence_lock);
 
-	mutex_lock(&chan->cli->mutex);
-
 	if (new_bo != old_bo) {
 		ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
-		if (likely(!ret)) {
-			res_val[0].bo = &old_bo->bo;
-			res_val[1].bo = &new_bo->bo;
-			INIT_LIST_HEAD(&res);
-			list_add_tail(&res_val[0].head, &res);
-			list_add_tail(&res_val[1].head, &res);
-			ret = ttm_eu_reserve_buffers(&ticket, &res);
-			if (ret)
-				nouveau_bo_unpin(new_bo);
-		}
-	} else
-		ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
+		if (ret)
+			goto fail_free;
 
-	if (ret) {
-		mutex_unlock(&chan->cli->mutex);
-		goto fail_free;
+		list_add(&resv[1].head, &res);
 	}
+	list_add(&resv[0].head, &res);
+
+	mutex_lock(&chan->cli->mutex);
+	ret = ttm_eu_reserve_buffers(&ticket, &res);
+	if (ret)
+		goto fail_unpin;
 
 	/* Initialize a page flip struct */
571 *s = (struct nouveau_page_flip_state) 573 *s = (struct nouveau_page_flip_state)
@@ -576,10 +578,11 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
576 /* Emit a page flip */ 578 /* Emit a page flip */
577 if (nv_device(drm->device)->card_type >= NV_50) { 579 if (nv_device(drm->device)->card_type >= NV_50) {
578 ret = nv50_display_flip_next(crtc, fb, chan, 0); 580 ret = nv50_display_flip_next(crtc, fb, chan, 0);
579 if (ret) { 581 if (ret)
580 mutex_unlock(&chan->cli->mutex);
581 goto fail_unreserve; 582 goto fail_unreserve;
582 } 583 } else {
584 struct nv04_display *dispnv04 = nv04_display(dev);
585 nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]);
583 } 586 }
584 587
585 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); 588 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
@@ -590,22 +593,18 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
590 /* Update the crtc struct and cleanup */ 593 /* Update the crtc struct and cleanup */
591 crtc->fb = fb; 594 crtc->fb = fb;
592 595
593 if (old_bo != new_bo) { 596 ttm_eu_fence_buffer_objects(&ticket, &res, fence);
594 ttm_eu_fence_buffer_objects(&ticket, &res, fence); 597 if (old_bo != new_bo)
595 nouveau_bo_unpin(old_bo); 598 nouveau_bo_unpin(old_bo);
596 } else {
597 nouveau_bo_fence(new_bo, fence);
598 ttm_bo_unreserve(&new_bo->bo);
599 }
600 nouveau_fence_unref(&fence); 599 nouveau_fence_unref(&fence);
601 return 0; 600 return 0;
602 601
603fail_unreserve: 602fail_unreserve:
604 if (old_bo != new_bo) { 603 ttm_eu_backoff_reservation(&ticket, &res);
605 ttm_eu_backoff_reservation(&ticket, &res); 604fail_unpin:
605 mutex_unlock(&chan->cli->mutex);
606 if (old_bo != new_bo)
606 nouveau_bo_unpin(new_bo); 607 nouveau_bo_unpin(new_bo);
607 } else
608 ttm_bo_unreserve(&new_bo->bo);
609fail_free: 608fail_free:
610 kfree(s); 609 kfree(s);
611 return ret; 610 return ret;
@@ -681,13 +680,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
681} 680}
682 681
683int 682int
684nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
685 uint32_t handle)
686{
687 return drm_gem_handle_delete(file_priv, handle);
688}
689
690int
691nouveau_display_dumb_map_offset(struct drm_file *file_priv, 683nouveau_display_dumb_map_offset(struct drm_file *file_priv,
692 struct drm_device *dev, 684 struct drm_device *dev,
693 uint32_t handle, uint64_t *poffset) 685 uint32_t handle, uint64_t *poffset)
@@ -697,7 +689,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
697 gem = drm_gem_object_lookup(dev, file_priv, handle); 689 gem = drm_gem_object_lookup(dev, file_priv, handle);
698 if (gem) { 690 if (gem) {
699 struct nouveau_bo *bo = gem->driver_private; 691 struct nouveau_bo *bo = gem->driver_private;
700 *poffset = bo->bo.addr_space_offset; 692 *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
701 drm_gem_object_unreference_unlocked(gem); 693 drm_gem_object_unreference_unlocked(gem);
702 return 0; 694 return 0;
703 } 695 }
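
The page-flip rework above replaces the open-coded reserve paths with the standard TTM execbuf-util pattern: collect the buffers as ttm_validate_buffer entries on a list, reserve them all under one ww_acquire_ctx, then either fence-and-unreserve on success or back off on failure. A minimal sketch of that pattern as the new flip code uses it, assuming the ttm_execbuf_util API of this kernel generation; flip_reserve_sketch is an illustrative name, not a function in the patch:

#include <linux/list.h>
#include <drm/ttm/ttm_execbuf_util.h>

/* Sketch: reserve old/new flip buffers together, then fence both. */
static int flip_reserve_sketch(struct ttm_buffer_object *old_bo,
			       struct ttm_buffer_object *new_bo,
			       struct ttm_validate_buffer resv[2])
{
	struct ww_acquire_ctx ticket;
	LIST_HEAD(res);			/* on-stack, statically initialised */
	int ret;

	resv[0].bo = old_bo;
	resv[1].bo = new_bo;
	list_add(&resv[0].head, &res);
	if (new_bo != old_bo)
		list_add(&resv[1].head, &res);

	/* Reserve every buffer on the list; the ww-mutex ticket resolves
	 * contended reservations without deadlocking against other CPUs. */
	ret = ttm_eu_reserve_buffers(&ticket, &res);
	if (ret)
		return ret;

	/* ... emit the flip here ...
	 * on error:   ttm_eu_backoff_reservation(&ticket, &res);
	 * on success: ttm_eu_fence_buffer_objects(&ticket, &res, fence);
	 */
	return 0;
}
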
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 1ea3e4734b62..025c66f8e0ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -57,10 +57,12 @@ void nouveau_display_destroy(struct drm_device *dev);
57int nouveau_display_init(struct drm_device *dev); 57int nouveau_display_init(struct drm_device *dev);
58void nouveau_display_fini(struct drm_device *dev); 58void nouveau_display_fini(struct drm_device *dev);
59int nouveau_display_suspend(struct drm_device *dev); 59int nouveau_display_suspend(struct drm_device *dev);
60void nouveau_display_repin(struct drm_device *dev);
60void nouveau_display_resume(struct drm_device *dev); 61void nouveau_display_resume(struct drm_device *dev);
61 62
62int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, 63int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
63 struct drm_pending_vblank_event *event); 64 struct drm_pending_vblank_event *event,
65 uint32_t page_flip_flags);
64int nouveau_finish_page_flip(struct nouveau_channel *, 66int nouveau_finish_page_flip(struct nouveau_channel *,
65 struct nouveau_page_flip_state *); 67 struct nouveau_page_flip_state *);
66 68
@@ -68,11 +70,10 @@ int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
68 struct drm_mode_create_dumb *args); 70 struct drm_mode_create_dumb *args);
69int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *, 71int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
70 u32 handle, u64 *offset); 72 u32 handle, u64 *offset);
71int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
72 u32 handle);
73 73
74void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *); 74void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
75 75
76int nouveau_crtc_set_config(struct drm_mode_set *set);
76#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT 77#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
77extern int nouveau_backlight_init(struct drm_device *); 78extern int nouveau_backlight_init(struct drm_device *);
78extern void nouveau_backlight_exit(struct drm_device *); 79extern void nouveau_backlight_exit(struct drm_device *);
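
The header now splits resume into two stages so buffers can be repinned before full display bring-up: nouveau_do_resume() calls only the repin half, and the pmops wrappers finish with the full resume once fbcon is back (see the nouveau_drm.c hunks below). A sketch of the intended call order across a sleep cycle, assuming the prototypes above keep the semantics shown in the nouveau_display.c hunks; example_sleep_cycle is illustrative only:

/* Sketch: how the split suspend/repin/resume entry points are sequenced. */
static int example_sleep_cycle(struct drm_device *dev)
{
	int ret;

	ret = nouveau_display_suspend(dev);	/* fini + unpin fb/cursor bos */
	if (ret)
		return ret;

	/* ... device powered down and back up, core objects resumed ... */

	nouveau_display_repin(dev);		/* pin/map the bos again */
	nouveau_display_resume(dev);		/* init + reload CLUTs/modes */
	return 0;
}
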
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 218a4b522fe5..8863644024b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -25,7 +25,10 @@
25#include <linux/console.h> 25#include <linux/console.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/pci.h> 27#include <linux/pci.h>
28 28#include <linux/pm_runtime.h>
29#include <linux/vga_switcheroo.h>
30#include "drmP.h"
31#include "drm_crtc_helper.h"
29#include <core/device.h> 32#include <core/device.h>
30#include <core/client.h> 33#include <core/client.h>
31#include <core/gpuobj.h> 34#include <core/gpuobj.h>
@@ -69,6 +72,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
69int nouveau_modeset = -1; 72int nouveau_modeset = -1;
70module_param_named(modeset, nouveau_modeset, int, 0400); 73module_param_named(modeset, nouveau_modeset, int, 0400);
71 74
75MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
76int nouveau_runtime_pm = -1;
77module_param_named(runpm, nouveau_runtime_pm, int, 0400);
78
72static struct drm_driver driver; 79static struct drm_driver driver;
73 80
74static int 81static int
@@ -192,6 +199,18 @@ nouveau_accel_init(struct nouveau_drm *drm)
192 199
193 arg0 = NVE0_CHANNEL_IND_ENGINE_GR; 200 arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
194 arg1 = 1; 201 arg1 = 1;
202 } else
203 if (device->chipset >= 0xa3 &&
204 device->chipset != 0xaa &&
205 device->chipset != 0xac) {
206 ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
207 NVDRM_CHAN + 1, NvDmaFB, NvDmaTT,
208 &drm->cechan);
209 if (ret)
210 NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
211
212 arg0 = NvDmaFB;
213 arg1 = NvDmaTT;
195 } else { 214 } else {
196 arg0 = NvDmaFB; 215 arg0 = NvDmaFB;
197 arg1 = NvDmaTT; 216 arg1 = NvDmaTT;
@@ -284,7 +303,30 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
284 return 0; 303 return 0;
285} 304}
286 305
287static struct lock_class_key drm_client_lock_class_key; 306#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
307
308static void
309nouveau_get_hdmi_dev(struct drm_device *dev)
310{
311 struct nouveau_drm *drm = dev->dev_private;
312 struct pci_dev *pdev = dev->pdev;
313
314 /* check whether PCI subfunction 1 is an HDMI audio device */
315 drm->hdmi_device = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
316 PCI_DEVFN(PCI_SLOT(pdev->devfn), 1));
317
318 if (!drm->hdmi_device) {
319 DRM_INFO("hdmi device not found %d %d %d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), 1);
320 return;
321 }
322
323 if ((drm->hdmi_device->class >> 8) != PCI_CLASS_MULTIMEDIA_HD_AUDIO) {
324 DRM_INFO("possible hdmi device not audio %d\n", drm->hdmi_device->class);
325 pci_dev_put(drm->hdmi_device);
326 drm->hdmi_device = NULL;
327 return;
328 }
329}
288 330
289static int 331static int
290nouveau_drm_load(struct drm_device *dev, unsigned long flags) 332nouveau_drm_load(struct drm_device *dev, unsigned long flags)
(The nouveau_get_hdmi_dev() helper added below locates the GPU's companion HDMI audio function, PCI function 1 of the same slot, and keeps a reference only when its class code confirms it is an HD-audio device.)
@@ -297,7 +339,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
297 ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); 339 ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
298 if (ret) 340 if (ret)
299 return ret; 341 return ret;
300 lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key);
301 342
302 dev->dev_private = drm; 343 dev->dev_private = drm;
303 drm->dev = dev; 344 drm->dev = dev;
@@ -305,6 +346,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
305 INIT_LIST_HEAD(&drm->clients); 346 INIT_LIST_HEAD(&drm->clients);
306 spin_lock_init(&drm->tile.lock); 347 spin_lock_init(&drm->tile.lock);
307 348
349 nouveau_get_hdmi_dev(dev);
350
308 /* make sure AGP controller is in a consistent state before we 351 /* make sure AGP controller is in a consistent state before we
309 * (possibly) execute vbios init tables (see nouveau_agp.h) 352 * (possibly) execute vbios init tables (see nouveau_agp.h)
310 */ 353 */
@@ -379,6 +422,15 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
379 422
380 nouveau_accel_init(drm); 423 nouveau_accel_init(drm);
381 nouveau_fbcon_init(dev); 424 nouveau_fbcon_init(dev);
425
426 if (nouveau_runtime_pm != 0) {
427 pm_runtime_use_autosuspend(dev->dev);
428 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
429 pm_runtime_set_active(dev->dev);
430 pm_runtime_allow(dev->dev);
431 pm_runtime_mark_last_busy(dev->dev);
432 pm_runtime_put(dev->dev);
433 }
382 return 0; 434 return 0;
383 435
384fail_dispinit: 436fail_dispinit:
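
The block above enables autosuspend and immediately drops the initial reference with pm_runtime_put(), so the GPU may power itself down a few seconds after load. The unload hunk that follows re-takes a synchronous reference first, so teardown never races a sleeping device. Sketched as a balanced pair, with my_drv_* as hypothetical helper names (the pm_runtime_forbid() call is a common companion idiom, not something this patch adds):

#include <linux/pm_runtime.h>

static void my_drv_runtime_enable(struct device *dev)	/* hypothetical */
{
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 5000);	/* idle ms before suspend */
	pm_runtime_set_active(dev);		/* device is currently powered */
	pm_runtime_allow(dev);			/* permit runtime PM on it */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);			/* drop the probe-time reference */
}

static void my_drv_runtime_disable(struct device *dev)	/* hypothetical */
{
	pm_runtime_get_sync(dev);	/* wake, and pin awake for teardown */
	pm_runtime_forbid(dev);		/* optional: block further runtime PM */
}
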
@@ -400,6 +452,7 @@ nouveau_drm_unload(struct drm_device *dev)
400{ 452{
401 struct nouveau_drm *drm = nouveau_drm(dev); 453 struct nouveau_drm *drm = nouveau_drm(dev);
402 454
455 pm_runtime_get_sync(dev->dev);
403 nouveau_fbcon_fini(dev); 456 nouveau_fbcon_fini(dev);
404 nouveau_accel_fini(drm); 457 nouveau_accel_fini(drm);
405 458
@@ -415,6 +468,8 @@ nouveau_drm_unload(struct drm_device *dev)
415 nouveau_agp_fini(drm); 468 nouveau_agp_fini(drm);
416 nouveau_vga_fini(drm); 469 nouveau_vga_fini(drm);
417 470
471 if (drm->hdmi_device)
472 pci_dev_put(drm->hdmi_device);
418 nouveau_cli_destroy(&drm->client); 473 nouveau_cli_destroy(&drm->client);
419 return 0; 474 return 0;
420} 475}
@@ -441,19 +496,16 @@ nouveau_do_suspend(struct drm_device *dev)
441 int ret; 496 int ret;
442 497
443 if (dev->mode_config.num_crtc) { 498 if (dev->mode_config.num_crtc) {
444 NV_INFO(drm, "suspending fbcon...\n"); 499 NV_SUSPEND(drm, "suspending display...\n");
445 nouveau_fbcon_set_suspend(dev, 1);
446
447 NV_INFO(drm, "suspending display...\n");
448 ret = nouveau_display_suspend(dev); 500 ret = nouveau_display_suspend(dev);
449 if (ret) 501 if (ret)
450 return ret; 502 return ret;
451 } 503 }
452 504
453 NV_INFO(drm, "evicting buffers...\n"); 505 NV_SUSPEND(drm, "evicting buffers...\n");
454 ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM); 506 ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
455 507
456 NV_INFO(drm, "waiting for kernel channels to go idle...\n"); 508 NV_SUSPEND(drm, "waiting for kernel channels to go idle...\n");
457 if (drm->cechan) { 509 if (drm->cechan) {
458 ret = nouveau_channel_idle(drm->cechan); 510 ret = nouveau_channel_idle(drm->cechan);
459 if (ret) 511 if (ret)
@@ -466,7 +518,7 @@ nouveau_do_suspend(struct drm_device *dev)
466 return ret; 518 return ret;
467 } 519 }
468 520
469 NV_INFO(drm, "suspending client object trees...\n"); 521 NV_SUSPEND(drm, "suspending client object trees...\n");
470 if (drm->fence && nouveau_fence(drm)->suspend) { 522 if (drm->fence && nouveau_fence(drm)->suspend) {
471 if (!nouveau_fence(drm)->suspend(drm)) 523 if (!nouveau_fence(drm)->suspend(drm))
472 return -ENOMEM; 524 return -ENOMEM;
@@ -478,7 +530,7 @@ nouveau_do_suspend(struct drm_device *dev)
478 goto fail_client; 530 goto fail_client;
479 } 531 }
480 532
481 NV_INFO(drm, "suspending kernel object tree...\n"); 533 NV_SUSPEND(drm, "suspending kernel object tree...\n");
482 ret = nouveau_client_fini(&drm->client.base, true); 534 ret = nouveau_client_fini(&drm->client.base, true);
483 if (ret) 535 if (ret)
484 goto fail_client; 536 goto fail_client;
@@ -492,7 +544,7 @@ fail_client:
492 } 544 }
493 545
494 if (dev->mode_config.num_crtc) { 546 if (dev->mode_config.num_crtc) {
495 NV_INFO(drm, "resuming display...\n"); 547 NV_SUSPEND(drm, "resuming display...\n");
496 nouveau_display_resume(dev); 548 nouveau_display_resume(dev);
497 } 549 }
498 return ret; 550 return ret;
@@ -504,9 +556,14 @@ int nouveau_pmops_suspend(struct device *dev)
504 struct drm_device *drm_dev = pci_get_drvdata(pdev); 556 struct drm_device *drm_dev = pci_get_drvdata(pdev);
505 int ret; 557 int ret;
506 558
507 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 559 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
560 drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
508 return 0; 561 return 0;
509 562
563 if (drm_dev->mode_config.num_crtc)
564 nouveau_fbcon_set_suspend(drm_dev, 1);
565
566 nv_suspend_set_printk_level(NV_DBG_INFO);
510 ret = nouveau_do_suspend(drm_dev); 567 ret = nouveau_do_suspend(drm_dev);
511 if (ret) 568 if (ret)
512 return ret; 569 return ret;
@@ -514,6 +571,7 @@ int nouveau_pmops_suspend(struct device *dev)
514 pci_save_state(pdev); 571 pci_save_state(pdev);
515 pci_disable_device(pdev); 572 pci_disable_device(pdev);
516 pci_set_power_state(pdev, PCI_D3hot); 573 pci_set_power_state(pdev, PCI_D3hot);
574 nv_suspend_set_printk_level(NV_DBG_DEBUG);
517 575
518 return 0; 576 return 0;
519} 577}
@@ -524,15 +582,15 @@ nouveau_do_resume(struct drm_device *dev)
524 struct nouveau_drm *drm = nouveau_drm(dev); 582 struct nouveau_drm *drm = nouveau_drm(dev);
525 struct nouveau_cli *cli; 583 struct nouveau_cli *cli;
526 584
527 NV_INFO(drm, "re-enabling device...\n"); 585 NV_SUSPEND(drm, "re-enabling device...\n");
528 586
529 nouveau_agp_reset(drm); 587 nouveau_agp_reset(drm);
530 588
531 NV_INFO(drm, "resuming kernel object tree...\n"); 589 NV_SUSPEND(drm, "resuming kernel object tree...\n");
532 nouveau_client_init(&drm->client.base); 590 nouveau_client_init(&drm->client.base);
533 nouveau_agp_init(drm); 591 nouveau_agp_init(drm);
534 592
535 NV_INFO(drm, "resuming client object trees...\n"); 593 NV_SUSPEND(drm, "resuming client object trees...\n");
536 if (drm->fence && nouveau_fence(drm)->resume) 594 if (drm->fence && nouveau_fence(drm)->resume)
537 nouveau_fence(drm)->resume(drm); 595 nouveau_fence(drm)->resume(drm);
538 596
@@ -544,9 +602,10 @@ nouveau_do_resume(struct drm_device *dev)
544 nouveau_pm_resume(dev); 602 nouveau_pm_resume(dev);
545 603
546 if (dev->mode_config.num_crtc) { 604 if (dev->mode_config.num_crtc) {
547 NV_INFO(drm, "resuming display...\n"); 605 NV_SUSPEND(drm, "resuming display...\n");
548 nouveau_display_resume(dev); 606 nouveau_display_repin(dev);
549 } 607 }
608
550 return 0; 609 return 0;
551} 610}
552 611
@@ -556,7 +615,8 @@ int nouveau_pmops_resume(struct device *dev)
556 struct drm_device *drm_dev = pci_get_drvdata(pdev); 615 struct drm_device *drm_dev = pci_get_drvdata(pdev);
557 int ret; 616 int ret;
558 617
559 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 618 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
619 drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
560 return 0; 620 return 0;
561 621
562 pci_set_power_state(pdev, PCI_D0); 622 pci_set_power_state(pdev, PCI_D0);
@@ -566,23 +626,54 @@ int nouveau_pmops_resume(struct device *dev)
566 return ret; 626 return ret;
567 pci_set_master(pdev); 627 pci_set_master(pdev);
568 628
569 return nouveau_do_resume(drm_dev); 629 nv_suspend_set_printk_level(NV_DBG_INFO);
630 ret = nouveau_do_resume(drm_dev);
631 if (ret) {
632 nv_suspend_set_printk_level(NV_DBG_DEBUG);
633 return ret;
634 }
635 if (drm_dev->mode_config.num_crtc)
636 nouveau_fbcon_set_suspend(drm_dev, 0);
637
638 nouveau_fbcon_zfill_all(drm_dev);
639 nouveau_display_resume(drm_dev);
640 nv_suspend_set_printk_level(NV_DBG_DEBUG);
641 return 0;
570} 642}
571 643
572static int nouveau_pmops_freeze(struct device *dev) 644static int nouveau_pmops_freeze(struct device *dev)
573{ 645{
574 struct pci_dev *pdev = to_pci_dev(dev); 646 struct pci_dev *pdev = to_pci_dev(dev);
575 struct drm_device *drm_dev = pci_get_drvdata(pdev); 647 struct drm_device *drm_dev = pci_get_drvdata(pdev);
648 int ret;
649
650 nv_suspend_set_printk_level(NV_DBG_INFO);
651 if (drm_dev->mode_config.num_crtc)
652 nouveau_fbcon_set_suspend(drm_dev, 1);
576 653
577 return nouveau_do_suspend(drm_dev); 654 ret = nouveau_do_suspend(drm_dev);
655 nv_suspend_set_printk_level(NV_DBG_DEBUG);
656 return ret;
578} 657}
579 658
580static int nouveau_pmops_thaw(struct device *dev) 659static int nouveau_pmops_thaw(struct device *dev)
581{ 660{
582 struct pci_dev *pdev = to_pci_dev(dev); 661 struct pci_dev *pdev = to_pci_dev(dev);
583 struct drm_device *drm_dev = pci_get_drvdata(pdev); 662 struct drm_device *drm_dev = pci_get_drvdata(pdev);
663 int ret;
584 664
585 return nouveau_do_resume(drm_dev); 665 nv_suspend_set_printk_level(NV_DBG_INFO);
666 ret = nouveau_do_resume(drm_dev);
667 if (ret) {
668 nv_suspend_set_printk_level(NV_DBG_DEBUG);
669 return ret;
670 }
671 if (drm_dev->mode_config.num_crtc)
672 nouveau_fbcon_set_suspend(drm_dev, 0);
673 nouveau_fbcon_zfill_all(drm_dev);
674 nouveau_display_resume(drm_dev);
675 nv_suspend_set_printk_level(NV_DBG_DEBUG);
676 return 0;
586} 677}
587 678
588 679
@@ -595,19 +686,24 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
595 char name[32], tmpname[TASK_COMM_LEN]; 686 char name[32], tmpname[TASK_COMM_LEN];
596 int ret; 687 int ret;
597 688
689 /* need to bring up power immediately if opening device */
690 ret = pm_runtime_get_sync(dev->dev);
691 if (ret < 0)
692 return ret;
693
598 get_task_comm(tmpname, current); 694 get_task_comm(tmpname, current);
599 snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid)); 695 snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
600 696
601 ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli); 697 ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli);
602 if (ret) 698 if (ret)
603 return ret; 699 goto out_suspend;
604 700
605 if (nv_device(drm->device)->card_type >= NV_50) { 701 if (nv_device(drm->device)->card_type >= NV_50) {
606 ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40), 702 ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
607 0x1000, &cli->base.vm); 703 0x1000, &cli->base.vm);
608 if (ret) { 704 if (ret) {
609 nouveau_cli_destroy(cli); 705 nouveau_cli_destroy(cli);
610 return ret; 706 goto out_suspend;
611 } 707 }
612 } 708 }
613 709
@@ -616,7 +712,12 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
616 mutex_lock(&drm->client.mutex); 712 mutex_lock(&drm->client.mutex);
617 list_add(&cli->head, &drm->clients); 713 list_add(&cli->head, &drm->clients);
618 mutex_unlock(&drm->client.mutex); 714 mutex_unlock(&drm->client.mutex);
619 return 0; 715
716out_suspend:
717 pm_runtime_mark_last_busy(dev->dev);
718 pm_runtime_put_autosuspend(dev->dev);
719
720 return ret;
620} 721}
621 722
622static void 723static void
@@ -625,12 +726,15 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
625 struct nouveau_cli *cli = nouveau_cli(fpriv); 726 struct nouveau_cli *cli = nouveau_cli(fpriv);
626 struct nouveau_drm *drm = nouveau_drm(dev); 727 struct nouveau_drm *drm = nouveau_drm(dev);
627 728
729 pm_runtime_get_sync(dev->dev);
730
628 if (cli->abi16) 731 if (cli->abi16)
629 nouveau_abi16_fini(cli->abi16); 732 nouveau_abi16_fini(cli->abi16);
630 733
631 mutex_lock(&drm->client.mutex); 734 mutex_lock(&drm->client.mutex);
632 list_del(&cli->head); 735 list_del(&cli->head);
633 mutex_unlock(&drm->client.mutex); 736 mutex_unlock(&drm->client.mutex);
737
634} 738}
635 739
636static void 740static void
@@ -638,33 +742,52 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
638{ 742{
639 struct nouveau_cli *cli = nouveau_cli(fpriv); 743 struct nouveau_cli *cli = nouveau_cli(fpriv);
640 nouveau_cli_destroy(cli); 744 nouveau_cli_destroy(cli);
745 pm_runtime_mark_last_busy(dev->dev);
746 pm_runtime_put_autosuspend(dev->dev);
641} 747}
642 748
643static struct drm_ioctl_desc 749static const struct drm_ioctl_desc
644nouveau_ioctls[] = { 750nouveau_ioctls[] = {
645 DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH), 751 DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
646 DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 752 DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
647 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH), 753 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
648 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH), 754 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
649 DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH), 755 DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
650 DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH), 756 DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
651 DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH), 757 DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
652 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH), 758 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
653 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH), 759 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
654 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH), 760 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
655 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH), 761 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
656 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH), 762 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
657}; 763};
658 764
765long nouveau_drm_ioctl(struct file *filp,
766 unsigned int cmd, unsigned long arg)
767{
768 struct drm_file *file_priv = filp->private_data;
769 struct drm_device *dev;
770 long ret;
771 dev = file_priv->minor->dev;
772
773 ret = pm_runtime_get_sync(dev->dev);
774 if (ret < 0)
775 return ret;
776
777 ret = drm_ioctl(filp, cmd, arg);
778
779 pm_runtime_mark_last_busy(dev->dev);
780 pm_runtime_put_autosuspend(dev->dev);
781 return ret;
782}
659static const struct file_operations 783static const struct file_operations
660nouveau_driver_fops = { 784nouveau_driver_fops = {
661 .owner = THIS_MODULE, 785 .owner = THIS_MODULE,
662 .open = drm_open, 786 .open = drm_open,
663 .release = drm_release, 787 .release = drm_release,
664 .unlocked_ioctl = drm_ioctl, 788 .unlocked_ioctl = nouveau_drm_ioctl,
665 .mmap = nouveau_ttm_mmap, 789 .mmap = nouveau_ttm_mmap,
666 .poll = drm_poll, 790 .poll = drm_poll,
667 .fasync = drm_fasync,
668 .read = drm_read, 791 .read = drm_read,
669#if defined(CONFIG_COMPAT) 792#if defined(CONFIG_COMPAT)
670 .compat_ioctl = nouveau_compat_ioctl, 793 .compat_ioctl = nouveau_compat_ioctl,
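
nouveau_drm_ioctl() above is the general shape every user-triggered entry point now takes: synchronously wake the device, do the work, then re-arm the autosuspend timer and drop the reference. The same bracket appears around file lifetime (get in open, put in postclose, seen earlier). A condensed sketch; my_entry_point is illustrative:

static long my_entry_point(struct drm_device *dev)	/* hypothetical */
{
	long ret;

	ret = pm_runtime_get_sync(dev->dev);	/* resume the GPU if suspended */
	if (ret < 0)
		return ret;

	/* ... touch the hardware ... */
	ret = 0;

	pm_runtime_mark_last_busy(dev->dev);	/* restart the idle window */
	pm_runtime_put_autosuspend(dev->dev);	/* may schedule autosuspend */
	return ret;
}
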
@@ -675,8 +798,8 @@ nouveau_driver_fops = {
675static struct drm_driver 798static struct drm_driver
676driver = { 799driver = {
677 .driver_features = 800 .driver_features =
678 DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | 801 DRIVER_USE_AGP |
679 DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME, 802 DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
680 803
681 .load = nouveau_drm_load, 804 .load = nouveau_drm_load,
682 .unload = nouveau_drm_unload, 805 .unload = nouveau_drm_unload,
@@ -695,6 +818,7 @@ driver = {
695 .disable_vblank = nouveau_drm_vblank_disable, 818 .disable_vblank = nouveau_drm_vblank_disable,
696 819
697 .ioctls = nouveau_ioctls, 820 .ioctls = nouveau_ioctls,
821 .num_ioctls = ARRAY_SIZE(nouveau_ioctls),
698 .fops = &nouveau_driver_fops, 822 .fops = &nouveau_driver_fops,
699 823
700 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 824 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -715,7 +839,7 @@ driver = {
715 839
716 .dumb_create = nouveau_display_dumb_create, 840 .dumb_create = nouveau_display_dumb_create,
717 .dumb_map_offset = nouveau_display_dumb_map_offset, 841 .dumb_map_offset = nouveau_display_dumb_map_offset,
718 .dumb_destroy = nouveau_display_dumb_destroy, 842 .dumb_destroy = drm_gem_dumb_destroy,
719 843
720 .name = DRIVER_NAME, 844 .name = DRIVER_NAME,
721 .desc = DRIVER_DESC, 845 .desc = DRIVER_DESC,
@@ -744,6 +868,90 @@ nouveau_drm_pci_table[] = {
744 {} 868 {}
745}; 869};
746 870
871static int nouveau_pmops_runtime_suspend(struct device *dev)
872{
873 struct pci_dev *pdev = to_pci_dev(dev);
874 struct drm_device *drm_dev = pci_get_drvdata(pdev);
875 int ret;
876
877 if (nouveau_runtime_pm == 0)
878 return -EINVAL;
879
880 drm_kms_helper_poll_disable(drm_dev);
881 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
882 nouveau_switcheroo_optimus_dsm();
883 ret = nouveau_do_suspend(drm_dev);
884 pci_save_state(pdev);
885 pci_disable_device(pdev);
886 pci_set_power_state(pdev, PCI_D3cold);
887 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
888 return ret;
889}
890
891static int nouveau_pmops_runtime_resume(struct device *dev)
892{
893 struct pci_dev *pdev = to_pci_dev(dev);
894 struct drm_device *drm_dev = pci_get_drvdata(pdev);
895 struct nouveau_device *device = nouveau_dev(drm_dev);
896 int ret;
897
898 if (nouveau_runtime_pm == 0)
899 return -EINVAL;
900
901 pci_set_power_state(pdev, PCI_D0);
902 pci_restore_state(pdev);
903 ret = pci_enable_device(pdev);
904 if (ret)
905 return ret;
906 pci_set_master(pdev);
907
908 ret = nouveau_do_resume(drm_dev);
909 nouveau_display_resume(drm_dev);
910 drm_kms_helper_poll_enable(drm_dev);
911 /* do magic: undocumented register poke needed on runtime resume */
912 nv_mask(device, 0x88488, (1 << 25), (1 << 25));
913 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
914 drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
915 return ret;
916}
917
918static int nouveau_pmops_runtime_idle(struct device *dev)
919{
920 struct pci_dev *pdev = to_pci_dev(dev);
921 struct drm_device *drm_dev = pci_get_drvdata(pdev);
922 struct nouveau_drm *drm = nouveau_drm(drm_dev);
923 struct drm_crtc *crtc;
924
925 if (nouveau_runtime_pm == 0)
926 return -EBUSY;
927
928 /* are we optimus enabled? */
929 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
930 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
931 return -EBUSY;
932 }
933
934 /* if we have an HDMI audio device, make sure it has a driver loaded */
935 if (drm->hdmi_device) {
936 if (!drm->hdmi_device->driver) {
937 DRM_DEBUG_DRIVER("failing to power off - no HDMI audio driver loaded\n");
938 pm_runtime_mark_last_busy(dev);
939 return -EBUSY;
940 }
941 }
942
943 list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
944 if (crtc->enabled) {
945 DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
946 return -EBUSY;
947 }
948 }
949 pm_runtime_mark_last_busy(dev);
950 pm_runtime_autosuspend(dev);
951 /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
952 return 1;
953}
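
nouveau_pmops_runtime_idle() above leans on a subtle contract: returning a negative errno vetoes the power-down, while returning a positive value tells the PM core the callback handled idling itself, so no runtime_suspend is issued directly and the autosuspend queued just above fires later instead. Distilled, with device_still_busy() as a placeholder predicate:

static int my_runtime_idle(struct device *dev)	/* hypothetical */
{
	if (device_still_busy(dev))	/* placeholder: crtc active, etc. */
		return -EBUSY;		/* veto: stay powered */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_autosuspend(dev);	/* suspend after the idle delay */
	return 1;			/* handled: core skips direct suspend */
}
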
954
747static const struct dev_pm_ops nouveau_pm_ops = { 955static const struct dev_pm_ops nouveau_pm_ops = {
748 .suspend = nouveau_pmops_suspend, 956 .suspend = nouveau_pmops_suspend,
749 .resume = nouveau_pmops_resume, 957 .resume = nouveau_pmops_resume,
@@ -751,6 +959,9 @@ static const struct dev_pm_ops nouveau_pm_ops = {
751 .thaw = nouveau_pmops_thaw, 959 .thaw = nouveau_pmops_thaw,
752 .poweroff = nouveau_pmops_freeze, 960 .poweroff = nouveau_pmops_freeze,
753 .restore = nouveau_pmops_resume, 961 .restore = nouveau_pmops_resume,
962 .runtime_suspend = nouveau_pmops_runtime_suspend,
963 .runtime_resume = nouveau_pmops_runtime_resume,
964 .runtime_idle = nouveau_pmops_runtime_idle,
754}; 965};
755 966
756static struct pci_driver 967static struct pci_driver
@@ -765,8 +976,6 @@ nouveau_drm_pci_driver = {
765static int __init 976static int __init
766nouveau_drm_init(void) 977nouveau_drm_init(void)
767{ 978{
768 driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls);
769
770 if (nouveau_modeset == -1) { 979 if (nouveau_modeset == -1) {
771#ifdef CONFIG_VGA_CONSOLE 980#ifdef CONFIG_VGA_CONSOLE
772 if (vgacon_text_force()) 981 if (vgacon_text_force())
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 41ff7e0d403a..994fd6ec373b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -70,6 +70,8 @@ nouveau_cli(struct drm_file *fpriv)
70 return fpriv ? fpriv->driver_priv : NULL; 70 return fpriv ? fpriv->driver_priv : NULL;
71} 71}
72 72
73extern int nouveau_runtime_pm;
74
73struct nouveau_drm { 75struct nouveau_drm {
74 struct nouveau_cli client; 76 struct nouveau_cli client;
75 struct drm_device *dev; 77 struct drm_device *dev;
@@ -129,6 +131,12 @@ struct nouveau_drm {
129 131
130 /* power management */ 132 /* power management */
131 struct nouveau_pm *pm; 133 struct nouveau_pm *pm;
134
135 /* display power reference */
136 bool have_disp_power_ref;
137
138 struct dev_pm_domain vga_pm_domain;
139 struct pci_dev *hdmi_device;
132}; 140};
133 141
134static inline struct nouveau_drm * 142static inline struct nouveau_drm *
@@ -146,6 +154,7 @@ nouveau_dev(struct drm_device *dev)
146int nouveau_pmops_suspend(struct device *); 154int nouveau_pmops_suspend(struct device *);
147int nouveau_pmops_resume(struct device *); 155int nouveau_pmops_resume(struct device *);
148 156
157#define NV_SUSPEND(cli, fmt, args...) nv_suspend((cli), fmt, ##args)
149#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args) 158#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
150#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args) 159#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
151#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args) 160#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 9352010030e9..8f6d63d7edd3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -385,6 +385,7 @@ out_unlock:
385 mutex_unlock(&dev->struct_mutex); 385 mutex_unlock(&dev->struct_mutex);
386 if (chan) 386 if (chan)
387 nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma); 387 nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma);
388 nouveau_bo_unmap(nvbo);
388out_unpin: 389out_unpin:
389 nouveau_bo_unpin(nvbo); 390 nouveau_bo_unpin(nvbo);
390out_unref: 391out_unref:
@@ -397,7 +398,8 @@ void
397nouveau_fbcon_output_poll_changed(struct drm_device *dev) 398nouveau_fbcon_output_poll_changed(struct drm_device *dev)
398{ 399{
399 struct nouveau_drm *drm = nouveau_drm(dev); 400 struct nouveau_drm *drm = nouveau_drm(dev);
400 drm_fb_helper_hotplug_event(&drm->fbcon->helper); 401 if (drm->fbcon)
402 drm_fb_helper_hotplug_event(&drm->fbcon->helper);
401} 403}
402 404
403static int 405static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 1680d9187bab..be3149932c2d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -143,7 +143,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
143 int ret; 143 int ret;
144 144
145 fence->channel = chan; 145 fence->channel = chan;
146 fence->timeout = jiffies + (3 * DRM_HZ); 146 fence->timeout = jiffies + (15 * DRM_HZ);
147 fence->sequence = ++fctx->sequence; 147 fence->sequence = ++fctx->sequence;
148 148
149 ret = fctx->emit(fence); 149 ret = fctx->emit(fence);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index e72d09c068a8..487242fb3fdc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -50,12 +50,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
50 return; 50 return;
51 nvbo->gem = NULL; 51 nvbo->gem = NULL;
52 52
53 /* Lockdep hates you for doing reserve with gem object lock held */
54 if (WARN_ON_ONCE(nvbo->pin_refcnt)) {
55 nvbo->pin_refcnt = 1;
56 nouveau_bo_unpin(nvbo);
57 }
58
59 if (gem->import_attach) 53 if (gem->import_attach)
60 drm_prime_gem_destroy(gem, nvbo->bo.sg); 54 drm_prime_gem_destroy(gem, nvbo->bo.sg);
61 55
@@ -226,7 +220,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
226 } 220 }
227 221
228 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; 222 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
229 rep->map_handle = nvbo->bo.addr_space_offset; 223 rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
230 rep->tile_mode = nvbo->tile_mode; 224 rep->tile_mode = nvbo->tile_mode;
231 rep->tile_flags = nvbo->tile_flags; 225 rep->tile_flags = nvbo->tile_flags;
232 return 0; 226 return 0;
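
This hunk and the matching one in nouveau_display.c swap the removed TTM addr_space_offset field for the new drm_vma_manager helper: the fake mmap offset handed to userspace now comes from the bo's embedded vma_node. A one-liner sketch, assuming <drm/drm_vma_manager.h> from this series:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/drm_vma_manager.h>

/* Sketch: the fake mmap offset userspace passes back to mmap(). */
static u64 bo_mmap_offset(struct ttm_buffer_object *bo)
{
	return drm_vma_node_offset_addr(&bo->vma_node);
}
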
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
index 08214bcdcb12..c1a7e5a73a26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -63,7 +63,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
63 if (fn != NULL) 63 if (fn != NULL)
64 ret = (*fn)(filp, cmd, arg); 64 ret = (*fn)(filp, cmd, arg);
65 else 65 else
66 ret = drm_ioctl(filp, cmd, arg); 66 ret = nouveau_drm_ioctl(filp, cmd, arg);
67 67
68 return ret; 68 return ret;
69} 69}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioctl.h b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
index ef2b2906d9e6..3b9f2e5463a7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioctl.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
@@ -2,5 +2,6 @@
2#define __NOUVEAU_IOCTL_H__ 2#define __NOUVEAU_IOCTL_H__
3 3
4long nouveau_compat_ioctl(struct file *, unsigned int cmd, unsigned long arg); 4long nouveau_compat_ioctl(struct file *, unsigned int cmd, unsigned long arg);
5long nouveau_drm_ioctl(struct file *, unsigned int cmd, unsigned long arg);
5 6
6#endif 7#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 25d3495725eb..81638d7f2eff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -32,6 +32,9 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
32{ 32{
33 struct drm_device *dev = pci_get_drvdata(pdev); 33 struct drm_device *dev = pci_get_drvdata(pdev);
34 34
35 if ((nouveau_is_optimus() || nouveau_is_v1_dsm()) && state == VGA_SWITCHEROO_OFF)
36 return;
37
35 if (state == VGA_SWITCHEROO_ON) { 38 if (state == VGA_SWITCHEROO_ON) {
36 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); 39 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
37 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 40 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
@@ -78,8 +81,17 @@ void
78nouveau_vga_init(struct nouveau_drm *drm) 81nouveau_vga_init(struct nouveau_drm *drm)
79{ 82{
80 struct drm_device *dev = drm->dev; 83 struct drm_device *dev = drm->dev;
84 bool runtime = false;
81 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); 85 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
82 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops); 86
87 if (nouveau_runtime_pm == 1)
88 runtime = true;
89 if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
90 runtime = true;
91 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
92
93 if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
94 vga_switcheroo_init_domain_pm_ops(drm->dev->dev, &drm->vga_pm_domain);
83} 95}
84 96
85void 97void
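
nouveau_vga_init() now passes a third argument to vga_switcheroo_register_client() declaring whether the client supports driver power control, and installs a PM domain for the pre-Optimus (v1 DSM) path. A sketch of the registration decision; register_switcheroo and its parameters are illustrative, with driver_runtime_param standing in for the runpm module option (-1/0/1):

#include <linux/vga_switcheroo.h>

static void register_switcheroo(struct pci_dev *pdev,
				const struct vga_switcheroo_client_ops *ops,
				int driver_runtime_param, bool is_optimus)
{
	bool runtime = (driver_runtime_param == 1) ||
		       (driver_runtime_param == -1 && is_optimus);

	vga_switcheroo_register_client(pdev, ops, runtime);
}
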
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 8e47a9bae8c3..22aa9963ea6f 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -76,7 +76,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
76 struct ttm_mem_reg *mem = &priv->bo->bo.mem; 76 struct ttm_mem_reg *mem = &priv->bo->bo.mem;
77 struct nouveau_object *object; 77 struct nouveau_object *object;
78 u32 start = mem->start * PAGE_SIZE; 78 u32 start = mem->start * PAGE_SIZE;
79 u32 limit = mem->start + mem->size - 1; 79 u32 limit = start + mem->size - 1;
80 int ret = 0; 80 int ret = 0;
81 81
82 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); 82 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
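
The one-line fix above corrects a units mix-up: ttm_mem_reg.start is a page index while .size is a byte count, so the old limit added pages to bytes. The corrected derivation, which the nv50_fence.c hunk further down applies in the same form:

#include <linux/mm.h>
#include <drm/ttm/ttm_bo_api.h>

/* Sketch: inclusive byte range of a ttm_mem_reg placement. */
static void mem_byte_range(const struct ttm_mem_reg *mem,
			   u32 *start, u32 *limit)
{
	*start = mem->start * PAGE_SIZE;	/* pages to bytes */
	*limit = *start + mem->size - 1;	/* size is already in bytes */
}
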
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index 3af5bcd0b203..625f80d53dc2 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -131,7 +131,7 @@ nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
131 if (clk < pll->vco1.max_freq) 131 if (clk < pll->vco1.max_freq)
132 pll->vco2.max_freq = 0; 132 pll->vco2.max_freq = 0;
133 133
134 pclk->pll_calc(pclk, pll, clk, &coef); 134 ret = pclk->pll_calc(pclk, pll, clk, &coef);
135 if (ret == 0) 135 if (ret == 0)
136 return -ERANGE; 136 return -ERANGE;
137 137
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 54dc6355b0c2..9d2092a5ed38 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -355,6 +355,7 @@ struct nv50_oimm {
355 355
356struct nv50_head { 356struct nv50_head {
357 struct nouveau_crtc base; 357 struct nouveau_crtc base;
358 struct nouveau_bo *image;
358 struct nv50_curs curs; 359 struct nv50_curs curs;
359 struct nv50_sync sync; 360 struct nv50_sync sync;
360 struct nv50_ovly ovly; 361 struct nv50_ovly ovly;
@@ -517,9 +518,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
517{ 518{
518 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); 519 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
519 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 520 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
521 struct nv50_head *head = nv50_head(crtc);
520 struct nv50_sync *sync = nv50_sync(crtc); 522 struct nv50_sync *sync = nv50_sync(crtc);
521 int head = nv_crtc->index, ret;
522 u32 *push; 523 u32 *push;
524 int ret;
523 525
524 swap_interval <<= 4; 526 swap_interval <<= 4;
525 if (swap_interval == 0) 527 if (swap_interval == 0)
@@ -537,7 +539,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
537 return ret; 539 return ret;
538 540
539 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); 541 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
540 OUT_RING (chan, NvEvoSema0 + head); 542 OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
541 OUT_RING (chan, sync->addr ^ 0x10); 543 OUT_RING (chan, sync->addr ^ 0x10);
542 BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); 544 BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
543 OUT_RING (chan, sync->data + 1); 545 OUT_RING (chan, sync->data + 1);
@@ -546,7 +548,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
546 OUT_RING (chan, sync->data); 548 OUT_RING (chan, sync->data);
547 } else 549 } else
548 if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { 550 if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
549 u64 addr = nv84_fence_crtc(chan, head) + sync->addr; 551 u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
550 ret = RING_SPACE(chan, 12); 552 ret = RING_SPACE(chan, 12);
551 if (ret) 553 if (ret)
552 return ret; 554 return ret;
@@ -565,7 +567,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
565 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL); 567 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
566 } else 568 } else
567 if (chan) { 569 if (chan) {
568 u64 addr = nv84_fence_crtc(chan, head) + sync->addr; 570 u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
569 ret = RING_SPACE(chan, 10); 571 ret = RING_SPACE(chan, 10);
570 if (ret) 572 if (ret)
571 return ret; 573 return ret;
@@ -630,6 +632,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
630 evo_mthd(push, 0x0080, 1); 632 evo_mthd(push, 0x0080, 1);
631 evo_data(push, 0x00000000); 633 evo_data(push, 0x00000000);
632 evo_kick(push, sync); 634 evo_kick(push, sync);
635
636 nouveau_bo_ref(nv_fb->nvbo, &head->image);
633 return 0; 637 return 0;
634} 638}
635 639
@@ -1038,18 +1042,17 @@ static int
1038nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) 1042nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
1039{ 1043{
1040 struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); 1044 struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
1045 struct nv50_head *head = nv50_head(crtc);
1041 int ret; 1046 int ret;
1042 1047
1043 ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); 1048 ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
1044 if (ret) 1049 if (ret == 0) {
1045 return ret; 1050 if (head->image)
1046 1051 nouveau_bo_unpin(head->image);
1047 if (old_fb) { 1052 nouveau_bo_ref(nvfb->nvbo, &head->image);
1048 nvfb = nouveau_framebuffer(old_fb);
1049 nouveau_bo_unpin(nvfb->nvbo);
1050 } 1053 }
1051 1054
1052 return 0; 1055 return ret;
1053} 1056}
1054 1057
1055static int 1058static int
@@ -1198,6 +1201,15 @@ nv50_crtc_lut_load(struct drm_crtc *crtc)
1198 } 1201 }
1199} 1202}
1200 1203
1204static void
1205nv50_crtc_disable(struct drm_crtc *crtc)
1206{
1207 struct nv50_head *head = nv50_head(crtc);
1208 if (head->image)
1209 nouveau_bo_unpin(head->image);
1210 nouveau_bo_ref(NULL, &head->image);
1211}
1212
1201static int 1213static int
1202nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, 1214nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
1203 uint32_t handle, uint32_t width, uint32_t height) 1215 uint32_t handle, uint32_t width, uint32_t height)
@@ -1271,18 +1283,29 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
1271 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 1283 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
1272 struct nv50_disp *disp = nv50_disp(crtc->dev); 1284 struct nv50_disp *disp = nv50_disp(crtc->dev);
1273 struct nv50_head *head = nv50_head(crtc); 1285 struct nv50_head *head = nv50_head(crtc);
1286
1274 nv50_dmac_destroy(disp->core, &head->ovly.base); 1287 nv50_dmac_destroy(disp->core, &head->ovly.base);
1275 nv50_pioc_destroy(disp->core, &head->oimm.base); 1288 nv50_pioc_destroy(disp->core, &head->oimm.base);
1276 nv50_dmac_destroy(disp->core, &head->sync.base); 1289 nv50_dmac_destroy(disp->core, &head->sync.base);
1277 nv50_pioc_destroy(disp->core, &head->curs.base); 1290 nv50_pioc_destroy(disp->core, &head->curs.base);
1291
1292 /*XXX: this shouldn't be necessary, but the core doesn't call
1293 * disconnect() during the cleanup paths
1294 */
1295 if (head->image)
1296 nouveau_bo_unpin(head->image);
1297 nouveau_bo_ref(NULL, &head->image);
1298
1278 nouveau_bo_unmap(nv_crtc->cursor.nvbo); 1299 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
1279 if (nv_crtc->cursor.nvbo) 1300 if (nv_crtc->cursor.nvbo)
1280 nouveau_bo_unpin(nv_crtc->cursor.nvbo); 1301 nouveau_bo_unpin(nv_crtc->cursor.nvbo);
1281 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); 1302 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
1303
1282 nouveau_bo_unmap(nv_crtc->lut.nvbo); 1304 nouveau_bo_unmap(nv_crtc->lut.nvbo);
1283 if (nv_crtc->lut.nvbo) 1305 if (nv_crtc->lut.nvbo)
1284 nouveau_bo_unpin(nv_crtc->lut.nvbo); 1306 nouveau_bo_unpin(nv_crtc->lut.nvbo);
1285 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); 1307 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
1308
1286 drm_crtc_cleanup(crtc); 1309 drm_crtc_cleanup(crtc);
1287 kfree(crtc); 1310 kfree(crtc);
1288} 1311}
@@ -1296,13 +1319,14 @@ static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
1296 .mode_set_base = nv50_crtc_mode_set_base, 1319 .mode_set_base = nv50_crtc_mode_set_base,
1297 .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, 1320 .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
1298 .load_lut = nv50_crtc_lut_load, 1321 .load_lut = nv50_crtc_lut_load,
1322 .disable = nv50_crtc_disable,
1299}; 1323};
1300 1324
1301static const struct drm_crtc_funcs nv50_crtc_func = { 1325static const struct drm_crtc_funcs nv50_crtc_func = {
1302 .cursor_set = nv50_crtc_cursor_set, 1326 .cursor_set = nv50_crtc_cursor_set,
1303 .cursor_move = nv50_crtc_cursor_move, 1327 .cursor_move = nv50_crtc_cursor_move,
1304 .gamma_set = nv50_crtc_gamma_set, 1328 .gamma_set = nv50_crtc_gamma_set,
1305 .set_config = drm_crtc_helper_set_config, 1329 .set_config = nouveau_crtc_set_config,
1306 .destroy = nv50_crtc_destroy, 1330 .destroy = nv50_crtc_destroy,
1307 .page_flip = nouveau_crtc_page_flip, 1331 .page_flip = nouveau_crtc_page_flip,
1308}; 1332};
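
The nv50 hunks give each head an image pointer recording which bo is currently pinned for scanout, so the pin taken at swap/flip time has one owner that the new .disable hook (and, per the XXX comment, the destroy path) can release. The pin-then-swap-reference idiom they share, sketched; swap_scanout_bo is an illustrative name:

/* Sketch: retire the previously pinned scanout bo and adopt the new one,
 * mirroring nv50_crtc_swap_fbs() above. */
static int swap_scanout_bo(struct nouveau_bo *new_bo, struct nouveau_bo **slot)
{
	int ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);

	if (ret == 0) {
		if (*slot)
			nouveau_bo_unpin(*slot);	/* drop the old pin */
		nouveau_bo_ref(new_bo, slot);		/* swap tracked reference */
	}
	return ret;
}
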
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index f9701e567db8..0ee363840035 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -39,6 +39,8 @@ nv50_fence_context_new(struct nouveau_channel *chan)
39 struct nv10_fence_chan *fctx; 39 struct nv10_fence_chan *fctx;
40 struct ttm_mem_reg *mem = &priv->bo->bo.mem; 40 struct ttm_mem_reg *mem = &priv->bo->bo.mem;
41 struct nouveau_object *object; 41 struct nouveau_object *object;
42 u32 start = mem->start * PAGE_SIZE;
43 u32 limit = start + mem->size - 1;
42 int ret, i; 44 int ret, i;
43 45
44 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); 46 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -51,26 +53,28 @@ nv50_fence_context_new(struct nouveau_channel *chan)
51 fctx->base.sync = nv17_fence_sync; 53 fctx->base.sync = nv17_fence_sync;
52 54
53 ret = nouveau_object_new(nv_object(chan->cli), chan->handle, 55 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
54 NvSema, 0x0002, 56 NvSema, 0x003d,
55 &(struct nv_dma_class) { 57 &(struct nv_dma_class) {
56 .flags = NV_DMA_TARGET_VRAM | 58 .flags = NV_DMA_TARGET_VRAM |
57 NV_DMA_ACCESS_RDWR, 59 NV_DMA_ACCESS_RDWR,
58 .start = mem->start * PAGE_SIZE, 60 .start = start,
59 .limit = mem->size - 1, 61 .limit = limit,
60 }, sizeof(struct nv_dma_class), 62 }, sizeof(struct nv_dma_class),
61 &object); 63 &object);
62 64
63 /* dma objects for display sync channel semaphore blocks */ 65 /* dma objects for display sync channel semaphore blocks */
64 for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) { 66 for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
65 struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i); 67 struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
68 u32 start = bo->bo.mem.start * PAGE_SIZE;
69 u32 limit = start + bo->bo.mem.size - 1;
66 70
67 ret = nouveau_object_new(nv_object(chan->cli), chan->handle, 71 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
68 NvEvoSema0 + i, 0x003d, 72 NvEvoSema0 + i, 0x003d,
69 &(struct nv_dma_class) { 73 &(struct nv_dma_class) {
70 .flags = NV_DMA_TARGET_VRAM | 74 .flags = NV_DMA_TARGET_VRAM |
71 NV_DMA_ACCESS_RDWR, 75 NV_DMA_ACCESS_RDWR,
72 .start = bo->bo.offset, 76 .start = start,
73 .limit = bo->bo.offset + 0xfff, 77 .limit = limit,
74 }, sizeof(struct nv_dma_class), 78 }, sizeof(struct nv_dma_class),
75 &object); 79 &object);
76 } 80 }
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index d85e058f2845..778372b062ad 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -18,7 +18,4 @@ omapdrm-y := omap_drv.o \
18 omap_dmm_tiler.o \ 18 omap_dmm_tiler.o \
19 tcm-sita.o 19 tcm-sita.o
20 20
21# temporary:
22omapdrm-y += omap_gem_helpers.o
23
24obj-$(CONFIG_DRM_OMAP) += omapdrm.o 21obj-$(CONFIG_DRM_OMAP) += omapdrm.o
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 11a5263a5e9f..0fd2eb139f6e 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -331,7 +331,8 @@ static void page_flip_cb(void *arg)
331 331
332static int omap_crtc_page_flip_locked(struct drm_crtc *crtc, 332static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
333 struct drm_framebuffer *fb, 333 struct drm_framebuffer *fb,
334 struct drm_pending_vblank_event *event) 334 struct drm_pending_vblank_event *event,
335 uint32_t page_flip_flags)
335{ 336{
336 struct drm_device *dev = crtc->dev; 337 struct drm_device *dev = crtc->dev;
337 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 338 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 9b794c933c81..acf667859cb6 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -871,7 +871,7 @@ int tiler_map_show(struct seq_file *s, void *arg)
871 goto error; 871 goto error;
872 872
873 for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) { 873 for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
874 memset(map, 0, sizeof(h_adj * sizeof(*map))); 874 memset(map, 0, h_adj * sizeof(*map));
875 memset(global_map, ' ', (w_adj + 1) * h_adj); 875 memset(global_map, ' ', (w_adj + 1) * h_adj);
876 876
877 for (i = 0; i < omap_dmm->container_height; i++) { 877 for (i = 0; i < omap_dmm->container_height; i++) {
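
The memset fix above is a classic sizeof slip: sizeof(h_adj * sizeof(*map)) measures the type of the multiplication (a size_t), not the product, so only 4 or 8 bytes were zeroed per pass. A worked example of the bug and the fix, in plain C:

#include <string.h>
#include <stdint.h>
#include <stddef.h>

/* Worked example of the bug class fixed above. */
void clear_rows(uint32_t *map, size_t h_adj)
{
	/* BUG: sizeof applied to an expression yields the size of the
	 * expression's type, here sizeof(size_t), i.e. 4 or 8 bytes,
	 * so almost none of the buffer was cleared:
	 *
	 *	memset(map, 0, sizeof(h_adj * sizeof(*map)));
	 */

	/* Correct: pass the byte count itself. */
	memset(map, 0, h_adj * sizeof(*map));
}
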
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index a3004f12b9a3..2603d909f49c 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -419,7 +419,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
419 return ret; 419 return ret;
420} 420}
421 421
422static struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = { 422static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
423 DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH), 423 DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
424 DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 424 DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
425 DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH), 425 DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
@@ -524,12 +524,6 @@ static int dev_open(struct drm_device *dev, struct drm_file *file)
524 return 0; 524 return 0;
525} 525}
526 526
527static int dev_firstopen(struct drm_device *dev)
528{
529 DBG("firstopen: dev=%p", dev);
530 return 0;
531}
532
533/** 527/**
534 * lastclose - clean up after all DRM clients have exited 528 * lastclose - clean up after all DRM clients have exited
535 * @dev: DRM device 529 * @dev: DRM device
@@ -598,7 +592,6 @@ static const struct file_operations omapdriver_fops = {
598 .release = drm_release, 592 .release = drm_release,
599 .mmap = omap_gem_mmap, 593 .mmap = omap_gem_mmap,
600 .poll = drm_poll, 594 .poll = drm_poll,
601 .fasync = drm_fasync,
602 .read = drm_read, 595 .read = drm_read,
603 .llseek = noop_llseek, 596 .llseek = noop_llseek,
604}; 597};
@@ -609,7 +602,6 @@ static struct drm_driver omap_drm_driver = {
609 .load = dev_load, 602 .load = dev_load,
610 .unload = dev_unload, 603 .unload = dev_unload,
611 .open = dev_open, 604 .open = dev_open,
612 .firstopen = dev_firstopen,
613 .lastclose = dev_lastclose, 605 .lastclose = dev_lastclose,
614 .preclose = dev_preclose, 606 .preclose = dev_preclose,
615 .postclose = dev_postclose, 607 .postclose = dev_postclose,
@@ -633,7 +625,7 @@ static struct drm_driver omap_drm_driver = {
633 .gem_vm_ops = &omap_gem_vm_ops, 625 .gem_vm_ops = &omap_gem_vm_ops,
634 .dumb_create = omap_gem_dumb_create, 626 .dumb_create = omap_gem_dumb_create,
635 .dumb_map_offset = omap_gem_dumb_map_offset, 627 .dumb_map_offset = omap_gem_dumb_map_offset,
636 .dumb_destroy = omap_gem_dumb_destroy, 628 .dumb_destroy = drm_gem_dumb_destroy,
637 .ioctls = ioctls, 629 .ioctls = ioctls,
638 .num_ioctls = DRM_OMAP_NUM_IOCTLS, 630 .num_ioctls = DRM_OMAP_NUM_IOCTLS,
639 .fops = &omapdriver_fops, 631 .fops = &omapdriver_fops,
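
Both nouveau and omapdrm drop their identical dumb_destroy implementations in favour of the new drm_gem_dumb_destroy() core helper. Going by the nouveau copy removed earlier in this section, the operation is just a handle delete; a sketch of that body, with dumb_destroy_sketch as an illustrative name:

/* Sketch: what the removed per-driver dumb_destroy copies did, and what
 * the core drm_gem_dumb_destroy() helper is expected to do in their
 * place: simply drop the gem handle. */
static int dumb_destroy_sketch(struct drm_file *file_priv,
			       struct drm_device *dev, uint32_t handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}
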
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 14f17da2ce25..30b95b736658 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -203,9 +203,8 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
203struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, 203struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
204 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); 204 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
205struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p); 205struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
206int omap_framebuffer_replace(struct drm_framebuffer *a, 206int omap_framebuffer_pin(struct drm_framebuffer *fb);
207 struct drm_framebuffer *b, void *arg, 207int omap_framebuffer_unpin(struct drm_framebuffer *fb);
208 void (*unpin)(void *arg, struct drm_gem_object *bo));
209void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, 208void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
210 struct omap_drm_window *win, struct omap_overlay_info *info); 209 struct omap_drm_window *win, struct omap_overlay_info *info);
211struct drm_connector *omap_framebuffer_get_next_connector( 210struct drm_connector *omap_framebuffer_get_next_connector(
@@ -225,8 +224,6 @@ int omap_gem_init_object(struct drm_gem_object *obj);
225void *omap_gem_vaddr(struct drm_gem_object *obj); 224void *omap_gem_vaddr(struct drm_gem_object *obj);
226int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 225int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
227 uint32_t handle, uint64_t *offset); 226 uint32_t handle, uint64_t *offset);
228int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
229 uint32_t handle);
230int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 227int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
231 struct drm_mode_create_dumb *args); 228 struct drm_mode_create_dumb *args);
232int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma); 229int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 8031402e7951..f2b8f0668c0c 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -237,55 +237,49 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
 	}
 }
 
-/* Call for unpin 'a' (if not NULL), and pin 'b' (if not NULL).  Although
- * buffers to unpin are just pushed to the unpin fifo so that the
- * caller can defer unpin until vblank.
- *
- * Note if this fails (ie. something went very wrong!), all buffers are
- * unpinned, and the caller disables the overlay.  We could have tried
- * to revert back to the previous set of pinned buffers but if things are
- * hosed there is no guarantee that would succeed.
- */
-int omap_framebuffer_replace(struct drm_framebuffer *a,
-		struct drm_framebuffer *b, void *arg,
-		void (*unpin)(void *arg, struct drm_gem_object *bo))
-{
-	int ret = 0, i, na, nb;
-	struct omap_framebuffer *ofba = to_omap_framebuffer(a);
-	struct omap_framebuffer *ofbb = to_omap_framebuffer(b);
-	uint32_t pinned_mask = 0;
-
-	na = a ? drm_format_num_planes(a->pixel_format) : 0;
-	nb = b ? drm_format_num_planes(b->pixel_format) : 0;
-
-	for (i = 0; i < max(na, nb); i++) {
-		struct plane *pa, *pb;
-
-		pa = (i < na) ? &ofba->planes[i] : NULL;
-		pb = (i < nb) ? &ofbb->planes[i] : NULL;
-
-		if (pa)
-			unpin(arg, pa->bo);
-
-		if (pb && !ret) {
-			ret = omap_gem_get_paddr(pb->bo, &pb->paddr, true);
-			if (!ret) {
-				omap_gem_dma_sync(pb->bo, DMA_TO_DEVICE);
-				pinned_mask |= (1 << i);
-			}
-		}
-	}
-
-	if (ret) {
-		/* something went wrong.. unpin what has been pinned */
-		for (i = 0; i < nb; i++) {
-			if (pinned_mask & (1 << i)) {
-				struct plane *pb = &ofba->planes[i];
-				unpin(arg, pb->bo);
-			}
-		}
-	}
+/* pin, prepare for scanout: */
+int omap_framebuffer_pin(struct drm_framebuffer *fb)
+{
+	struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+	int ret, i, n = drm_format_num_planes(fb->pixel_format);
+
+	for (i = 0; i < n; i++) {
+		struct plane *plane = &omap_fb->planes[i];
+		ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true);
+		if (ret)
+			goto fail;
+		omap_gem_dma_sync(plane->bo, DMA_TO_DEVICE);
+	}
+
+	return 0;
+
+fail:
+	for (i--; i >= 0; i--) {
+		struct plane *plane = &omap_fb->planes[i];
+		omap_gem_put_paddr(plane->bo);
+		plane->paddr = 0;
+	}
+
+	return ret;
+}
+
+/* unpin, no longer being scanned out: */
+int omap_framebuffer_unpin(struct drm_framebuffer *fb)
+{
+	struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+	int ret, i, n = drm_format_num_planes(fb->pixel_format);
+
+	for (i = 0; i < n; i++) {
+		struct plane *plane = &omap_fb->planes[i];
+		ret = omap_gem_put_paddr(plane->bo);
+		if (ret)
+			goto fail;
+		plane->paddr = 0;
+	}
+
+	return 0;
+
+fail:
 	return ret;
 }
 
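The callback-based omap_framebuffer_replace() is split into a symmetric pin/unpin pair, and the pin path gains the classic unwind-on-partial-failure idiom: walk the planes forward until one fails, then walk backwards releasing only what was taken. The same shape in isolation, with hypothetical acquire()/release_one() helpers standing in for omap_gem_get_paddr()/omap_gem_put_paddr():

	/* Pin n items; on failure undo the i already-pinned ones so the
	 * operation is all-or-nothing for the caller. */
	static int pin_all(struct item *items, int n)
	{
		int ret, i;

		for (i = 0; i < n; i++) {
			ret = acquire(&items[i]);
			if (ret)
				goto fail;
		}
		return 0;

	fail:
		while (--i >= 0)
			release_one(&items[i]);
		return ret;
	}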
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index ebbdf4132e9c..533f6ebec531 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -20,6 +20,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/shmem_fs.h>
+#include <drm/drm_vma_manager.h>
 
 #include "omap_drv.h"
 #include "omap_dmm_tiler.h"
@@ -236,7 +237,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
 	 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
 	 * we actually want CMA memory for it all anyways..
 	 */
-	pages = _drm_gem_get_pages(obj, GFP_KERNEL);
+	pages = drm_gem_get_pages(obj, GFP_KERNEL);
 	if (IS_ERR(pages)) {
 		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
 		return PTR_ERR(pages);
@@ -270,7 +271,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
 	return 0;
 
 free_pages:
-	_drm_gem_put_pages(obj, pages, true, false);
+	drm_gem_put_pages(obj, pages, true, false);
 
 	return ret;
 }
@@ -294,7 +295,7 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
 	kfree(omap_obj->addrs);
 	omap_obj->addrs = NULL;
 
-	_drm_gem_put_pages(obj, omap_obj->pages, true, false);
+	drm_gem_put_pages(obj, omap_obj->pages, true, false);
 	omap_obj->pages = NULL;
 }
 
@@ -308,21 +309,20 @@ uint32_t omap_gem_flags(struct drm_gem_object *obj)
 static uint64_t mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
+	int ret;
+	size_t size;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	if (!obj->map_list.map) {
-		/* Make it mmapable */
-		size_t size = omap_gem_mmap_size(obj);
-		int ret = _drm_gem_create_mmap_offset_size(obj, size);
-
-		if (ret) {
-			dev_err(dev->dev, "could not allocate mmap offset\n");
-			return 0;
-		}
+	/* Make it mmapable */
+	size = omap_gem_mmap_size(obj);
+	ret = drm_gem_create_mmap_offset_size(obj, size);
+	if (ret) {
+		dev_err(dev->dev, "could not allocate mmap offset\n");
+		return 0;
 	}
 
-	return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
+	return drm_vma_node_offset_addr(&obj->vma_node);
 }
 
 uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
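The hand-rolled map_list/hash bookkeeping gives way to the drm_vma_manager node embedded in every GEM object. drm_gem_create_mmap_offset_size() can now be called unconditionally because the underlying offset allocation is a no-op for an object that already has one, which is presumably why the old `if (!obj->map_list.map)` guard disappears. The resulting pattern, as a sketch using only the helpers visible in this hunk (the function name is hypothetical):

	/* Allocate (or reuse) the fake mmap offset and report it to userspace;
	 * 0 doubles as the "no offset" error value, as in mmap_offset() above. */
	static uint64_t demo_mmap_offset(struct drm_gem_object *obj, size_t size)
	{
		if (drm_gem_create_mmap_offset_size(obj, size))
			return 0;
		return drm_vma_node_offset_addr(&obj->vma_node);
	}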
@@ -629,21 +629,6 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 }
 
 /**
- * omap_gem_dumb_destroy	-	destroy a dumb buffer
- * @file: client file
- * @dev: our DRM device
- * @handle: the object handle
- *
- * Destroy a handle that was created via omap_gem_dumb_create.
- */
-int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-		uint32_t handle)
-{
-	/* No special work needed, drop the reference and see what falls out */
-	return drm_gem_handle_delete(file, handle);
-}
-
-/**
  * omap_gem_dumb_map	-	buffer mapping for dumb interface
  * @file: our drm client file
  * @dev: drm device
@@ -997,12 +982,11 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
 	struct drm_device *dev = obj->dev;
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	uint64_t off = 0;
+	uint64_t off;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	if (obj->map_list.map)
-		off = (uint64_t)obj->map_list.hash.key;
+	off = drm_vma_node_start(&obj->vma_node);
 
 	seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
 			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
@@ -1309,8 +1293,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
 
 	list_del(&omap_obj->mm_list);
 
-	if (obj->map_list.map)
-		drm_gem_free_mmap_offset(obj);
+	drm_gem_free_mmap_offset(obj);
 
 	/* this means the object is still pinned.. which really should
 	 * not happen.  I think..
@@ -1427,8 +1410,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 		omap_obj->height = gsize.tiled.height;
 	}
 
+	ret = 0;
 	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
-		ret = drm_gem_private_object_init(dev, obj, size);
+		drm_gem_private_object_init(dev, obj, size);
 	else
 		ret = drm_gem_object_init(dev, obj, size);
 
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
deleted file mode 100644
index f9eb679eb79b..000000000000
--- a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
+++ /dev/null
@@ -1,169 +0,0 @@
1/*
2 * drivers/gpu/drm/omapdrm/omap_gem_helpers.c
3 *
4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob.clark@linaro.org>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20/* temporary copy of drm_gem_{get,put}_pages() until the
21 * "drm/gem: add functions to get/put pages" patch is merged..
22 */
23
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/shmem_fs.h>
27
28#include <drm/drmP.h>
29
30/**
31 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
32 * @obj: obj in question
33 * @gfpmask: gfp mask of requested pages
34 */
35struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
36{
37 struct inode *inode;
38 struct address_space *mapping;
39 struct page *p, **pages;
40 int i, npages;
41
42 /* This is the shared memory object that backs the GEM resource */
43 inode = file_inode(obj->filp);
44 mapping = inode->i_mapping;
45
46 npages = obj->size >> PAGE_SHIFT;
47
48 pages = drm_malloc_ab(npages, sizeof(struct page *));
49 if (pages == NULL)
50 return ERR_PTR(-ENOMEM);
51
52 gfpmask |= mapping_gfp_mask(mapping);
53
54 for (i = 0; i < npages; i++) {
55 p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
56 if (IS_ERR(p))
57 goto fail;
58 pages[i] = p;
59
60 /* There is a hypothetical issue w/ drivers that require
61 * buffer memory in the low 4GB.. if the pages are un-
62 * pinned, and swapped out, they can end up swapped back
63 * in above 4GB. If pages are already in memory, then
64 * shmem_read_mapping_page_gfp will ignore the gfpmask,
65 * even if the already in-memory page disobeys the mask.
66 *
67 * It is only a theoretical issue today, because none of
68 * the devices with this limitation can be populated with
69 * enough memory to trigger the issue. But this BUG_ON()
70 * is here as a reminder in case the problem with
71 * shmem_read_mapping_page_gfp() isn't solved by the time
72 * it does become a real issue.
73 *
74 * See this thread: http://lkml.org/lkml/2011/7/11/238
75 */
76 BUG_ON((gfpmask & __GFP_DMA32) &&
77 (page_to_pfn(p) >= 0x00100000UL));
78 }
79
80 return pages;
81
82fail:
83 while (i--)
84 page_cache_release(pages[i]);
85
86 drm_free_large(pages);
87 return ERR_CAST(p);
88}
89
90/**
91 * drm_gem_put_pages - helper to free backing pages for a GEM object
92 * @obj: obj in question
93 * @pages: pages to free
94 */
95void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
96 bool dirty, bool accessed)
97{
98 int i, npages;
99
100 npages = obj->size >> PAGE_SHIFT;
101
102 for (i = 0; i < npages; i++) {
103 if (dirty)
104 set_page_dirty(pages[i]);
105
106 if (accessed)
107 mark_page_accessed(pages[i]);
108
109 /* Undo the reference we took when populating the table */
110 page_cache_release(pages[i]);
111 }
112
113 drm_free_large(pages);
114}
115
116int
117_drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
118{
119 struct drm_device *dev = obj->dev;
120 struct drm_gem_mm *mm = dev->mm_private;
121 struct drm_map_list *list;
122 struct drm_local_map *map;
123 int ret = 0;
124
125 /* Set the object up for mmap'ing */
126 list = &obj->map_list;
127 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
128 if (!list->map)
129 return -ENOMEM;
130
131 map = list->map;
132 map->type = _DRM_GEM;
133 map->size = size;
134 map->handle = obj;
135
136 /* Get a DRM GEM mmap offset allocated... */
137 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
138 size / PAGE_SIZE, 0, 0);
139
140 if (!list->file_offset_node) {
141 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
142 ret = -ENOSPC;
143 goto out_free_list;
144 }
145
146 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
147 size / PAGE_SIZE, 0);
148 if (!list->file_offset_node) {
149 ret = -ENOMEM;
150 goto out_free_list;
151 }
152
153 list->hash.key = list->file_offset_node->start;
154 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
155 if (ret) {
156 DRM_ERROR("failed to add to map hash\n");
157 goto out_free_mm;
158 }
159
160 return 0;
161
162out_free_mm:
163 drm_mm_put_block(list->file_offset_node);
164out_free_list:
165 kfree(list->map);
166 list->map = NULL;
167
168 return ret;
169}
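The file deleted above was, as its own comment said, a temporary copy of helpers that have since landed in the DRM core, so the `_drm_gem_*` call sites simply lose their leading underscore. The core pair as omap_gem.c now uses it, in sketch form:

	/* populate shmem-backed pages for the object */
	struct page **pages = drm_gem_get_pages(obj, GFP_KERNEL);

	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ... map them, point the DMM/TILER at them, etc ... */

	/* release: mark dirty, do not mark accessed */
	drm_gem_put_pages(obj, pages, true, false);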
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 8d225d7ff4e3..046d5e660c04 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -17,7 +17,7 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/kfifo.h>
+#include "drm_flip_work.h"
 
 #include "omap_drv.h"
 #include "omap_dmm_tiler.h"
@@ -58,26 +58,23 @@ struct omap_plane {
 
 	struct omap_drm_irq error_irq;
 
-	/* set of bo's pending unpin until next post_apply() */
-	DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *);
+	/* for deferring bo unpin's until next post_apply(): */
+	struct drm_flip_work unpin_work;
 
 	// XXX maybe get rid of this and handle vblank in crtc too?
 	struct callback apply_done_cb;
 };
 
-static void unpin(void *arg, struct drm_gem_object *bo)
+static void unpin_worker(struct drm_flip_work *work, void *val)
 {
-	struct drm_plane *plane = arg;
-	struct omap_plane *omap_plane = to_omap_plane(plane);
+	struct omap_plane *omap_plane =
+			container_of(work, struct omap_plane, unpin_work);
+	struct drm_device *dev = omap_plane->base.dev;
 
-	if (kfifo_put(&omap_plane->unpin_fifo,
-			(const struct drm_gem_object **)&bo)) {
-		/* also hold a ref so it isn't free'd while pinned */
-		drm_gem_object_reference(bo);
-	} else {
-		dev_err(plane->dev->dev, "unpin fifo full!\n");
-		omap_gem_put_paddr(bo);
-	}
+	omap_framebuffer_unpin(val);
+	mutex_lock(&dev->mode_config.mutex);
+	drm_framebuffer_unreference(val);
+	mutex_unlock(&dev->mode_config.mutex);
 }
 
 /* update which fb (if any) is pinned for scanout */
@@ -87,23 +84,22 @@ static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
 	struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;
 
 	if (pinned_fb != fb) {
-		int ret;
+		int ret = 0;
 
 		DBG("%p -> %p", pinned_fb, fb);
 
-		if (fb)
+		if (fb) {
 			drm_framebuffer_reference(fb);
-
-		ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin);
+			ret = omap_framebuffer_pin(fb);
+		}
 
 		if (pinned_fb)
-			drm_framebuffer_unreference(pinned_fb);
+			drm_flip_work_queue(&omap_plane->unpin_work, pinned_fb);
 
 		if (ret) {
 			dev_err(plane->dev->dev, "could not swap %p -> %p\n",
 					omap_plane->pinned_fb, fb);
-			if (fb)
-				drm_framebuffer_unreference(fb);
+			drm_framebuffer_unreference(fb);
 			omap_plane->pinned_fb = NULL;
 			return ret;
 		}
@@ -170,17 +166,14 @@ static void omap_plane_post_apply(struct omap_drm_apply *apply)
 	struct omap_plane *omap_plane =
 			container_of(apply, struct omap_plane, apply);
 	struct drm_plane *plane = &omap_plane->base;
+	struct omap_drm_private *priv = plane->dev->dev_private;
 	struct omap_overlay_info *info = &omap_plane->info;
-	struct drm_gem_object *bo = NULL;
 	struct callback cb;
 
 	cb = omap_plane->apply_done_cb;
 	omap_plane->apply_done_cb.fxn = NULL;
 
-	while (kfifo_get(&omap_plane->unpin_fifo, &bo)) {
-		omap_gem_put_paddr(bo);
-		drm_gem_object_unreference_unlocked(bo);
-	}
+	drm_flip_work_commit(&omap_plane->unpin_work, priv->wq);
 
 	if (cb.fxn)
 		cb.fxn(cb.arg);
@@ -277,8 +270,7 @@ static void omap_plane_destroy(struct drm_plane *plane)
 	omap_plane_disable(plane);
 	drm_plane_cleanup(plane);
 
-	WARN_ON(!kfifo_is_empty(&omap_plane->unpin_fifo));
-	kfifo_free(&omap_plane->unpin_fifo);
+	drm_flip_work_cleanup(&omap_plane->unpin_work);
 
 	kfree(omap_plane);
 }
@@ -399,7 +391,8 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
 	if (!omap_plane)
 		goto fail;
 
-	ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL);
+	ret = drm_flip_work_init(&omap_plane->unpin_work, 16,
+			"unpin", unpin_worker);
 	if (ret) {
 		dev_err(dev->dev, "could not allocate unpin FIFO\n");
 		goto fail;
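The open-coded kfifo of pending-unpin buffers becomes a drm_flip_work: the producer queues objects as they stop being scanned out, and post_apply() commits the backlog to the driver workqueue once the flip has actually happened. The whole lifecycle is the four calls in this diff; a condensed sketch (the demo_* names are hypothetical, the include spelling follows this file):

	#include "drm_flip_work.h"

	struct demo_plane {
		struct drm_flip_work unpin_work;
		struct workqueue_struct *wq;
	};

	/* runs later, on the workqueue, once per queued value */
	static void unpin_worker(struct drm_flip_work *work, void *val)
	{
		struct demo_plane *p =
				container_of(work, struct demo_plane, unpin_work);
		/* unpin and drop the reference held on 'val' here */
		(void)p;
	}

	static int demo_init(struct demo_plane *p)
	{
		/* this era's init takes a backing-fifo size and may fail;
		 * pair it with drm_flip_work_cleanup() on teardown */
		return drm_flip_work_init(&p->unpin_work, 16, "unpin", unpin_worker);
	}

	static void demo_swap(struct demo_plane *p, void *old_fb)
	{
		drm_flip_work_queue(&p->unpin_work, old_fb);	/* defer */
	}

	static void demo_post_apply(struct demo_plane *p)
	{
		drm_flip_work_commit(&p->unpin_work, p->wq);	/* flush backlog */
	}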
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 93c2f2cceb51..eb89653a7a17 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -179,9 +179,10 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea
179 uint32_t type, bool interruptible) 179 uint32_t type, bool interruptible)
180{ 180{
181 struct qxl_command cmd; 181 struct qxl_command cmd;
182 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
182 183
183 cmd.type = type; 184 cmd.type = type;
184 cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); 185 cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
185 186
186 return qxl_ring_push(qdev->command_ring, &cmd, interruptible); 187 return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
187} 188}
@@ -191,9 +192,10 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
191 uint32_t type, bool interruptible) 192 uint32_t type, bool interruptible)
192{ 193{
193 struct qxl_command cmd; 194 struct qxl_command cmd;
195 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
194 196
195 cmd.type = type; 197 cmd.type = type;
196 cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); 198 cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
197 199
198 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); 200 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
199} 201}
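Since release->bos is now a list of ttm_validate_buffer entries rather than a fixed qxl_bo array, "the bo backing the release" is recovered with list_first_entry() plus the new to_qxl_bo() container_of wrapper, exactly as both ring-push helpers above do. Distilled into a sketch (the helper name is hypothetical):

	/* first entry on the release's validation list is the release bo itself */
	static struct qxl_bo *release_first_bo(struct qxl_release *release)
	{
		struct qxl_bo_list *entry =
			list_first_entry(&release->bos, struct qxl_bo_list, tv.head);

		return to_qxl_bo(entry->tv.bo);
	}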
@@ -214,7 +216,6 @@ int qxl_garbage_collect(struct qxl_device *qdev)
214 struct qxl_release *release; 216 struct qxl_release *release;
215 uint64_t id, next_id; 217 uint64_t id, next_id;
216 int i = 0; 218 int i = 0;
217 int ret;
218 union qxl_release_info *info; 219 union qxl_release_info *info;
219 220
220 while (qxl_ring_pop(qdev->release_ring, &id)) { 221 while (qxl_ring_pop(qdev->release_ring, &id)) {
@@ -224,17 +225,10 @@ int qxl_garbage_collect(struct qxl_device *qdev)
224 if (release == NULL) 225 if (release == NULL)
225 break; 226 break;
226 227
227 ret = qxl_release_reserve(qdev, release, false);
228 if (ret) {
229 qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id);
230 DRM_ERROR("failed to reserve release %lld\n", id);
231 }
232
233 info = qxl_release_map(qdev, release); 228 info = qxl_release_map(qdev, release);
234 next_id = info->next; 229 next_id = info->next;
235 qxl_release_unmap(qdev, release, info); 230 qxl_release_unmap(qdev, release, info);
236 231
237 qxl_release_unreserve(qdev, release);
238 QXL_INFO(qdev, "popped %lld, next %lld\n", id, 232 QXL_INFO(qdev, "popped %lld, next %lld\n", id,
239 next_id); 233 next_id);
240 234
@@ -259,27 +253,29 @@ int qxl_garbage_collect(struct qxl_device *qdev)
 	return i;
 }
 
-int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+int qxl_alloc_bo_reserved(struct qxl_device *qdev,
+			  struct qxl_release *release,
+			  unsigned long size,
 			  struct qxl_bo **_bo)
 {
 	struct qxl_bo *bo;
 	int ret;
 
 	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
-			    QXL_GEM_DOMAIN_VRAM, NULL, &bo);
+			    false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
 	if (ret) {
 		DRM_ERROR("failed to allocate VRAM BO\n");
 		return ret;
 	}
-	ret = qxl_bo_reserve(bo, false);
-	if (unlikely(ret != 0))
+	ret = qxl_release_list_add(release, bo);
+	if (ret)
 		goto out_unref;
 
 	*_bo = bo;
 	return 0;
 out_unref:
 	qxl_bo_unref(&bo);
-	return 0;
+	return ret;
 }
 
 static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
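qxl_alloc_bo_reserved() no longer reserves anything itself: it creates the bo and records it on the release's validation list, so that a single qxl_release_reserve_list() pass later locks every object at once. The rewrite also fixes a real bug, since the out_unref path used to return 0 even though the bo had been dropped; it now propagates ret. The caller-side shape, as a sketch (demo_submit is hypothetical; the calls are the ones this series introduces):

	static int demo_submit(struct qxl_device *qdev, struct qxl_release *release,
			       unsigned long size)
	{
		struct qxl_bo *bo;
		int ret;

		ret = qxl_alloc_bo_reserved(qdev, release, size, &bo); /* create + track */
		if (ret)
			return ret;

		ret = qxl_release_reserve_list(release, false);	/* lock all tracked bos */
		if (ret)
			return ret;

		/* ... qxl_release_map(), fill the command, qxl_release_unmap() ... */

		qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
		qxl_release_fence_buffer_objects(release);	/* fence, then unlock */
		return 0;
	}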
@@ -503,6 +499,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
503 if (ret) 499 if (ret)
504 return ret; 500 return ret;
505 501
502 ret = qxl_release_reserve_list(release, true);
503 if (ret)
504 return ret;
505
506 cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); 506 cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
507 cmd->type = QXL_SURFACE_CMD_CREATE; 507 cmd->type = QXL_SURFACE_CMD_CREATE;
508 cmd->u.surface_create.format = surf->surf.format; 508 cmd->u.surface_create.format = surf->surf.format;
@@ -524,14 +524,11 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
524 524
525 surf->surf_create = release; 525 surf->surf_create = release;
526 526
527 /* no need to add a release to the fence for this bo, 527 /* no need to add a release to the fence for this surface bo,
528 since it is only released when we ask to destroy the surface 528 since it is only released when we ask to destroy the surface
529 and it would never signal otherwise */ 529 and it would never signal otherwise */
530 qxl_fence_releaseable(qdev, release);
531
532 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); 530 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
533 531 qxl_release_fence_buffer_objects(release);
534 qxl_release_unreserve(qdev, release);
535 532
536 surf->hw_surf_alloc = true; 533 surf->hw_surf_alloc = true;
537 spin_lock(&qdev->surf_id_idr_lock); 534 spin_lock(&qdev->surf_id_idr_lock);
@@ -573,12 +570,9 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
573 cmd->surface_id = id; 570 cmd->surface_id = id;
574 qxl_release_unmap(qdev, release, &cmd->release_info); 571 qxl_release_unmap(qdev, release, &cmd->release_info);
575 572
576 qxl_fence_releaseable(qdev, release);
577
578 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); 573 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
579 574
580 qxl_release_unreserve(qdev, release); 575 qxl_release_fence_buffer_objects(release);
581
582 576
583 return 0; 577 return 0;
584} 578}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index f76f5dd7bfc4..835caba026d3 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -179,7 +179,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc)
179 kfree(qxl_crtc); 179 kfree(qxl_crtc);
180} 180}
181 181
182static void 182static int
183qxl_hide_cursor(struct qxl_device *qdev) 183qxl_hide_cursor(struct qxl_device *qdev)
184{ 184{
185 struct qxl_release *release; 185 struct qxl_release *release;
@@ -188,14 +188,22 @@ qxl_hide_cursor(struct qxl_device *qdev)
188 188
189 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, 189 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
190 &release, NULL); 190 &release, NULL);
191 if (ret)
192 return ret;
193
194 ret = qxl_release_reserve_list(release, true);
195 if (ret) {
196 qxl_release_free(qdev, release);
197 return ret;
198 }
191 199
192 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); 200 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
193 cmd->type = QXL_CURSOR_HIDE; 201 cmd->type = QXL_CURSOR_HIDE;
194 qxl_release_unmap(qdev, release, &cmd->release_info); 202 qxl_release_unmap(qdev, release, &cmd->release_info);
195 203
196 qxl_fence_releaseable(qdev, release);
197 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 204 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
198 qxl_release_unreserve(qdev, release); 205 qxl_release_fence_buffer_objects(release);
206 return 0;
199} 207}
200 208
201static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, 209static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
@@ -216,10 +224,8 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
216 224
217 int size = 64*64*4; 225 int size = 64*64*4;
218 int ret = 0; 226 int ret = 0;
219 if (!handle) { 227 if (!handle)
220 qxl_hide_cursor(qdev); 228 return qxl_hide_cursor(qdev);
221 return 0;
222 }
223 229
224 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); 230 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
225 if (!obj) { 231 if (!obj) {
@@ -234,8 +240,9 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
234 goto out_unref; 240 goto out_unref;
235 241
236 ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); 242 ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
243 qxl_bo_unreserve(user_bo);
237 if (ret) 244 if (ret)
238 goto out_unreserve; 245 goto out_unref;
239 246
240 ret = qxl_bo_kmap(user_bo, &user_ptr); 247 ret = qxl_bo_kmap(user_bo, &user_ptr);
241 if (ret) 248 if (ret)
@@ -246,14 +253,20 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
246 &release, NULL); 253 &release, NULL);
247 if (ret) 254 if (ret)
248 goto out_kunmap; 255 goto out_kunmap;
249 ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size, 256
250 &cursor_bo); 257 ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_cursor) + size,
258 &cursor_bo);
251 if (ret) 259 if (ret)
252 goto out_free_release; 260 goto out_free_release;
253 ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); 261
262 ret = qxl_release_reserve_list(release, false);
254 if (ret) 263 if (ret)
255 goto out_free_bo; 264 goto out_free_bo;
256 265
266 ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
267 if (ret)
268 goto out_backoff;
269
257 cursor->header.unique = 0; 270 cursor->header.unique = 0;
258 cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; 271 cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
259 cursor->header.width = 64; 272 cursor->header.width = 64;
@@ -269,11 +282,7 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
269 282
270 qxl_bo_kunmap(cursor_bo); 283 qxl_bo_kunmap(cursor_bo);
271 284
272 /* finish with the userspace bo */
273 qxl_bo_kunmap(user_bo); 285 qxl_bo_kunmap(user_bo);
274 qxl_bo_unpin(user_bo);
275 qxl_bo_unreserve(user_bo);
276 drm_gem_object_unreference_unlocked(obj);
277 286
278 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); 287 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
279 cmd->type = QXL_CURSOR_SET; 288 cmd->type = QXL_CURSOR_SET;
@@ -281,30 +290,35 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
281 cmd->u.set.position.y = qcrtc->cur_y; 290 cmd->u.set.position.y = qcrtc->cur_y;
282 291
283 cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); 292 cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
284 qxl_release_add_res(qdev, release, cursor_bo);
285 293
286 cmd->u.set.visible = 1; 294 cmd->u.set.visible = 1;
287 qxl_release_unmap(qdev, release, &cmd->release_info); 295 qxl_release_unmap(qdev, release, &cmd->release_info);
288 296
289 qxl_fence_releaseable(qdev, release);
290 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 297 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
291 qxl_release_unreserve(qdev, release); 298 qxl_release_fence_buffer_objects(release);
299
300 /* finish with the userspace bo */
301 ret = qxl_bo_reserve(user_bo, false);
302 if (!ret) {
303 qxl_bo_unpin(user_bo);
304 qxl_bo_unreserve(user_bo);
305 }
306 drm_gem_object_unreference_unlocked(obj);
292 307
293 qxl_bo_unreserve(cursor_bo);
294 qxl_bo_unref(&cursor_bo); 308 qxl_bo_unref(&cursor_bo);
295 309
296 return ret; 310 return ret;
311
312out_backoff:
313 qxl_release_backoff_reserve_list(release);
297out_free_bo: 314out_free_bo:
298 qxl_bo_unref(&cursor_bo); 315 qxl_bo_unref(&cursor_bo);
299out_free_release: 316out_free_release:
300 qxl_release_unreserve(qdev, release);
301 qxl_release_free(qdev, release); 317 qxl_release_free(qdev, release);
302out_kunmap: 318out_kunmap:
303 qxl_bo_kunmap(user_bo); 319 qxl_bo_kunmap(user_bo);
304out_unpin: 320out_unpin:
305 qxl_bo_unpin(user_bo); 321 qxl_bo_unpin(user_bo);
306out_unreserve:
307 qxl_bo_unreserve(user_bo);
308out_unref: 322out_unref:
309 drm_gem_object_unreference_unlocked(obj); 323 drm_gem_object_unreference_unlocked(obj);
310 return ret; 324 return ret;
@@ -322,6 +336,14 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
322 336
323 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, 337 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
324 &release, NULL); 338 &release, NULL);
339 if (ret)
340 return ret;
341
342 ret = qxl_release_reserve_list(release, true);
343 if (ret) {
344 qxl_release_free(qdev, release);
345 return ret;
346 }
325 347
326 qcrtc->cur_x = x; 348 qcrtc->cur_x = x;
327 qcrtc->cur_y = y; 349 qcrtc->cur_y = y;
@@ -332,9 +354,9 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
332 cmd->u.position.y = qcrtc->cur_y; 354 cmd->u.position.y = qcrtc->cur_y;
333 qxl_release_unmap(qdev, release, &cmd->release_info); 355 qxl_release_unmap(qdev, release, &cmd->release_info);
334 356
335 qxl_fence_releaseable(qdev, release);
336 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 357 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
337 qxl_release_unreserve(qdev, release); 358 qxl_release_fence_buffer_objects(release);
359
338 return 0; 360 return 0;
339} 361}
340 362
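All three cursor paths now follow one discipline: allocate the release, reserve its buffer list, map/fill/unmap the command, push it, then fence the buffer objects. qxl_hide_cursor() becomes int-returning so cursor_set2 can simply `return qxl_hide_cursor(qdev)` for the !handle case, and a reservation failure frees the release before returning. The shared prologue, copied from the shape these hunks adopt rather than any new API:

	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
					 QXL_RELEASE_CURSOR_CMD, &release, NULL);
	if (ret)
		return ret;

	ret = qxl_release_reserve_list(release, true);
	if (ret) {
		qxl_release_free(qdev, release);	/* nothing locked yet */
		return ret;
	}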
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 3c8c3dbf9378..56e1d633875e 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -23,25 +23,29 @@
23#include "qxl_drv.h" 23#include "qxl_drv.h"
24#include "qxl_object.h" 24#include "qxl_object.h"
25 25
26static int alloc_clips(struct qxl_device *qdev,
27 struct qxl_release *release,
28 unsigned num_clips,
29 struct qxl_bo **clips_bo)
30{
31 int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips;
32
33 return qxl_alloc_bo_reserved(qdev, release, size, clips_bo);
34}
35
26/* returns a pointer to the already allocated qxl_rect array inside 36/* returns a pointer to the already allocated qxl_rect array inside
27 * the qxl_clip_rects. This is *not* the same as the memory allocated 37 * the qxl_clip_rects. This is *not* the same as the memory allocated
28 * on the device, it is offset to qxl_clip_rects.chunk.data */ 38 * on the device, it is offset to qxl_clip_rects.chunk.data */
29static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, 39static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
30 struct qxl_drawable *drawable, 40 struct qxl_drawable *drawable,
31 unsigned num_clips, 41 unsigned num_clips,
32 struct qxl_bo **clips_bo, 42 struct qxl_bo *clips_bo)
33 struct qxl_release *release)
34{ 43{
35 struct qxl_clip_rects *dev_clips; 44 struct qxl_clip_rects *dev_clips;
36 int ret; 45 int ret;
37 int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips;
38 ret = qxl_alloc_bo_reserved(qdev, size, clips_bo);
39 if (ret)
40 return NULL;
41 46
42 ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips); 47 ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips);
43 if (ret) { 48 if (ret) {
44 qxl_bo_unref(clips_bo);
45 return NULL; 49 return NULL;
46 } 50 }
47 dev_clips->num_rects = num_clips; 51 dev_clips->num_rects = num_clips;
@@ -52,20 +56,34 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
52} 56}
53 57
54static int 58static int
59alloc_drawable(struct qxl_device *qdev, struct qxl_release **release)
60{
61 int ret;
62 ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
63 QXL_RELEASE_DRAWABLE, release,
64 NULL);
65 return ret;
66}
67
68static void
69free_drawable(struct qxl_device *qdev, struct qxl_release *release)
70{
71 qxl_release_free(qdev, release);
72}
73
74/* release needs to be reserved at this point */
75static int
55make_drawable(struct qxl_device *qdev, int surface, uint8_t type, 76make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
56 const struct qxl_rect *rect, 77 const struct qxl_rect *rect,
57 struct qxl_release **release) 78 struct qxl_release *release)
58{ 79{
59 struct qxl_drawable *drawable; 80 struct qxl_drawable *drawable;
60 int i, ret; 81 int i;
61 82
62 ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable), 83 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
63 QXL_RELEASE_DRAWABLE, release, 84 if (!drawable)
64 NULL); 85 return -ENOMEM;
65 if (ret)
66 return ret;
67 86
68 drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release);
69 drawable->type = type; 87 drawable->type = type;
70 88
71 drawable->surface_id = surface; /* Only primary for now */ 89 drawable->surface_id = surface; /* Only primary for now */
@@ -91,14 +109,23 @@ make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
91 drawable->bbox = *rect; 109 drawable->bbox = *rect;
92 110
93 drawable->mm_time = qdev->rom->mm_clock; 111 drawable->mm_time = qdev->rom->mm_clock;
94 qxl_release_unmap(qdev, *release, &drawable->release_info); 112 qxl_release_unmap(qdev, release, &drawable->release_info);
95 return 0; 113 return 0;
96} 114}
97 115
98static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, 116static int alloc_palette_object(struct qxl_device *qdev,
117 struct qxl_release *release,
118 struct qxl_bo **palette_bo)
119{
120 return qxl_alloc_bo_reserved(qdev, release,
121 sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
122 palette_bo);
123}
124
125static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
126 struct qxl_release *release,
99 const struct qxl_fb_image *qxl_fb_image) 127 const struct qxl_fb_image *qxl_fb_image)
100{ 128{
101 struct qxl_device *qdev = qxl_fb_image->qdev;
102 const struct fb_image *fb_image = &qxl_fb_image->fb_image; 129 const struct fb_image *fb_image = &qxl_fb_image->fb_image;
103 uint32_t visual = qxl_fb_image->visual; 130 uint32_t visual = qxl_fb_image->visual;
104 const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette; 131 const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette;
@@ -108,12 +135,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
108 static uint64_t unique; /* we make no attempt to actually set this 135 static uint64_t unique; /* we make no attempt to actually set this
109 * correctly globaly, since that would require 136 * correctly globaly, since that would require
110 * tracking all of our palettes. */ 137 * tracking all of our palettes. */
111 138 ret = qxl_bo_kmap(palette_bo, (void **)&pal);
112 ret = qxl_alloc_bo_reserved(qdev,
113 sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
114 palette_bo);
115
116 ret = qxl_bo_kmap(*palette_bo, (void **)&pal);
117 pal->num_ents = 2; 139 pal->num_ents = 2;
118 pal->unique = unique++; 140 pal->unique = unique++;
119 if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { 141 if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
@@ -126,7 +148,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
126 } 148 }
127 pal->ents[0] = bgcolor; 149 pal->ents[0] = bgcolor;
128 pal->ents[1] = fgcolor; 150 pal->ents[1] = fgcolor;
129 qxl_bo_kunmap(*palette_bo); 151 qxl_bo_kunmap(palette_bo);
130 return 0; 152 return 0;
131} 153}
132 154
@@ -144,44 +166,63 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
144 const char *src = fb_image->data; 166 const char *src = fb_image->data;
145 int depth = fb_image->depth; 167 int depth = fb_image->depth;
146 struct qxl_release *release; 168 struct qxl_release *release;
147 struct qxl_bo *image_bo;
148 struct qxl_image *image; 169 struct qxl_image *image;
149 int ret; 170 int ret;
150 171 struct qxl_drm_image *dimage;
172 struct qxl_bo *palette_bo = NULL;
151 if (stride == 0) 173 if (stride == 0)
152 stride = depth * width / 8; 174 stride = depth * width / 8;
153 175
176 ret = alloc_drawable(qdev, &release);
177 if (ret)
178 return;
179
180 ret = qxl_image_alloc_objects(qdev, release,
181 &dimage,
182 height, stride);
183 if (ret)
184 goto out_free_drawable;
185
186 if (depth == 1) {
187 ret = alloc_palette_object(qdev, release, &palette_bo);
188 if (ret)
189 goto out_free_image;
190 }
191
192 /* do a reservation run over all the objects we just allocated */
193 ret = qxl_release_reserve_list(release, true);
194 if (ret)
195 goto out_free_palette;
196
154 rect.left = x; 197 rect.left = x;
155 rect.right = x + width; 198 rect.right = x + width;
156 rect.top = y; 199 rect.top = y;
157 rect.bottom = y + height; 200 rect.bottom = y + height;
158 201
159 ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release); 202 ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, release);
160 if (ret) 203 if (ret) {
161 return; 204 qxl_release_backoff_reserve_list(release);
205 goto out_free_palette;
206 }
162 207
163 ret = qxl_image_create(qdev, release, &image_bo, 208 ret = qxl_image_init(qdev, release, dimage,
164 (const uint8_t *)src, 0, 0, 209 (const uint8_t *)src, 0, 0,
165 width, height, depth, stride); 210 width, height, depth, stride);
166 if (ret) { 211 if (ret) {
167 qxl_release_unreserve(qdev, release); 212 qxl_release_backoff_reserve_list(release);
168 qxl_release_free(qdev, release); 213 qxl_release_free(qdev, release);
169 return; 214 return;
170 } 215 }
171 216
172 if (depth == 1) { 217 if (depth == 1) {
173 struct qxl_bo *palette_bo;
174 void *ptr; 218 void *ptr;
175 ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image); 219 ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image);
176 qxl_release_add_res(qdev, release, palette_bo);
177 220
178 ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0); 221 ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0);
179 image = ptr; 222 image = ptr;
180 image->u.bitmap.palette = 223 image->u.bitmap.palette =
181 qxl_bo_physical_address(qdev, palette_bo, 0); 224 qxl_bo_physical_address(qdev, palette_bo, 0);
182 qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); 225 qxl_bo_kunmap_atomic_page(qdev, dimage->bo, ptr);
183 qxl_bo_unreserve(palette_bo);
184 qxl_bo_unref(&palette_bo);
185 } 226 }
186 227
187 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); 228 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
@@ -199,16 +240,20 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
199 drawable->u.copy.mask.bitmap = 0; 240 drawable->u.copy.mask.bitmap = 0;
200 241
201 drawable->u.copy.src_bitmap = 242 drawable->u.copy.src_bitmap =
202 qxl_bo_physical_address(qdev, image_bo, 0); 243 qxl_bo_physical_address(qdev, dimage->bo, 0);
203 qxl_release_unmap(qdev, release, &drawable->release_info); 244 qxl_release_unmap(qdev, release, &drawable->release_info);
204 245
205 qxl_release_add_res(qdev, release, image_bo);
206 qxl_bo_unreserve(image_bo);
207 qxl_bo_unref(&image_bo);
208
209 qxl_fence_releaseable(qdev, release);
210 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); 246 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
211 qxl_release_unreserve(qdev, release); 247 qxl_release_fence_buffer_objects(release);
248
249out_free_palette:
250 if (palette_bo)
251 qxl_bo_unref(&palette_bo);
252out_free_image:
253 qxl_image_free_objects(qdev, dimage);
254out_free_drawable:
255 if (ret)
256 free_drawable(qdev, release);
212} 257}
213 258
214/* push a draw command using the given clipping rectangles as 259/* push a draw command using the given clipping rectangles as
@@ -243,10 +288,14 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
243 int depth = qxl_fb->base.bits_per_pixel; 288 int depth = qxl_fb->base.bits_per_pixel;
244 uint8_t *surface_base; 289 uint8_t *surface_base;
245 struct qxl_release *release; 290 struct qxl_release *release;
246 struct qxl_bo *image_bo;
247 struct qxl_bo *clips_bo; 291 struct qxl_bo *clips_bo;
292 struct qxl_drm_image *dimage;
248 int ret; 293 int ret;
249 294
295 ret = alloc_drawable(qdev, &release);
296 if (ret)
297 return;
298
250 left = clips->x1; 299 left = clips->x1;
251 right = clips->x2; 300 right = clips->x2;
252 top = clips->y1; 301 top = clips->y1;
@@ -263,36 +312,52 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
263 312
264 width = right - left; 313 width = right - left;
265 height = bottom - top; 314 height = bottom - top;
315
316 ret = alloc_clips(qdev, release, num_clips, &clips_bo);
317 if (ret)
318 goto out_free_drawable;
319
320 ret = qxl_image_alloc_objects(qdev, release,
321 &dimage,
322 height, stride);
323 if (ret)
324 goto out_free_clips;
325
326 /* do a reservation run over all the objects we just allocated */
327 ret = qxl_release_reserve_list(release, true);
328 if (ret)
329 goto out_free_image;
330
266 drawable_rect.left = left; 331 drawable_rect.left = left;
267 drawable_rect.right = right; 332 drawable_rect.right = right;
268 drawable_rect.top = top; 333 drawable_rect.top = top;
269 drawable_rect.bottom = bottom; 334 drawable_rect.bottom = bottom;
335
270 ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect, 336 ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
271 &release); 337 release);
272 if (ret) 338 if (ret)
273 return; 339 goto out_release_backoff;
274 340
275 ret = qxl_bo_kmap(bo, (void **)&surface_base); 341 ret = qxl_bo_kmap(bo, (void **)&surface_base);
276 if (ret) 342 if (ret)
277 goto out_unref; 343 goto out_release_backoff;
278 344
279 ret = qxl_image_create(qdev, release, &image_bo, surface_base, 345
280 left, top, width, height, depth, stride); 346 ret = qxl_image_init(qdev, release, dimage, surface_base,
347 left, top, width, height, depth, stride);
281 qxl_bo_kunmap(bo); 348 qxl_bo_kunmap(bo);
282 if (ret) 349 if (ret)
283 goto out_unref; 350 goto out_release_backoff;
351
352 rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo);
353 if (!rects)
354 goto out_release_backoff;
284 355
285 rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release);
286 if (!rects) {
287 qxl_bo_unref(&image_bo);
288 goto out_unref;
289 }
290 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); 356 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
291 357
292 drawable->clip.type = SPICE_CLIP_TYPE_RECTS; 358 drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
293 drawable->clip.data = qxl_bo_physical_address(qdev, 359 drawable->clip.data = qxl_bo_physical_address(qdev,
294 clips_bo, 0); 360 clips_bo, 0);
295 qxl_release_add_res(qdev, release, clips_bo);
296 361
297 drawable->u.copy.src_area.top = 0; 362 drawable->u.copy.src_area.top = 0;
298 drawable->u.copy.src_area.bottom = height; 363 drawable->u.copy.src_area.bottom = height;
@@ -306,11 +371,9 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
306 drawable->u.copy.mask.pos.y = 0; 371 drawable->u.copy.mask.pos.y = 0;
307 drawable->u.copy.mask.bitmap = 0; 372 drawable->u.copy.mask.bitmap = 0;
308 373
309 drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0); 374 drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0);
310 qxl_release_unmap(qdev, release, &drawable->release_info); 375 qxl_release_unmap(qdev, release, &drawable->release_info);
311 qxl_release_add_res(qdev, release, image_bo); 376
312 qxl_bo_unreserve(image_bo);
313 qxl_bo_unref(&image_bo);
314 clips_ptr = clips; 377 clips_ptr = clips;
315 for (i = 0; i < num_clips; i++, clips_ptr += inc) { 378 for (i = 0; i < num_clips; i++, clips_ptr += inc) {
316 rects[i].left = clips_ptr->x1; 379 rects[i].left = clips_ptr->x1;
@@ -319,17 +382,22 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
319 rects[i].bottom = clips_ptr->y2; 382 rects[i].bottom = clips_ptr->y2;
320 } 383 }
321 qxl_bo_kunmap(clips_bo); 384 qxl_bo_kunmap(clips_bo);
322 qxl_bo_unreserve(clips_bo);
323 qxl_bo_unref(&clips_bo);
324 385
325 qxl_fence_releaseable(qdev, release);
326 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); 386 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
327 qxl_release_unreserve(qdev, release); 387 qxl_release_fence_buffer_objects(release);
328 return; 388
389out_release_backoff:
390 if (ret)
391 qxl_release_backoff_reserve_list(release);
392out_free_image:
393 qxl_image_free_objects(qdev, dimage);
394out_free_clips:
395 qxl_bo_unref(&clips_bo);
396out_free_drawable:
397 /* only free drawable on error */
398 if (ret)
399 free_drawable(qdev, release);
329 400
330out_unref:
331 qxl_release_unreserve(qdev, release);
332 qxl_release_free(qdev, release);
333} 401}
334 402
335void qxl_draw_copyarea(struct qxl_device *qdev, 403void qxl_draw_copyarea(struct qxl_device *qdev,
@@ -342,22 +410,36 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
342 struct qxl_release *release; 410 struct qxl_release *release;
343 int ret; 411 int ret;
344 412
413 ret = alloc_drawable(qdev, &release);
414 if (ret)
415 return;
416
417 /* do a reservation run over all the objects we just allocated */
418 ret = qxl_release_reserve_list(release, true);
419 if (ret)
420 goto out_free_release;
421
345 rect.left = dx; 422 rect.left = dx;
346 rect.top = dy; 423 rect.top = dy;
347 rect.right = dx + width; 424 rect.right = dx + width;
348 rect.bottom = dy + height; 425 rect.bottom = dy + height;
349 ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release); 426 ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, release);
350 if (ret) 427 if (ret) {
351 return; 428 qxl_release_backoff_reserve_list(release);
429 goto out_free_release;
430 }
352 431
353 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); 432 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
354 drawable->u.copy_bits.src_pos.x = sx; 433 drawable->u.copy_bits.src_pos.x = sx;
355 drawable->u.copy_bits.src_pos.y = sy; 434 drawable->u.copy_bits.src_pos.y = sy;
356
357 qxl_release_unmap(qdev, release, &drawable->release_info); 435 qxl_release_unmap(qdev, release, &drawable->release_info);
358 qxl_fence_releaseable(qdev, release); 436
359 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); 437 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
360 qxl_release_unreserve(qdev, release); 438 qxl_release_fence_buffer_objects(release);
439
440out_free_release:
441 if (ret)
442 free_drawable(qdev, release);
361} 443}
362 444
363void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) 445void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
@@ -370,10 +452,21 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
370 struct qxl_release *release; 452 struct qxl_release *release;
371 int ret; 453 int ret;
372 454
373 ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release); 455 ret = alloc_drawable(qdev, &release);
374 if (ret) 456 if (ret)
375 return; 457 return;
376 458
459 /* do a reservation run over all the objects we just allocated */
460 ret = qxl_release_reserve_list(release, true);
461 if (ret)
462 goto out_free_release;
463
464 ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, release);
465 if (ret) {
466 qxl_release_backoff_reserve_list(release);
467 goto out_free_release;
468 }
469
377 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); 470 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
378 drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID; 471 drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID;
379 drawable->u.fill.brush.u.color = color; 472 drawable->u.fill.brush.u.color = color;
@@ -384,7 +477,11 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
384 drawable->u.fill.mask.bitmap = 0; 477 drawable->u.fill.mask.bitmap = 0;
385 478
386 qxl_release_unmap(qdev, release, &drawable->release_info); 479 qxl_release_unmap(qdev, release, &drawable->release_info);
387 qxl_fence_releaseable(qdev, release); 480
388 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); 481 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
389 qxl_release_unreserve(qdev, release); 482 qxl_release_fence_buffer_objects(release);
483
484out_free_release:
485 if (ret)
486 free_drawable(qdev, release);
390} 487}
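qxl_draw.c now separates allocation from reservation: alloc_drawable(), alloc_clips(), qxl_image_alloc_objects() and alloc_palette_object() create every bo up front against the release, one qxl_release_reserve_list() call locks them all, and errors unwind through backoff (drop the locks) and the out_free_* labels (drop the objects, and the release only on failure). Condensed control flow of qxl_draw_dirty_fb() above, as a sketch:

	ret = alloc_drawable(qdev, &release);
	if (ret)
		return;
	ret = alloc_clips(qdev, release, num_clips, &clips_bo);
	if (ret)
		goto out_free_drawable;
	ret = qxl_image_alloc_objects(qdev, release, &dimage, height, stride);
	if (ret)
		goto out_free_clips;
	ret = qxl_release_reserve_list(release, true);	/* one pass, all bos */
	if (ret)
		goto out_free_image;

	/* make_drawable(), qxl_image_init(), fill the clip rects ... */

	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
	qxl_release_fence_buffer_objects(release);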
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index df0b577a6608..514118ae72d4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -84,7 +84,6 @@ static const struct file_operations qxl_fops = {
84 .release = drm_release, 84 .release = drm_release,
85 .unlocked_ioctl = drm_ioctl, 85 .unlocked_ioctl = drm_ioctl,
86 .poll = drm_poll, 86 .poll = drm_poll,
87 .fasync = drm_fasync,
88 .mmap = qxl_mmap, 87 .mmap = qxl_mmap,
89}; 88};
90 89
@@ -221,7 +220,7 @@ static struct drm_driver qxl_driver = {
221 220
222 .dumb_create = qxl_mode_dumb_create, 221 .dumb_create = qxl_mode_dumb_create,
223 .dumb_map_offset = qxl_mode_dumb_mmap, 222 .dumb_map_offset = qxl_mode_dumb_mmap,
224 .dumb_destroy = qxl_mode_dumb_destroy, 223 .dumb_destroy = drm_gem_dumb_destroy,
225#if defined(CONFIG_DEBUG_FS) 224#if defined(CONFIG_DEBUG_FS)
226 .debugfs_init = qxl_debugfs_init, 225 .debugfs_init = qxl_debugfs_init,
227 .debugfs_cleanup = qxl_debugfs_takedown, 226 .debugfs_cleanup = qxl_debugfs_takedown,
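Two mechanical cleanups ride along here: .fasync goes away because the core dropped drm_fasync, and .dumb_destroy again falls back to the shared drm_gem_dumb_destroy(). What remains of the fops is just the stock DRM entry points plus the driver mmap:

	static const struct file_operations qxl_fops = {
		/* ... unchanged fields elided ... */
		.release = drm_release,
		.unlocked_ioctl = drm_ioctl,
		.poll = drm_poll,
		/* no .fasync any more */
		.mmap = qxl_mmap,
	};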
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index aacb791464a3..f7c9adde46a0 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -42,6 +42,9 @@
42#include <ttm/ttm_placement.h> 42#include <ttm/ttm_placement.h>
43#include <ttm/ttm_module.h> 43#include <ttm/ttm_module.h>
44 44
45/* just for ttm_validate_buffer */
46#include <ttm/ttm_execbuf_util.h>
47
45#include <drm/qxl_drm.h> 48#include <drm/qxl_drm.h>
46#include "qxl_dev.h" 49#include "qxl_dev.h"
47 50
@@ -118,9 +121,9 @@ struct qxl_bo {
118 uint32_t surface_id; 121 uint32_t surface_id;
119 struct qxl_fence fence; /* per bo fence - list of releases */ 122 struct qxl_fence fence; /* per bo fence - list of releases */
120 struct qxl_release *surf_create; 123 struct qxl_release *surf_create;
121 atomic_t reserve_count;
122}; 124};
123#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base) 125#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
126#define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
124 127
125struct qxl_gem { 128struct qxl_gem {
126 struct mutex mutex; 129 struct mutex mutex;
@@ -128,12 +131,7 @@ struct qxl_gem {
128}; 131};
129 132
130struct qxl_bo_list { 133struct qxl_bo_list {
131 struct list_head lhead; 134 struct ttm_validate_buffer tv;
132 struct qxl_bo *bo;
133};
134
135struct qxl_reloc_list {
136 struct list_head bos;
137}; 135};
138 136
139struct qxl_crtc { 137struct qxl_crtc {
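qxl_bo_list shrinks to a single ttm_validate_buffer, so a release's buffer set can be fed straight to TTM's execbuf-util reservation helpers. The qxl_release_list_add() declared later plausibly amounts to "ref the bo, fill tv.bo, append tv.head"; a hedged sketch of that assumed shape, not code quoted from this patch:

	int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
	{
		struct qxl_bo_list *entry;

		list_for_each_entry(entry, &release->bos, tv.head)
			if (entry->tv.bo == &bo->tbo)
				return 0;	/* already tracked */

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			return -ENOMEM;

		qxl_bo_ref(bo);		/* keep the bo alive while tracked */
		entry->tv.bo = &bo->tbo;
		list_add_tail(&entry->tv.head, &release->bos);
		return 0;
	}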
@@ -195,10 +193,20 @@ enum {
195struct qxl_release { 193struct qxl_release {
196 int id; 194 int id;
197 int type; 195 int type;
198 int bo_count;
199 uint32_t release_offset; 196 uint32_t release_offset;
200 uint32_t surface_release_id; 197 uint32_t surface_release_id;
201 struct qxl_bo *bos[QXL_MAX_RES]; 198 struct ww_acquire_ctx ticket;
199 struct list_head bos;
200};
201
202struct qxl_drm_chunk {
203 struct list_head head;
204 struct qxl_bo *bo;
205};
206
207struct qxl_drm_image {
208 struct qxl_bo *bo;
209 struct list_head chunk_list;
202}; 210};
203 211
204struct qxl_fb_image { 212struct qxl_fb_image {
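The release grows exactly the pair that TTM's execbuf-util helpers operate on, a ww_acquire_ctx ticket plus a list of ttm_validate_buffer entries, which is presumably what the new reserve/backoff/fence entry points further down wrap (signatures per the 3.1x-era <ttm/ttm_execbuf_util.h>, treated here as an assumption):

	/* take every lock on the list under one ww ticket */
	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos);

	/* error path: drop all the locks taken so far */
	ttm_eu_backoff_reservation(&release->ticket, &release->bos);

	/* success path, after queuing the command: fence and unreserve */
	ttm_eu_fence_buffer_objects(&release->ticket, &release->bos, fence);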
@@ -314,12 +322,13 @@ struct qxl_device {
314 struct workqueue_struct *gc_queue; 322 struct workqueue_struct *gc_queue;
315 struct work_struct gc_work; 323 struct work_struct gc_work;
316 324
325 struct work_struct fb_work;
317}; 326};
318 327
319/* forward declaration for QXL_INFO_IO */ 328/* forward declaration for QXL_INFO_IO */
320void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...); 329void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
321 330
322extern struct drm_ioctl_desc qxl_ioctls[]; 331extern const struct drm_ioctl_desc qxl_ioctls[];
323extern int qxl_max_ioctl; 332extern int qxl_max_ioctl;
324 333
325int qxl_driver_load(struct drm_device *dev, unsigned long flags); 334int qxl_driver_load(struct drm_device *dev, unsigned long flags);
@@ -396,9 +405,6 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
396 bool discardable, bool kernel, 405 bool discardable, bool kernel,
397 struct qxl_surface *surf, 406 struct qxl_surface *surf,
398 struct drm_gem_object **obj); 407 struct drm_gem_object **obj);
399int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
400 uint64_t *gpu_addr);
401void qxl_gem_object_unpin(struct drm_gem_object *obj);
402int qxl_gem_object_create_with_handle(struct qxl_device *qdev, 408int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
403 struct drm_file *file_priv, 409 struct drm_file *file_priv,
404 u32 domain, 410 u32 domain,
@@ -418,9 +424,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
418int qxl_mode_dumb_create(struct drm_file *file_priv, 424int qxl_mode_dumb_create(struct drm_file *file_priv,
419 struct drm_device *dev, 425 struct drm_device *dev,
420 struct drm_mode_create_dumb *args); 426 struct drm_mode_create_dumb *args);
421int qxl_mode_dumb_destroy(struct drm_file *file_priv,
422 struct drm_device *dev,
423 uint32_t handle);
424int qxl_mode_dumb_mmap(struct drm_file *filp, 427int qxl_mode_dumb_mmap(struct drm_file *filp,
425 struct drm_device *dev, 428 struct drm_device *dev,
426 uint32_t handle, uint64_t *offset_p); 429 uint32_t handle, uint64_t *offset_p);
@@ -433,12 +436,19 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
433 436
434/* qxl image */ 437/* qxl image */
435 438
436int qxl_image_create(struct qxl_device *qdev, 439int qxl_image_init(struct qxl_device *qdev,
437 struct qxl_release *release, 440 struct qxl_release *release,
438 struct qxl_bo **image_bo, 441 struct qxl_drm_image *dimage,
439 const uint8_t *data, 442 const uint8_t *data,
440 int x, int y, int width, int height, 443 int x, int y, int width, int height,
441 int depth, int stride); 444 int depth, int stride);
445int
446qxl_image_alloc_objects(struct qxl_device *qdev,
447 struct qxl_release *release,
448 struct qxl_drm_image **image_ptr,
449 int height, int stride);
450void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage);
451
442void qxl_update_screen(struct qxl_device *qxl); 452void qxl_update_screen(struct qxl_device *qxl);
443 453
444/* qxl io operations (qxl_cmd.c) */ 454/* qxl io operations (qxl_cmd.c) */
@@ -459,20 +469,15 @@ int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible
459void qxl_io_flush_release(struct qxl_device *qdev); 469void qxl_io_flush_release(struct qxl_device *qdev);
460void qxl_io_flush_surfaces(struct qxl_device *qdev); 470void qxl_io_flush_surfaces(struct qxl_device *qdev);
461 471
462int qxl_release_reserve(struct qxl_device *qdev,
463 struct qxl_release *release, bool no_wait);
464void qxl_release_unreserve(struct qxl_device *qdev,
465 struct qxl_release *release);
466union qxl_release_info *qxl_release_map(struct qxl_device *qdev, 472union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
467 struct qxl_release *release); 473 struct qxl_release *release);
468void qxl_release_unmap(struct qxl_device *qdev, 474void qxl_release_unmap(struct qxl_device *qdev,
469 struct qxl_release *release, 475 struct qxl_release *release,
470 union qxl_release_info *info); 476 union qxl_release_info *info);
471/* 477int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo);
472 * qxl_bo_add_resource. 478int qxl_release_reserve_list(struct qxl_release *release, bool no_intr);
473 * 479void qxl_release_backoff_reserve_list(struct qxl_release *release);
474 */ 480void qxl_release_fence_buffer_objects(struct qxl_release *release);
475void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource);
476 481
477int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, 482int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
478 enum qxl_surface_cmd_type surface_cmd_type, 483 enum qxl_surface_cmd_type surface_cmd_type,
@@ -481,15 +486,16 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
481int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, 486int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
482 int type, struct qxl_release **release, 487 int type, struct qxl_release **release,
483 struct qxl_bo **rbo); 488 struct qxl_bo **rbo);
484int qxl_fence_releaseable(struct qxl_device *qdev, 489
485 struct qxl_release *release);
486int 490int
487qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, 491qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
488 uint32_t type, bool interruptible); 492 uint32_t type, bool interruptible);
489int 493int
490qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, 494qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
491 uint32_t type, bool interruptible); 495 uint32_t type, bool interruptible);
492int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, 496int qxl_alloc_bo_reserved(struct qxl_device *qdev,
497 struct qxl_release *release,
498 unsigned long size,
493 struct qxl_bo **_bo); 499 struct qxl_bo **_bo);
494/* qxl drawing commands */ 500/* qxl drawing commands */
495 501
@@ -510,15 +516,9 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
510 u32 sx, u32 sy, 516 u32 sx, u32 sy,
511 u32 dx, u32 dy); 517 u32 dx, u32 dy);
512 518
513uint64_t
514qxl_release_alloc(struct qxl_device *qdev, int type,
515 struct qxl_release **ret);
516
517void qxl_release_free(struct qxl_device *qdev, 519void qxl_release_free(struct qxl_device *qdev,
518 struct qxl_release *release); 520 struct qxl_release *release);
519void qxl_release_add_res(struct qxl_device *qdev, 521
520 struct qxl_release *release,
521 struct qxl_bo *bo);
522/* used by qxl_debugfs_release */ 522/* used by qxl_debugfs_release */
523struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, 523struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
524 uint64_t id); 524 uint64_t id);
@@ -561,7 +561,7 @@ void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freein
561int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf); 561int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
562 562
563/* qxl_fence.c */ 563/* qxl_fence.c */
564int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id); 564void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id);
565int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id); 565int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
566int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence); 566int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
567void qxl_fence_fini(struct qxl_fence *qfence); 567void qxl_fence_fini(struct qxl_fence *qfence);
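
The header changes above retire the per-bo release helpers (qxl_release_reserve/unreserve, qxl_release_add_res, qxl_fence_releaseable, qxl_bo_add_resource) in favour of a list-based API. A hedged sketch of the call sequence the new declarations imply; error handling is abbreviated, and qdev, some_bo, release_type and cmd_type are placeholders assumed from the surrounding driver context:

	struct qxl_release *release;
	struct qxl_bo *cmd_bo;
	int ret;

	ret = qxl_alloc_release_reserved(qdev, size, release_type, &release, &cmd_bo);
	if (ret)
		return ret;
	ret = qxl_release_list_add(release, some_bo);	/* track every bo the command touches */
	if (ret)
		goto err_free;
	ret = qxl_release_reserve_list(release, false);	/* reserve + validate the whole list */
	if (ret)
		goto err_free;
	ret = qxl_push_command_ring_release(qdev, release, cmd_type, true);
	if (ret) {
		qxl_release_backoff_reserve_list(release);	/* undo reservations on failure */
		goto err_free;
	}
	qxl_release_fence_buffer_objects(release);	/* fence + unreserve on success */
	return 0;
err_free:
	qxl_release_free(qdev, release);
	return ret;
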
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index 847c4ee798f7..d34bb4130ff0 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -68,13 +68,6 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
68 return 0; 68 return 0;
69} 69}
70 70
71int qxl_mode_dumb_destroy(struct drm_file *file_priv,
72 struct drm_device *dev,
73 uint32_t handle)
74{
75 return drm_gem_handle_delete(file_priv, handle);
76}
77
78int qxl_mode_dumb_mmap(struct drm_file *file_priv, 71int qxl_mode_dumb_mmap(struct drm_file *file_priv,
79 struct drm_device *dev, 72 struct drm_device *dev,
80 uint32_t handle, uint64_t *offset_p) 73 uint32_t handle, uint64_t *offset_p)
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 76f39d88d684..88722f233430 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -37,12 +37,29 @@
37 37
38#define QXL_DIRTY_DELAY (HZ / 30) 38#define QXL_DIRTY_DELAY (HZ / 30)
39 39
40#define QXL_FB_OP_FILLRECT 1
41#define QXL_FB_OP_COPYAREA 2
42#define QXL_FB_OP_IMAGEBLIT 3
43
44struct qxl_fb_op {
45 struct list_head head;
46 int op_type;
47 union {
48 struct fb_fillrect fr;
49 struct fb_copyarea ca;
50 struct fb_image ib;
51 } op;
52 void *img_data;
53};
54
40struct qxl_fbdev { 55struct qxl_fbdev {
41 struct drm_fb_helper helper; 56 struct drm_fb_helper helper;
42 struct qxl_framebuffer qfb; 57 struct qxl_framebuffer qfb;
43 struct list_head fbdev_list; 58 struct list_head fbdev_list;
44 struct qxl_device *qdev; 59 struct qxl_device *qdev;
45 60
61 spinlock_t delayed_ops_lock;
62 struct list_head delayed_ops;
46 void *shadow; 63 void *shadow;
47 int size; 64 int size;
48 65
@@ -164,8 +181,69 @@ static struct fb_deferred_io qxl_defio = {
164 .deferred_io = qxl_deferred_io, 181 .deferred_io = qxl_deferred_io,
165}; 182};
166 183
167static void qxl_fb_fillrect(struct fb_info *info, 184static void qxl_fb_delayed_fillrect(struct qxl_fbdev *qfbdev,
168 const struct fb_fillrect *fb_rect) 185 const struct fb_fillrect *fb_rect)
186{
187 struct qxl_fb_op *op;
188 unsigned long flags;
189
190 op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
191 if (!op)
192 return;
193
194 op->op.fr = *fb_rect;
195 op->img_data = NULL;
196 op->op_type = QXL_FB_OP_FILLRECT;
197
198 spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
199 list_add_tail(&op->head, &qfbdev->delayed_ops);
200 spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
201}
202
203static void qxl_fb_delayed_copyarea(struct qxl_fbdev *qfbdev,
204 const struct fb_copyarea *fb_copy)
205{
206 struct qxl_fb_op *op;
207 unsigned long flags;
208
209 op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
210 if (!op)
211 return;
212
213 op->op.ca = *fb_copy;
214 op->img_data = NULL;
215 op->op_type = QXL_FB_OP_COPYAREA;
216
217 spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
218 list_add_tail(&op->head, &qfbdev->delayed_ops);
219 spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
220}
221
222static void qxl_fb_delayed_imageblit(struct qxl_fbdev *qfbdev,
223 const struct fb_image *fb_image)
224{
225 struct qxl_fb_op *op;
226 unsigned long flags;
227 uint32_t size = fb_image->width * fb_image->height * (fb_image->depth >= 8 ? fb_image->depth / 8 : 1);
228
229 op = kmalloc(sizeof(struct qxl_fb_op) + size, GFP_ATOMIC | __GFP_NOWARN);
230 if (!op)
231 return;
232
233 op->op.ib = *fb_image;
234 op->img_data = (void *)(op + 1);
235 op->op_type = QXL_FB_OP_IMAGEBLIT;
236
237 memcpy(op->img_data, fb_image->data, size);
238
239 op->op.ib.data = op->img_data;
240 spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
241 list_add_tail(&op->head, &qfbdev->delayed_ops);
242 spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
243}
244
245static void qxl_fb_fillrect_internal(struct fb_info *info,
246 const struct fb_fillrect *fb_rect)
169{ 247{
170 struct qxl_fbdev *qfbdev = info->par; 248 struct qxl_fbdev *qfbdev = info->par;
171 struct qxl_device *qdev = qfbdev->qdev; 249 struct qxl_device *qdev = qfbdev->qdev;
@@ -203,17 +281,28 @@ static void qxl_fb_fillrect(struct fb_info *info,
203 qxl_draw_fill_rec.rect = rect; 281 qxl_draw_fill_rec.rect = rect;
204 qxl_draw_fill_rec.color = color; 282 qxl_draw_fill_rec.color = color;
205 qxl_draw_fill_rec.rop = rop; 283 qxl_draw_fill_rec.rop = rop;
284
285 qxl_draw_fill(&qxl_draw_fill_rec);
286}
287
288static void qxl_fb_fillrect(struct fb_info *info,
289 const struct fb_fillrect *fb_rect)
290{
291 struct qxl_fbdev *qfbdev = info->par;
292 struct qxl_device *qdev = qfbdev->qdev;
293
206 if (!drm_can_sleep()) { 294 if (!drm_can_sleep()) {
207 qxl_io_log(qdev, 295 qxl_fb_delayed_fillrect(qfbdev, fb_rect);
208 "%s: TODO use RCU, mysterious locks with spin_lock\n", 296 schedule_work(&qdev->fb_work);
209 __func__);
210 return; 297 return;
211 } 298 }
212 qxl_draw_fill(&qxl_draw_fill_rec); 299 /* make sure any previous work is done */
300 flush_work(&qdev->fb_work);
301 qxl_fb_fillrect_internal(info, fb_rect);
213} 302}
214 303
215static void qxl_fb_copyarea(struct fb_info *info, 304static void qxl_fb_copyarea_internal(struct fb_info *info,
216 const struct fb_copyarea *region) 305 const struct fb_copyarea *region)
217{ 306{
218 struct qxl_fbdev *qfbdev = info->par; 307 struct qxl_fbdev *qfbdev = info->par;
219 308
@@ -223,37 +312,89 @@ static void qxl_fb_copyarea(struct fb_info *info,
223 region->dx, region->dy); 312 region->dx, region->dy);
224} 313}
225 314
315static void qxl_fb_copyarea(struct fb_info *info,
316 const struct fb_copyarea *region)
317{
318 struct qxl_fbdev *qfbdev = info->par;
319 struct qxl_device *qdev = qfbdev->qdev;
320
321 if (!drm_can_sleep()) {
322 qxl_fb_delayed_copyarea(qfbdev, region);
323 schedule_work(&qdev->fb_work);
324 return;
325 }
326 /* make sure any previous work is done */
327 flush_work(&qdev->fb_work);
328 qxl_fb_copyarea_internal(info, region);
329}
330
226static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image) 331static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
227{ 332{
228 qxl_draw_opaque_fb(qxl_fb_image, 0); 333 qxl_draw_opaque_fb(qxl_fb_image, 0);
229} 334}
230 335
336static void qxl_fb_imageblit_internal(struct fb_info *info,
337 const struct fb_image *image)
338{
339 struct qxl_fbdev *qfbdev = info->par;
340 struct qxl_fb_image qxl_fb_image;
341
342 /* ensure proper order of rendering operations - TODO: must do this
343 * for everything. */
344 qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
345 qxl_fb_imageblit_safe(&qxl_fb_image);
346}
347
231static void qxl_fb_imageblit(struct fb_info *info, 348static void qxl_fb_imageblit(struct fb_info *info,
232 const struct fb_image *image) 349 const struct fb_image *image)
233{ 350{
234 struct qxl_fbdev *qfbdev = info->par; 351 struct qxl_fbdev *qfbdev = info->par;
235 struct qxl_device *qdev = qfbdev->qdev; 352 struct qxl_device *qdev = qfbdev->qdev;
236 struct qxl_fb_image qxl_fb_image;
237 353
238 if (!drm_can_sleep()) { 354 if (!drm_can_sleep()) {
239 /* we cannot do any ttm_bo allocation since that will fail on 355 qxl_fb_delayed_imageblit(qfbdev, image);
240 * ioremap_wc..__get_vm_area_node, so queue the work item 356 schedule_work(&qdev->fb_work);
241 * instead This can happen from printk inside an interrupt
242 * context, i.e.: smp_apic_timer_interrupt..check_cpu_stall */
243 qxl_io_log(qdev,
244 "%s: TODO use RCU, mysterious locks with spin_lock\n",
245 __func__);
246 return; 357 return;
247 } 358 }
359 /* make sure any previous work is done */
360 flush_work(&qdev->fb_work);
361 qxl_fb_imageblit_internal(info, image);
362}
248 363
249 /* ensure proper order of rendering operations - TODO: must do this 364static void qxl_fb_work(struct work_struct *work)
250 * for everything. */ 365{
251 qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image); 366 struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
252 qxl_fb_imageblit_safe(&qxl_fb_image); 367 unsigned long flags;
368 struct qxl_fb_op *entry, *tmp;
369 struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;
370
371 /* since the irq context just adds entries to the end of the
372 list, dropping the lock should be fine, as the entry isn't modified
373 in the operation code */
374 spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
375 list_for_each_entry_safe(entry, tmp, &qfbdev->delayed_ops, head) {
376 spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
377 switch (entry->op_type) {
378 case QXL_FB_OP_FILLRECT:
379 qxl_fb_fillrect_internal(qfbdev->helper.fbdev, &entry->op.fr);
380 break;
381 case QXL_FB_OP_COPYAREA:
382 qxl_fb_copyarea_internal(qfbdev->helper.fbdev, &entry->op.ca);
383 break;
384 case QXL_FB_OP_IMAGEBLIT:
385 qxl_fb_imageblit_internal(qfbdev->helper.fbdev, &entry->op.ib);
386 break;
387 }
388 spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
389 list_del(&entry->head);
390 kfree(entry);
391 }
392 spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
253} 393}
254 394
255int qxl_fb_init(struct qxl_device *qdev) 395int qxl_fb_init(struct qxl_device *qdev)
256{ 396{
397 INIT_WORK(&qdev->fb_work, qxl_fb_work);
257 return 0; 398 return 0;
258} 399}
259 400
@@ -536,7 +677,8 @@ int qxl_fbdev_init(struct qxl_device *qdev)
536 qfbdev->qdev = qdev; 677 qfbdev->qdev = qdev;
537 qdev->mode_info.qfbdev = qfbdev; 678 qdev->mode_info.qfbdev = qfbdev;
538 qfbdev->helper.funcs = &qxl_fb_helper_funcs; 679 qfbdev->helper.funcs = &qxl_fb_helper_funcs;
539 680 spin_lock_init(&qfbdev->delayed_ops_lock);
681 INIT_LIST_HEAD(&qfbdev->delayed_ops);
540 ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, 682 ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
541 qxl_num_crtc /* num_crtc - QXL supports just 1 */, 683 qxl_num_crtc /* num_crtc - QXL supports just 1 */,
542 QXLFB_CONN_LIMIT); 684 QXLFB_CONN_LIMIT);
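
The qxl_fb.c rework above handles fbcon calls that arrive in atomic context (for example a printk from an interrupt handler): when drm_can_sleep() is false, the operation is copied onto a spinlock-protected list and qxl_fb_work() drains it later. Below is a minimal, self-contained userspace analogue of that producer/worker pattern; all names are illustrative, and a pthread mutex stands in for the irq-safe spinlock.

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct op {
		struct op *next;
		int type;
	};

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static struct op *head;
	static struct op **tail = &head;

	/* producer side: must not block, mirrors the GFP_ATOMIC path */
	static void queue_op(int type)
	{
		struct op *op = malloc(sizeof(*op));

		if (!op)
			return;	/* like the driver, silently drop on allocation failure */
		op->type = type;
		op->next = NULL;
		pthread_mutex_lock(&lock);
		*tail = op;
		tail = &op->next;
		pthread_mutex_unlock(&lock);
	}

	/* worker side: drop the lock around the slow work, unlink under it */
	static void drain_ops(void)
	{
		pthread_mutex_lock(&lock);
		while (head) {
			struct op *op = head;

			pthread_mutex_unlock(&lock);
			printf("processing op %d\n", op->type);	/* the "draw" */
			pthread_mutex_lock(&lock);
			head = op->next;
			if (!head)
				tail = &head;
			free(op);
		}
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		queue_op(1);
		queue_op(2);
		drain_ops();
		return 0;
	}

Dropping the lock mid-walk is safe only because producers append at the tail, the same invariant the comment in qxl_fb_work() relies on.
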
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c
index 63c6715ad385..ae59e91cfb9a 100644
--- a/drivers/gpu/drm/qxl/qxl_fence.c
+++ b/drivers/gpu/drm/qxl/qxl_fence.c
@@ -49,17 +49,11 @@
49 49
50 For some reason every so often qxl hw fails to release, things go wrong. 50 For some reason every so often qxl hw fails to release, things go wrong.
51*/ 51*/
52 52/* must be called with the fence lock held */
53 53void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id)
54int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id)
55{ 54{
56 struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
57
58 spin_lock(&bo->tbo.bdev->fence_lock);
59 radix_tree_insert(&qfence->tree, rel_id, qfence); 55 radix_tree_insert(&qfence->tree, rel_id, qfence);
60 qfence->num_active_releases++; 56 qfence->num_active_releases++;
61 spin_unlock(&bo->tbo.bdev->fence_lock);
62 return 0;
63} 57}
64 58
65int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id) 59int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
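
qxl_fence_add_release() used to take bo->tbo.bdev->fence_lock around each radix-tree insert. The _locked variant moves that responsibility to the caller, so a whole list of bos can be fenced under a single acquisition, which is exactly what qxl_release_fence_buffer_objects() in qxl_release.c now does. Abbreviated sketch of the caller side, with the driver context assumed:

	spin_lock(&bdev->fence_lock);
	list_for_each_entry(entry, &release->bos, head) {
		struct qxl_bo *qbo = to_qxl_bo(entry->bo);

		qxl_fence_add_release_locked(&qbo->fence, release->id);
	}
	spin_unlock(&bdev->fence_lock);
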
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index a235693aabba..1648e4125af7 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
55 /* At least align on page size */ 55 /* At least align on page size */
56 if (alignment < PAGE_SIZE) 56 if (alignment < PAGE_SIZE)
57 alignment = PAGE_SIZE; 57 alignment = PAGE_SIZE;
58 r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo); 58 r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
59 if (r) { 59 if (r) {
60 if (r != -ERESTARTSYS) 60 if (r != -ERESTARTSYS)
61 DRM_ERROR( 61 DRM_ERROR(
@@ -101,32 +101,6 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
101 return 0; 101 return 0;
102} 102}
103 103
104int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
105 uint64_t *gpu_addr)
106{
107 struct qxl_bo *qobj = obj->driver_private;
108 int r;
109
110 r = qxl_bo_reserve(qobj, false);
111 if (unlikely(r != 0))
112 return r;
113 r = qxl_bo_pin(qobj, pin_domain, gpu_addr);
114 qxl_bo_unreserve(qobj);
115 return r;
116}
117
118void qxl_gem_object_unpin(struct drm_gem_object *obj)
119{
120 struct qxl_bo *qobj = obj->driver_private;
121 int r;
122
123 r = qxl_bo_reserve(qobj, false);
124 if (likely(r == 0)) {
125 qxl_bo_unpin(qobj);
126 qxl_bo_unreserve(qobj);
127 }
128}
129
130int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) 104int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
131{ 105{
132 return 0; 106 return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index cf856206996b..7fbcc35e8ad3 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -30,31 +30,100 @@
30#include "qxl_object.h" 30#include "qxl_object.h"
31 31
32static int 32static int
33qxl_image_create_helper(struct qxl_device *qdev, 33qxl_allocate_chunk(struct qxl_device *qdev,
34 struct qxl_release *release,
35 struct qxl_drm_image *image,
36 unsigned int chunk_size)
37{
38 struct qxl_drm_chunk *chunk;
39 int ret;
40
41 chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
42 if (!chunk)
43 return -ENOMEM;
44
45 ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
46 if (ret) {
47 kfree(chunk);
48 return ret;
49 }
50
51 list_add_tail(&chunk->head, &image->chunk_list);
52 return 0;
53}
54
55int
56qxl_image_alloc_objects(struct qxl_device *qdev,
34 struct qxl_release *release, 57 struct qxl_release *release,
35 struct qxl_bo **image_bo, 58 struct qxl_drm_image **image_ptr,
36 const uint8_t *data, 59 int height, int stride)
37 int width, int height, 60{
38 int depth, unsigned int hash, 61 struct qxl_drm_image *image;
39 int stride) 62 int ret;
63
64 image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
65 if (!image)
66 return -ENOMEM;
67
68 INIT_LIST_HEAD(&image->chunk_list);
69
70 ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
71 if (ret) {
72 kfree(image);
73 return ret;
74 }
75
76 ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
77 if (ret) {
78 qxl_bo_unref(&image->bo);
79 kfree(image);
80 return ret;
81 }
82 *image_ptr = image;
83 return 0;
84}
85
86void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
40{ 87{
88 struct qxl_drm_chunk *chunk, *tmp;
89
90 list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
91 qxl_bo_unref(&chunk->bo);
92 kfree(chunk);
93 }
94
95 qxl_bo_unref(&dimage->bo);
96 kfree(dimage);
97}
98
99static int
100qxl_image_init_helper(struct qxl_device *qdev,
101 struct qxl_release *release,
102 struct qxl_drm_image *dimage,
103 const uint8_t *data,
104 int width, int height,
105 int depth, unsigned int hash,
106 int stride)
107{
108 struct qxl_drm_chunk *drv_chunk;
41 struct qxl_image *image; 109 struct qxl_image *image;
42 struct qxl_data_chunk *chunk; 110 struct qxl_data_chunk *chunk;
43 int i; 111 int i;
44 int chunk_stride; 112 int chunk_stride;
45 int linesize = width * depth / 8; 113 int linesize = width * depth / 8;
46 struct qxl_bo *chunk_bo; 114 struct qxl_bo *chunk_bo, *image_bo;
47 int ret;
48 void *ptr; 115 void *ptr;
49 /* Chunk */ 116 /* Chunk */
50 /* FIXME: Check integer overflow */ 117 /* FIXME: Check integer overflow */
51 /* TODO: variable number of chunks */ 118 /* TODO: variable number of chunks */
119
120 drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);
121
122 chunk_bo = drv_chunk->bo;
52 chunk_stride = stride; /* TODO: should use linesize, but it renders 123 chunk_stride = stride; /* TODO: should use linesize, but it renders
53 wrong (check the bitmaps are sent correctly 124 wrong (check the bitmaps are sent correctly
54 first) */ 125 first) */
55 ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride, 126
56 &chunk_bo);
57
58 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0); 127 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
59 chunk = ptr; 128 chunk = ptr;
60 chunk->data_size = height * chunk_stride; 129 chunk->data_size = height * chunk_stride;
@@ -102,7 +171,6 @@ qxl_image_create_helper(struct qxl_device *qdev,
102 while (remain > 0) { 171 while (remain > 0) {
103 page_base = out_offset & PAGE_MASK; 172 page_base = out_offset & PAGE_MASK;
104 page_offset = offset_in_page(out_offset); 173 page_offset = offset_in_page(out_offset);
105
106 size = min((int)(PAGE_SIZE - page_offset), remain); 174 size = min((int)(PAGE_SIZE - page_offset), remain);
107 175
108 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base); 176 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
@@ -116,14 +184,10 @@ qxl_image_create_helper(struct qxl_device *qdev,
116 } 184 }
117 } 185 }
118 } 186 }
119
120
121 qxl_bo_kunmap(chunk_bo); 187 qxl_bo_kunmap(chunk_bo);
122 188
123 /* Image */ 189 image_bo = dimage->bo;
124 ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo); 190 ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
125
126 ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0);
127 image = ptr; 191 image = ptr;
128 192
129 image->descriptor.id = 0; 193 image->descriptor.id = 0;
@@ -154,23 +218,20 @@ qxl_image_create_helper(struct qxl_device *qdev,
154 image->u.bitmap.stride = chunk_stride; 218 image->u.bitmap.stride = chunk_stride;
155 image->u.bitmap.palette = 0; 219 image->u.bitmap.palette = 0;
156 image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0); 220 image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
157 qxl_release_add_res(qdev, release, chunk_bo);
158 qxl_bo_unreserve(chunk_bo);
159 qxl_bo_unref(&chunk_bo);
160 221
161 qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr); 222 qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
162 223
163 return 0; 224 return 0;
164} 225}
165 226
166int qxl_image_create(struct qxl_device *qdev, 227int qxl_image_init(struct qxl_device *qdev,
167 struct qxl_release *release, 228 struct qxl_release *release,
168 struct qxl_bo **image_bo, 229 struct qxl_drm_image *dimage,
169 const uint8_t *data, 230 const uint8_t *data,
170 int x, int y, int width, int height, 231 int x, int y, int width, int height,
171 int depth, int stride) 232 int depth, int stride)
172{ 233{
173 data += y * stride + x * (depth / 8); 234 data += y * stride + x * (depth / 8);
174 return qxl_image_create_helper(qdev, release, image_bo, data, 235 return qxl_image_init_helper(qdev, release, dimage, data,
175 width, height, depth, 0, stride); 236 width, height, depth, 0, stride);
176} 237}
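
qxl_image_create() is split into two phases above: qxl_image_alloc_objects() allocates the image bo plus its chunk bos while it is still safe to sleep and cheap to fail, and qxl_image_init() only writes into memory that has already been allocated and reserved. A hedged sketch of the intended ordering; qdev, release and the pixel data are assumed from context:

	struct qxl_drm_image *dimage;
	int ret;

	ret = qxl_image_alloc_objects(qdev, release, &dimage, height, stride);
	if (ret)
		return ret;

	/* ... add the bos to the release and reserve/validate the list ... */

	ret = qxl_image_init(qdev, release, dimage, data,
			     x, y, width, height, depth, stride);
	if (ret)
		qxl_image_free_objects(qdev, dimage);
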
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 27f45e49250d..7b95c75e9626 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -68,55 +68,60 @@ static int qxl_map_ioctl(struct drm_device *dev, void *data,
68 &qxl_map->offset); 68 &qxl_map->offset);
69} 69}
70 70
71struct qxl_reloc_info {
72 int type;
73 struct qxl_bo *dst_bo;
74 uint32_t dst_offset;
75 struct qxl_bo *src_bo;
76 int src_offset;
77};
78
71/* 79/*
72 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's 80 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
73 * are on vram). 81 * are on vram).
74 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off) 82 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
75 */ 83 */
76static void 84static void
77apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, 85apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
78 struct qxl_bo *src, uint64_t src_off)
79{ 86{
80 void *reloc_page; 87 void *reloc_page;
81 88 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
82 reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); 89 *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
83 *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, 90 info->src_bo,
84 src, src_off); 91 info->src_offset);
85 qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); 92 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
86} 93}
87 94
88static void 95static void
89apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, 96apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
90 struct qxl_bo *src)
91{ 97{
92 uint32_t id = 0; 98 uint32_t id = 0;
93 void *reloc_page; 99 void *reloc_page;
94 100
95 if (src && !src->is_primary) 101 if (info->src_bo && !info->src_bo->is_primary)
96 id = src->surface_id; 102 id = info->src_bo->surface_id;
97 103
98 reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); 104 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
99 *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id; 105 *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
100 qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); 106 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
101} 107}
102 108
103/* return holding the reference to this object */ 109/* return holding the reference to this object */
104static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, 110static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
105 struct drm_file *file_priv, uint64_t handle, 111 struct drm_file *file_priv, uint64_t handle,
106 struct qxl_reloc_list *reloc_list) 112 struct qxl_release *release)
107{ 113{
108 struct drm_gem_object *gobj; 114 struct drm_gem_object *gobj;
109 struct qxl_bo *qobj; 115 struct qxl_bo *qobj;
110 int ret; 116 int ret;
111 117
112 gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle); 118 gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
113 if (!gobj) { 119 if (!gobj)
114 DRM_ERROR("bad bo handle %lld\n", handle);
115 return NULL; 120 return NULL;
116 } 121
117 qobj = gem_to_qxl_bo(gobj); 122 qobj = gem_to_qxl_bo(gobj);
118 123
119 ret = qxl_bo_list_add(reloc_list, qobj); 124 ret = qxl_release_list_add(release, qobj);
120 if (ret) 125 if (ret)
121 return NULL; 126 return NULL;
122 127
@@ -129,151 +134,177 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
129 * However, the command as passed from user space must *not* contain the initial 134 * However, the command as passed from user space must *not* contain the initial
130 * QXLReleaseInfo struct (first XXX bytes) 135 * QXLReleaseInfo struct (first XXX bytes)
131 */ 136 */
132static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, 137static int qxl_process_single_command(struct qxl_device *qdev,
133 struct drm_file *file_priv) 138 struct drm_qxl_command *cmd,
139 struct drm_file *file_priv)
134{ 140{
135 struct qxl_device *qdev = dev->dev_private; 141 struct qxl_reloc_info *reloc_info;
136 struct drm_qxl_execbuffer *execbuffer = data; 142 int release_type;
137 struct drm_qxl_command user_cmd; 143 struct qxl_release *release;
138 int cmd_num; 144 struct qxl_bo *cmd_bo;
139 struct qxl_bo *reloc_src_bo;
140 struct qxl_bo *reloc_dst_bo;
141 struct drm_qxl_reloc reloc;
142 void *fb_cmd; 145 void *fb_cmd;
143 int i, ret; 146 int i, j, ret, num_relocs;
144 struct qxl_reloc_list reloc_list;
145 int unwritten; 147 int unwritten;
146 uint32_t reloc_dst_offset;
147 INIT_LIST_HEAD(&reloc_list.bos);
148 148
149 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { 149 switch (cmd->type) {
150 struct qxl_release *release; 150 case QXL_CMD_DRAW:
151 struct qxl_bo *cmd_bo; 151 release_type = QXL_RELEASE_DRAWABLE;
152 int release_type; 152 break;
153 struct drm_qxl_command *commands = 153 case QXL_CMD_SURFACE:
154 (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; 154 case QXL_CMD_CURSOR:
155 default:
156 DRM_DEBUG("Only draw commands in execbuffers\n");
157 return -EINVAL;
158 break;
159 }
155 160
156 if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], 161 if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
157 sizeof(user_cmd))) 162 return -EINVAL;
158 return -EFAULT;
159 switch (user_cmd.type) {
160 case QXL_CMD_DRAW:
161 release_type = QXL_RELEASE_DRAWABLE;
162 break;
163 case QXL_CMD_SURFACE:
164 case QXL_CMD_CURSOR:
165 default:
166 DRM_DEBUG("Only draw commands in execbuffers\n");
167 return -EINVAL;
168 break;
169 }
170 163
171 if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info)) 164 if (!access_ok(VERIFY_READ,
172 return -EINVAL; 165 (void *)(unsigned long)cmd->command,
166 cmd->command_size))
167 return -EFAULT;
173 168
174 if (!access_ok(VERIFY_READ, 169 reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
175 (void *)(unsigned long)user_cmd.command, 170 if (!reloc_info)
176 user_cmd.command_size)) 171 return -ENOMEM;
177 return -EFAULT;
178 172
179 ret = qxl_alloc_release_reserved(qdev, 173 ret = qxl_alloc_release_reserved(qdev,
180 sizeof(union qxl_release_info) + 174 sizeof(union qxl_release_info) +
181 user_cmd.command_size, 175 cmd->command_size,
182 release_type, 176 release_type,
183 &release, 177 &release,
184 &cmd_bo); 178 &cmd_bo);
185 if (ret) 179 if (ret)
186 return ret; 180 goto out_free_reloc;
187 181
188 /* TODO copy slow path code from i915 */ 182 /* TODO copy slow path code from i915 */
189 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); 183 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
190 unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size); 184 unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
191 185
192 { 186 {
193 struct qxl_drawable *draw = fb_cmd; 187 struct qxl_drawable *draw = fb_cmd;
188 draw->mm_time = qdev->rom->mm_clock;
189 }
194 190
195 draw->mm_time = qdev->rom->mm_clock; 191 qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
196 } 192 if (unwritten) {
197 qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd); 193 DRM_ERROR("got unwritten %d\n", unwritten);
198 if (unwritten) { 194 ret = -EFAULT;
199 DRM_ERROR("got unwritten %d\n", unwritten); 195 goto out_free_release;
200 qxl_release_unreserve(qdev, release); 196 }
201 qxl_release_free(qdev, release); 197
202 return -EFAULT; 198 /* fill out reloc info structs */
199 num_relocs = 0;
200 for (i = 0; i < cmd->relocs_num; ++i) {
201 struct drm_qxl_reloc reloc;
202
203 if (DRM_COPY_FROM_USER(&reloc,
204 &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
205 sizeof(reloc))) {
206 ret = -EFAULT;
207 goto out_free_bos;
203 } 208 }
204 209
205 for (i = 0 ; i < user_cmd.relocs_num; ++i) { 210 /* add the bos to the list of bos to validate -
206 if (DRM_COPY_FROM_USER(&reloc, 211 need to validate first then process relocs? */
207 &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i], 212 if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
208 sizeof(reloc))) { 213 DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);
209 qxl_bo_list_unreserve(&reloc_list, true);
210 qxl_release_unreserve(qdev, release);
211 qxl_release_free(qdev, release);
212 return -EFAULT;
213 }
214 214
215 /* add the bos to the list of bos to validate - 215 ret = -EINVAL;
216 need to validate first then process relocs? */ 216 goto out_free_bos;
217 if (reloc.dst_handle) { 217 }
218 reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv, 218 reloc_info[i].type = reloc.reloc_type;
219 reloc.dst_handle, &reloc_list); 219
220 if (!reloc_dst_bo) { 220 if (reloc.dst_handle) {
221 qxl_bo_list_unreserve(&reloc_list, true); 221 reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
222 qxl_release_unreserve(qdev, release); 222 reloc.dst_handle, release);
223 qxl_release_free(qdev, release); 223 if (!reloc_info[i].dst_bo) {
224 return -EINVAL; 224 ret = -EINVAL;
225 } 225 reloc_info[i].src_bo = NULL;
226 reloc_dst_offset = 0; 226 goto out_free_bos;
227 } else {
228 reloc_dst_bo = cmd_bo;
229 reloc_dst_offset = release->release_offset;
230 } 227 }
231 228 reloc_info[i].dst_offset = reloc.dst_offset;
232 /* reserve and validate the reloc dst bo */ 229 } else {
233 if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) { 230 reloc_info[i].dst_bo = cmd_bo;
234 reloc_src_bo = 231 reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
235 qxlhw_handle_to_bo(qdev, file_priv, 232 }
236 reloc.src_handle, &reloc_list); 233 num_relocs++;
237 if (!reloc_src_bo) { 234
238 if (reloc_dst_bo != cmd_bo) 235 /* reserve and validate the reloc dst bo */
239 drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); 236 if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
240 qxl_bo_list_unreserve(&reloc_list, true); 237 reloc_info[i].src_bo =
241 qxl_release_unreserve(qdev, release); 238 qxlhw_handle_to_bo(qdev, file_priv,
242 qxl_release_free(qdev, release); 239 reloc.src_handle, release);
243 return -EINVAL; 240 if (!reloc_info[i].src_bo) {
244 } 241 if (reloc_info[i].dst_bo != cmd_bo)
245 } else 242 drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base);
246 reloc_src_bo = NULL; 243 ret = -EINVAL;
247 if (reloc.reloc_type == QXL_RELOC_TYPE_BO) { 244 goto out_free_bos;
248 apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
249 reloc_src_bo, reloc.src_offset);
250 } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
251 apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
252 } else {
253 DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
254 return -EINVAL;
255 } 245 }
246 reloc_info[i].src_offset = reloc.src_offset;
247 } else {
248 reloc_info[i].src_bo = NULL;
249 reloc_info[i].src_offset = 0;
250 }
251 }
256 252
257 if (reloc_src_bo && reloc_src_bo != cmd_bo) { 253 /* validate all buffers */
258 qxl_release_add_res(qdev, release, reloc_src_bo); 254 ret = qxl_release_reserve_list(release, false);
259 drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base); 255 if (ret)
260 } 256 goto out_free_bos;
261 257
262 if (reloc_dst_bo != cmd_bo) 258 for (i = 0; i < cmd->relocs_num; ++i) {
263 drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); 259 if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
264 } 260 apply_reloc(qdev, &reloc_info[i]);
265 qxl_fence_releaseable(qdev, release); 261 else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
262 apply_surf_reloc(qdev, &reloc_info[i]);
263 }
266 264
267 ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true); 265 ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
268 if (ret == -ERESTARTSYS) { 266 if (ret)
269 qxl_release_unreserve(qdev, release); 267 qxl_release_backoff_reserve_list(release);
270 qxl_release_free(qdev, release); 268 else
271 qxl_bo_list_unreserve(&reloc_list, true); 269 qxl_release_fence_buffer_objects(release);
270
271out_free_bos:
272 for (j = 0; j < num_relocs; j++) {
273 if (reloc_info[j].dst_bo != cmd_bo)
274 drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base);
275 if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo)
276 drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base);
277 }
278out_free_release:
279 if (ret)
280 qxl_release_free(qdev, release);
281out_free_reloc:
282 kfree(reloc_info);
283 return ret;
284}
285
286static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
287 struct drm_file *file_priv)
288{
289 struct qxl_device *qdev = dev->dev_private;
290 struct drm_qxl_execbuffer *execbuffer = data;
291 struct drm_qxl_command user_cmd;
292 int cmd_num;
293 int ret;
294
295 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
296
297 struct drm_qxl_command *commands =
298 (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
299
300 if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
301 sizeof(user_cmd)))
302 return -EFAULT;
303
304 ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
305 if (ret)
272 return ret; 306 return ret;
273 }
274 qxl_release_unreserve(qdev, release);
275 } 307 }
276 qxl_bo_list_unreserve(&reloc_list, 0);
277 return 0; 308 return 0;
278} 309}
279 310
@@ -305,7 +336,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
305 goto out; 336 goto out;
306 337
307 if (!qobj->pin_count) { 338 if (!qobj->pin_count) {
308 qxl_ttm_placement_from_domain(qobj, qobj->type); 339 qxl_ttm_placement_from_domain(qobj, qobj->type, false);
309 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, 340 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
310 true, false); 341 true, false);
311 if (unlikely(ret)) 342 if (unlikely(ret))
@@ -402,7 +433,7 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
402 return ret; 433 return ret;
403} 434}
404 435
405struct drm_ioctl_desc qxl_ioctls[] = { 436const struct drm_ioctl_desc qxl_ioctls[] = {
406 DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED), 437 DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
407 438
408 DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED), 439 DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
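
The execbuffer rework above pulls the loop body out into qxl_process_single_command() and reorders it into gather (copy all relocs from user space into reloc_info[]), validate (qxl_release_reserve_list() on every referenced bo), then apply (apply_reloc()/apply_surf_reloc()), unwinding through one chain of labels instead of the old duplicated cleanup. A small, self-contained sketch of that gather/validate/apply shape with a single unwind path; all names are illustrative:

	#include <errno.h>
	#include <stdlib.h>

	struct item {
		int value;
	};

	static int validate(const struct item *it)
	{
		return it->value >= 0 ? 0 : -EINVAL;
	}

	static void apply(struct item *it)
	{
		it->value++;
	}

	static int process(const struct item *in, int n)
	{
		struct item *info;
		int i, ret = 0;

		info = malloc(sizeof(*info) * n);	/* gather */
		if (!info)
			return -ENOMEM;
		for (i = 0; i < n; i++)
			info[i] = in[i];

		for (i = 0; i < n; i++) {		/* validate everything first */
			ret = validate(&info[i]);
			if (ret)
				goto out_free;		/* one unwind path */
		}

		for (i = 0; i < n; i++)			/* only then apply */
			apply(&info[i]);

	out_free:
		free(info);
		return ret;
	}

	int main(void)
	{
		struct item in[2] = { { 1 }, { 2 } };

		return process(in, 2) ? 1 : 0;
	}
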
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 1191fe7788c9..8691c76c5ef0 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -51,20 +51,21 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
51 return false; 51 return false;
52} 52}
53 53
54void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) 54void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
55{ 55{
56 u32 c = 0; 56 u32 c = 0;
57 u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
57 58
58 qbo->placement.fpfn = 0; 59 qbo->placement.fpfn = 0;
59 qbo->placement.lpfn = 0; 60 qbo->placement.lpfn = 0;
60 qbo->placement.placement = qbo->placements; 61 qbo->placement.placement = qbo->placements;
61 qbo->placement.busy_placement = qbo->placements; 62 qbo->placement.busy_placement = qbo->placements;
62 if (domain == QXL_GEM_DOMAIN_VRAM) 63 if (domain == QXL_GEM_DOMAIN_VRAM)
63 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM; 64 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
64 if (domain == QXL_GEM_DOMAIN_SURFACE) 65 if (domain == QXL_GEM_DOMAIN_SURFACE)
65 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0; 66 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
66 if (domain == QXL_GEM_DOMAIN_CPU) 67 if (domain == QXL_GEM_DOMAIN_CPU)
67 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 68 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
68 if (!c) 69 if (!c)
69 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 70 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
70 qbo->placement.num_placement = c; 71 qbo->placement.num_placement = c;
@@ -73,7 +74,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
73 74
74 75
75int qxl_bo_create(struct qxl_device *qdev, 76int qxl_bo_create(struct qxl_device *qdev,
76 unsigned long size, bool kernel, u32 domain, 77 unsigned long size, bool kernel, bool pinned, u32 domain,
77 struct qxl_surface *surf, 78 struct qxl_surface *surf,
78 struct qxl_bo **bo_ptr) 79 struct qxl_bo **bo_ptr)
79{ 80{
@@ -97,17 +98,16 @@ int qxl_bo_create(struct qxl_device *qdev,
97 kfree(bo); 98 kfree(bo);
98 return r; 99 return r;
99 } 100 }
100 bo->gem_base.driver_private = NULL;
101 bo->type = domain; 101 bo->type = domain;
102 bo->pin_count = 0; 102 bo->pin_count = pinned ? 1 : 0;
103 bo->surface_id = 0; 103 bo->surface_id = 0;
104 qxl_fence_init(qdev, &bo->fence); 104 qxl_fence_init(qdev, &bo->fence);
105 INIT_LIST_HEAD(&bo->list); 105 INIT_LIST_HEAD(&bo->list);
106 atomic_set(&bo->reserve_count, 0); 106
107 if (surf) 107 if (surf)
108 bo->surf = *surf; 108 bo->surf = *surf;
109 109
110 qxl_ttm_placement_from_domain(bo, domain); 110 qxl_ttm_placement_from_domain(bo, domain, pinned);
111 111
112 r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, 112 r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
113 &bo->placement, 0, !kernel, NULL, size, 113 &bo->placement, 0, !kernel, NULL, size,
@@ -228,7 +228,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
228int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) 228int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
229{ 229{
230 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; 230 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
231 int r, i; 231 int r;
232 232
233 if (bo->pin_count) { 233 if (bo->pin_count) {
234 bo->pin_count++; 234 bo->pin_count++;
@@ -236,9 +236,7 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
236 *gpu_addr = qxl_bo_gpu_offset(bo); 236 *gpu_addr = qxl_bo_gpu_offset(bo);
237 return 0; 237 return 0;
238 } 238 }
239 qxl_ttm_placement_from_domain(bo, domain); 239 qxl_ttm_placement_from_domain(bo, domain, true);
240 for (i = 0; i < bo->placement.num_placement; i++)
241 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
242 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 240 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
243 if (likely(r == 0)) { 241 if (likely(r == 0)) {
244 bo->pin_count = 1; 242 bo->pin_count = 1;
@@ -317,53 +315,6 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
317 return 0; 315 return 0;
318} 316}
319 317
320void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
321{
322 struct qxl_bo_list *entry, *sf;
323
324 list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
325 qxl_bo_unreserve(entry->bo);
326 list_del(&entry->lhead);
327 kfree(entry);
328 }
329}
330
331int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
332{
333 struct qxl_bo_list *entry;
334 int ret;
335
336 list_for_each_entry(entry, &reloc_list->bos, lhead) {
337 if (entry->bo == bo)
338 return 0;
339 }
340
341 entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
342 if (!entry)
343 return -ENOMEM;
344
345 entry->bo = bo;
346 list_add(&entry->lhead, &reloc_list->bos);
347
348 ret = qxl_bo_reserve(bo, false);
349 if (ret)
350 return ret;
351
352 if (!bo->pin_count) {
353 qxl_ttm_placement_from_domain(bo, bo->type);
354 ret = ttm_bo_validate(&bo->tbo, &bo->placement,
355 true, false);
356 if (ret)
357 return ret;
358 }
359
360 /* allocate a surface for reserved + validated buffers */
361 ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
362 if (ret)
363 return ret;
364 return 0;
365}
366
367int qxl_surf_evict(struct qxl_device *qdev) 318int qxl_surf_evict(struct qxl_device *qdev)
368{ 319{
369 return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0); 320 return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
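
With the pinned argument added above, qxl_ttm_placement_from_domain() folds TTM_PL_FLAG_NO_EVICT into every placement at creation time, so the old create/reserve/pin/unreserve sequence for release bos collapses into a single call. Hedged sketch, driver context assumed:

	struct qxl_bo *bo;
	int r;

	/* was: qxl_bo_create(); qxl_bo_reserve(); qxl_bo_pin(); qxl_bo_unreserve(); */
	r = qxl_bo_create(qdev, PAGE_SIZE, false /* kernel */, true /* pinned */,
			  QXL_GEM_DOMAIN_VRAM, NULL, &bo);
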
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index ee7ad79ce781..d458a140c024 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -59,7 +59,7 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
59 59
60static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo) 60static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
61{ 61{
62 return bo->tbo.addr_space_offset; 62 return drm_vma_node_offset_addr(&bo->tbo.vma_node);
63} 63}
64 64
65static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type, 65static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
@@ -88,7 +88,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
88 88
89extern int qxl_bo_create(struct qxl_device *qdev, 89extern int qxl_bo_create(struct qxl_device *qdev,
90 unsigned long size, 90 unsigned long size,
91 bool kernel, u32 domain, 91 bool kernel, bool pinned, u32 domain,
92 struct qxl_surface *surf, 92 struct qxl_surface *surf,
93 struct qxl_bo **bo_ptr); 93 struct qxl_bo **bo_ptr);
94extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); 94extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
@@ -99,9 +99,7 @@ extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
99extern void qxl_bo_unref(struct qxl_bo **bo); 99extern void qxl_bo_unref(struct qxl_bo **bo);
100extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr); 100extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
101extern int qxl_bo_unpin(struct qxl_bo *bo); 101extern int qxl_bo_unpin(struct qxl_bo *bo);
102extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain); 102extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
103extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); 103extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
104 104
105extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo);
106extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed);
107#endif 105#endif
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b443d6751d5f..0109a9644cb2 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -38,7 +38,8 @@
38 38
39static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; 39static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
40static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; 40static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
41uint64_t 41
42static uint64_t
42qxl_release_alloc(struct qxl_device *qdev, int type, 43qxl_release_alloc(struct qxl_device *qdev, int type,
43 struct qxl_release **ret) 44 struct qxl_release **ret)
44{ 45{
@@ -53,9 +54,9 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
53 return 0; 54 return 0;
54 } 55 }
55 release->type = type; 56 release->type = type;
56 release->bo_count = 0;
57 release->release_offset = 0; 57 release->release_offset = 0;
58 release->surface_release_id = 0; 58 release->surface_release_id = 0;
59 INIT_LIST_HEAD(&release->bos);
59 60
60 idr_preload(GFP_KERNEL); 61 idr_preload(GFP_KERNEL);
61 spin_lock(&qdev->release_idr_lock); 62 spin_lock(&qdev->release_idr_lock);
@@ -77,20 +78,20 @@ void
77qxl_release_free(struct qxl_device *qdev, 78qxl_release_free(struct qxl_device *qdev,
78 struct qxl_release *release) 79 struct qxl_release *release)
79{ 80{
80 int i; 81 struct qxl_bo_list *entry, *tmp;
81 82 QXL_INFO(qdev, "release %d, type %d\n", release->id,
82 QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id, 83 release->type);
83 release->type, release->bo_count);
84 84
85 if (release->surface_release_id) 85 if (release->surface_release_id)
86 qxl_surface_id_dealloc(qdev, release->surface_release_id); 86 qxl_surface_id_dealloc(qdev, release->surface_release_id);
87 87
88 for (i = 0 ; i < release->bo_count; ++i) { 88 list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
89 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
89 QXL_INFO(qdev, "release %llx\n", 90 QXL_INFO(qdev, "release %llx\n",
90 release->bos[i]->tbo.addr_space_offset 91 drm_vma_node_offset_addr(&entry->tv.bo->vma_node)
91 - DRM_FILE_OFFSET); 92 - DRM_FILE_OFFSET);
92 qxl_fence_remove_release(&release->bos[i]->fence, release->id); 93 qxl_fence_remove_release(&bo->fence, release->id);
93 qxl_bo_unref(&release->bos[i]); 94 qxl_bo_unref(&bo);
94 } 95 }
95 spin_lock(&qdev->release_idr_lock); 96 spin_lock(&qdev->release_idr_lock);
96 idr_remove(&qdev->release_idr, release->id); 97 idr_remove(&qdev->release_idr, release->id);
@@ -98,83 +99,117 @@ qxl_release_free(struct qxl_device *qdev,
98 kfree(release); 99 kfree(release);
99} 100}
100 101
101void
102qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release,
103 struct qxl_bo *bo)
104{
105 int i;
106 for (i = 0; i < release->bo_count; i++)
107 if (release->bos[i] == bo)
108 return;
109
110 if (release->bo_count >= QXL_MAX_RES) {
111 DRM_ERROR("exceeded max resource on a qxl_release item\n");
112 return;
113 }
114 release->bos[release->bo_count++] = qxl_bo_ref(bo);
115}
116
117static int qxl_release_bo_alloc(struct qxl_device *qdev, 102static int qxl_release_bo_alloc(struct qxl_device *qdev,
118 struct qxl_bo **bo) 103 struct qxl_bo **bo)
119{ 104{
120 int ret; 105 int ret;
121 ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL, 106 /* pin release bo's - they are too messy to evict */
107 ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
108 QXL_GEM_DOMAIN_VRAM, NULL,
122 bo); 109 bo);
123 return ret; 110 return ret;
124} 111}
125 112
126int qxl_release_reserve(struct qxl_device *qdev, 113int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
127 struct qxl_release *release, bool no_wait) 114{
115 struct qxl_bo_list *entry;
116
117 list_for_each_entry(entry, &release->bos, tv.head) {
118 if (entry->tv.bo == &bo->tbo)
119 return 0;
120 }
121
122 entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
123 if (!entry)
124 return -ENOMEM;
125
126 qxl_bo_ref(bo);
127 entry->tv.bo = &bo->tbo;
128 list_add_tail(&entry->tv.head, &release->bos);
129 return 0;
130}
131
132static int qxl_release_validate_bo(struct qxl_bo *bo)
128{ 133{
129 int ret; 134 int ret;
130 if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) { 135
131 ret = qxl_bo_reserve(release->bos[0], no_wait); 136 if (!bo->pin_count) {
137 qxl_ttm_placement_from_domain(bo, bo->type, false);
138 ret = ttm_bo_validate(&bo->tbo, &bo->placement,
139 true, false);
132 if (ret) 140 if (ret)
133 return ret; 141 return ret;
134 } 142 }
143
144 /* allocate a surface for reserved + validated buffers */
145 ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
146 if (ret)
147 return ret;
148 return 0;
149}
150
151int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
152{
153 int ret;
154 struct qxl_bo_list *entry;
155
156 /* if there is only one object on the release it is the release
157 itself; since these objects are pinned there is no need to reserve */
158 if (list_is_singular(&release->bos))
159 return 0;
160
161 ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos);
162 if (ret)
163 return ret;
164
165 list_for_each_entry(entry, &release->bos, tv.head) {
166 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
167
168 ret = qxl_release_validate_bo(bo);
169 if (ret) {
170 ttm_eu_backoff_reservation(&release->ticket, &release->bos);
171 return ret;
172 }
173 }
135 return 0; 174 return 0;
136} 175}
137 176
138void qxl_release_unreserve(struct qxl_device *qdev, 177void qxl_release_backoff_reserve_list(struct qxl_release *release)
139 struct qxl_release *release)
140{ 178{
141 if (atomic_dec_and_test(&release->bos[0]->reserve_count)) 179 /* if there is only one object on the release it is the release
142 qxl_bo_unreserve(release->bos[0]); 180 itself; since these objects are pinned there is no need to reserve */
181 if (list_is_singular(&release->bos))
182 return;
183
184 ttm_eu_backoff_reservation(&release->ticket, &release->bos);
143} 185}
144 186
187
145int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, 188int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
146 enum qxl_surface_cmd_type surface_cmd_type, 189 enum qxl_surface_cmd_type surface_cmd_type,
147 struct qxl_release *create_rel, 190 struct qxl_release *create_rel,
148 struct qxl_release **release) 191 struct qxl_release **release)
149{ 192{
150 int ret;
151
152 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { 193 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
153 int idr_ret; 194 int idr_ret;
195 struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
154 struct qxl_bo *bo; 196 struct qxl_bo *bo;
155 union qxl_release_info *info; 197 union qxl_release_info *info;
156 198
157 /* stash the release after the create command */ 199 /* stash the release after the create command */
158 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); 200 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
159 bo = qxl_bo_ref(create_rel->bos[0]); 201 bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
160 202
161 (*release)->release_offset = create_rel->release_offset + 64; 203 (*release)->release_offset = create_rel->release_offset + 64;
162 204
163 qxl_release_add_res(qdev, *release, bo); 205 qxl_release_list_add(*release, bo);
164 206
165 ret = qxl_release_reserve(qdev, *release, false);
166 if (ret) {
167 DRM_ERROR("release reserve failed\n");
168 goto out_unref;
169 }
170 info = qxl_release_map(qdev, *release); 207 info = qxl_release_map(qdev, *release);
171 info->id = idr_ret; 208 info->id = idr_ret;
172 qxl_release_unmap(qdev, *release, info); 209 qxl_release_unmap(qdev, *release, info);
173 210
174
175out_unref:
176 qxl_bo_unref(&bo); 211 qxl_bo_unref(&bo);
177 return ret; 212 return 0;
178 } 213 }
179 214
180 return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd), 215 return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
@@ -187,7 +222,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
187{ 222{
188 struct qxl_bo *bo; 223 struct qxl_bo *bo;
189 int idr_ret; 224 int idr_ret;
190 int ret; 225 int ret = 0;
191 union qxl_release_info *info; 226 union qxl_release_info *info;
192 int cur_idx; 227 int cur_idx;
193 228
@@ -216,11 +251,6 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
216 mutex_unlock(&qdev->release_mutex); 251 mutex_unlock(&qdev->release_mutex);
217 return ret; 252 return ret;
218 } 253 }
219
220 /* pin releases bo's they are too messy to evict */
221 ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false);
222 qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL);
223 qxl_bo_unreserve(qdev->current_release_bo[cur_idx]);
224 } 254 }
225 255
226 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); 256 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
@@ -231,36 +261,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
231 if (rbo) 261 if (rbo)
232 *rbo = bo; 262 *rbo = bo;
233 263
234 qxl_release_add_res(qdev, *release, bo);
235
236 ret = qxl_release_reserve(qdev, *release, false);
237 mutex_unlock(&qdev->release_mutex); 264 mutex_unlock(&qdev->release_mutex);
238 if (ret) 265
239 goto out_unref; 266 qxl_release_list_add(*release, bo);
240 267
241 info = qxl_release_map(qdev, *release); 268 info = qxl_release_map(qdev, *release);
242 info->id = idr_ret; 269 info->id = idr_ret;
243 qxl_release_unmap(qdev, *release, info); 270 qxl_release_unmap(qdev, *release, info);
244 271
245out_unref:
246 qxl_bo_unref(&bo); 272 qxl_bo_unref(&bo);
247 return ret; 273 return ret;
248} 274}
249 275
250int qxl_fence_releaseable(struct qxl_device *qdev,
251 struct qxl_release *release)
252{
253 int i, ret;
254 for (i = 0; i < release->bo_count; i++) {
255 if (!release->bos[i]->tbo.sync_obj)
256 release->bos[i]->tbo.sync_obj = &release->bos[i]->fence;
257 ret = qxl_fence_add_release(&release->bos[i]->fence, release->id);
258 if (ret)
259 return ret;
260 }
261 return 0;
262}
263
264struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, 276struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
265 uint64_t id) 277 uint64_t id)
266{ 278{
@@ -273,10 +285,7 @@ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
273 DRM_ERROR("failed to find id in release_idr\n"); 285 DRM_ERROR("failed to find id in release_idr\n");
274 return NULL; 286 return NULL;
275 } 287 }
276 if (release->bo_count < 1) { 288
277 DRM_ERROR("read a released resource with 0 bos\n");
278 return NULL;
279 }
280 return release; 289 return release;
281} 290}
282 291
@@ -285,9 +294,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
285{ 294{
286 void *ptr; 295 void *ptr;
287 union qxl_release_info *info; 296 union qxl_release_info *info;
288 struct qxl_bo *bo = release->bos[0]; 297 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
298 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
289 299
290 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); 300 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
301 if (!ptr)
302 return NULL;
291 info = ptr + (release->release_offset & ~PAGE_SIZE); 303 info = ptr + (release->release_offset & ~PAGE_SIZE);
292 return info; 304 return info;
293} 305}
@@ -296,9 +308,51 @@ void qxl_release_unmap(struct qxl_device *qdev,
296 struct qxl_release *release, 308 struct qxl_release *release,
297 union qxl_release_info *info) 309 union qxl_release_info *info)
298{ 310{
299 struct qxl_bo *bo = release->bos[0]; 311 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
312 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
300 void *ptr; 313 void *ptr;
301 314
302 ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); 315 ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
303 qxl_bo_kunmap_atomic_page(qdev, bo, ptr); 316 qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
304} 317}
318
319void qxl_release_fence_buffer_objects(struct qxl_release *release)
320{
321 struct ttm_validate_buffer *entry;
322 struct ttm_buffer_object *bo;
323 struct ttm_bo_global *glob;
324 struct ttm_bo_device *bdev;
325 struct ttm_bo_driver *driver;
326 struct qxl_bo *qbo;
327
 328 /* if only one object is on the release, it's the release itself;
 329 since these objects are pinned, there is no need to reserve */
330 if (list_is_singular(&release->bos))
331 return;
332
333 bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
334 bdev = bo->bdev;
335 driver = bdev->driver;
336 glob = bo->glob;
337
338 spin_lock(&glob->lru_lock);
339 spin_lock(&bdev->fence_lock);
340
341 list_for_each_entry(entry, &release->bos, head) {
342 bo = entry->bo;
343 qbo = to_qxl_bo(bo);
344
345 if (!entry->bo->sync_obj)
346 entry->bo->sync_obj = &qbo->fence;
347
348 qxl_fence_add_release_locked(&qbo->fence, release->id);
349
350 ttm_bo_add_to_lru(bo);
351 ww_mutex_unlock(&bo->resv->lock);
352 entry->reserved = false;
353 }
354 spin_unlock(&bdev->fence_lock);
355 spin_unlock(&glob->lru_lock);
356 ww_acquire_fini(&release->ticket);
357}
358
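The qxl_release.c changes above replace the driver-private bo array and the
qxl_fence_releaseable() pass with a TTM validation list: a release now carries
a list of ttm_validate_buffer entries, and qxl_release_fence_buffer_objects()
walks that list once after submission, attaching the qxl fence as each bo's
sync_obj and dropping the reservations taken earlier. A minimal caller-side
sketch (assuming the qxl_* helpers shown in this diff; the exact
qxl_alloc_release_reserved() signature and the error/cleanup paths are
simplified):

	int ret;
	struct qxl_release *release;
	struct qxl_bo *bo;
	union qxl_release_info *info;

	ret = qxl_alloc_release_reserved(qdev, size, type, &release, &bo);
	if (ret)
		return ret;

	info = qxl_release_map(qdev, release);
	if (!info)			/* qxl_release_map() can now fail */
		return -ENOMEM;		/* (cleanup elided in this sketch) */
	/* ... fill in the command ... */
	qxl_release_unmap(qdev, release, info);

	/* after the command is pushed to the ring, fence and unreserve
	 * every bo on the release's validation list in one pass */
	qxl_release_fence_buffer_objects(release);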
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 489cb8cece4d..037786d7c1dc 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -206,13 +206,15 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
206 return; 206 return;
207 } 207 }
208 qbo = container_of(bo, struct qxl_bo, tbo); 208 qbo = container_of(bo, struct qxl_bo, tbo);
209 qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU); 209 qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
210 *placement = qbo->placement; 210 *placement = qbo->placement;
211} 211}
212 212
213static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp) 213static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
214{ 214{
215 return 0; 215 struct qxl_bo *qbo = to_qxl_bo(bo);
216
217 return drm_vma_node_verify_access(&qbo->gem_base.vma_node, filp);
216} 218}
217 219
218static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, 220static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
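The qxl_verify_access() stub above previously let any file mmap any buffer; it
now defers to drm_vma_node_verify_access(), which fails with -EACCES unless
the mapping file was previously granted access to the bo's vma offset node
(normally via drm_vma_node_allow() when the GEM object is opened).
Conceptually:

	/* sketch of the check, not the drm_vma_manager implementation */
	ret = drm_vma_node_verify_access(&qbo->gem_base.vma_node, filp);
	if (ret)
		return ret;	/* this filp never opened the GEM object */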
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index d4660cf942a5..c451257f08fb 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -540,7 +540,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
540 dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle 540 dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
541 + init->ring_size / sizeof(u32)); 541 + init->ring_size / sizeof(u32));
542 dev_priv->ring.size = init->ring_size; 542 dev_priv->ring.size = init->ring_size;
543 dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); 543 dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
544 544
545 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; 545 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
546 546
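drm_order() was a DRM-local helper; order_base_2() from <linux/log2.h> is the
generic equivalent, returning the power-of-two exponent rounded up. For the
ring sizes used here the two agree, e.g.:

	#include <linux/log2.h>

	order_base_2(8);	/* == 3, exact power of two */
	order_base_2(9);	/* == 4, rounds up */
	/* so for a 64 KiB ring: order_base_2(65536 / 8) == 13 */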
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 472c38fe123f..5bd307cd8da1 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -48,7 +48,6 @@ static const struct file_operations r128_driver_fops = {
48 .unlocked_ioctl = drm_ioctl, 48 .unlocked_ioctl = drm_ioctl,
49 .mmap = drm_mmap, 49 .mmap = drm_mmap,
50 .poll = drm_poll, 50 .poll = drm_poll,
51 .fasync = drm_fasync,
52#ifdef CONFIG_COMPAT 51#ifdef CONFIG_COMPAT
53 .compat_ioctl = r128_compat_ioctl, 52 .compat_ioctl = r128_compat_ioctl,
54#endif 53#endif
@@ -57,7 +56,7 @@ static const struct file_operations r128_driver_fops = {
57 56
58static struct drm_driver driver = { 57static struct drm_driver driver = {
59 .driver_features = 58 .driver_features =
60 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 59 DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
61 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 60 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
62 .dev_priv_size = sizeof(drm_r128_buf_priv_t), 61 .dev_priv_size = sizeof(drm_r128_buf_priv_t),
63 .load = r128_driver_load, 62 .load = r128_driver_load,
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 930c71b2fb5e..56eb5e3f5439 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -131,7 +131,7 @@ typedef struct drm_r128_buf_priv {
131 drm_r128_freelist_t *list_entry; 131 drm_r128_freelist_t *list_entry;
132} drm_r128_buf_priv_t; 132} drm_r128_buf_priv_t;
133 133
134extern struct drm_ioctl_desc r128_ioctls[]; 134extern const struct drm_ioctl_desc r128_ioctls[];
135extern int r128_max_ioctl; 135extern int r128_max_ioctl;
136 136
137 /* r128_cce.c */ 137 /* r128_cce.c */
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 19bb7e6f3d9a..01dd9aef9f0e 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1643,7 +1643,7 @@ void r128_driver_lastclose(struct drm_device *dev)
1643 r128_do_cleanup_cce(dev); 1643 r128_do_cleanup_cce(dev);
1644} 1644}
1645 1645
1646struct drm_ioctl_desc r128_ioctls[] = { 1646const struct drm_ioctl_desc r128_ioctls[] = {
1647 DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1647 DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1648 DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1648 DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1649 DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1649 DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index c3df52c1a60c..306364a1ecda 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -72,14 +72,32 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
72 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ 72 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
73 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ 73 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
74 r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \ 74 r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
75 r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ 75 radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
76 evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ 76 evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
77 evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \ 77 evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
78 atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \ 78 atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
79 si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \ 79 si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \
80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ 80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ 81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o 82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
83 ci_dpm.o dce6_afmt.o
84
85# add async DMA block
86radeon-y += \
87 r600_dma.o \
88 rv770_dma.o \
89 evergreen_dma.o \
90 ni_dma.o \
91 si_dma.o \
92 cik_sdma.o \
93
94# add UVD block
95radeon-y += \
96 radeon_uvd.o \
97 uvd_v1_0.o \
98 uvd_v2_2.o \
99 uvd_v3_1.o \
100 uvd_v4_2.o
83 101
84radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 102radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
85radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o 103radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index fb441a790f3d..15da7ef344a4 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1222,12 +1222,17 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1222 int r; 1222 int r;
1223 1223
1224 mutex_lock(&ctx->mutex); 1224 mutex_lock(&ctx->mutex);
1225 /* reset data block */
1226 ctx->data_block = 0;
1225 /* reset reg block */ 1227 /* reset reg block */
1226 ctx->reg_block = 0; 1228 ctx->reg_block = 0;
1227 /* reset fb window */ 1229 /* reset fb window */
1228 ctx->fb_base = 0; 1230 ctx->fb_base = 0;
1229 /* reset io mode */ 1231 /* reset io mode */
1230 ctx->io_mode = ATOM_IO_MM; 1232 ctx->io_mode = ATOM_IO_MM;
1233 /* reset divmul */
1234 ctx->divmul[0] = 0;
1235 ctx->divmul[1] = 0;
1231 r = atom_execute_table_locked(ctx, index, params); 1236 r = atom_execute_table_locked(ctx, index, params);
1232 mutex_unlock(&ctx->mutex); 1237 mutex_unlock(&ctx->mutex);
1233 return r; 1238 return r;
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 16b120c3f144..af10f8571d87 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -7661,618 +7661,6 @@ typedef struct _ATOM_POWERPLAY_INFO_V3
7661 ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; 7661 ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
7662}ATOM_POWERPLAY_INFO_V3; 7662}ATOM_POWERPLAY_INFO_V3;
7663 7663
7664/* New PPlib */
7665/**************************************************************************/
7666typedef struct _ATOM_PPLIB_THERMALCONTROLLER
7667
7668{
7669 UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
7670 UCHAR ucI2cLine; // as interpreted by DAL I2C
7671 UCHAR ucI2cAddress;
7672 UCHAR ucFanParameters; // Fan Control Parameters.
7673 UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
7674 UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
7675 UCHAR ucReserved; // ----
7676 UCHAR ucFlags; // to be defined
7677} ATOM_PPLIB_THERMALCONTROLLER;
7678
7679#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
7680#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
7681
7682#define ATOM_PP_THERMALCONTROLLER_NONE 0
7683#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
7684#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
7685#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
7686#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
7687#define ATOM_PP_THERMALCONTROLLER_LM64 5
7688#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
7689#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
7690#define ATOM_PP_THERMALCONTROLLER_RV770 8
7691#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
7692#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
7693#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
7694#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
7695#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
7696#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
7697#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16
7698#define ATOM_PP_THERMALCONTROLLER_LM96163 17
7699#define ATOM_PP_THERMALCONTROLLER_CISLANDS 18
7700
7701// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
7702// We probably should reserve the bit 0x80 for this use.
7703// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
7704// The driver can pick the correct internal controller based on the ASIC.
7705
7706#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
7707#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
7708
7709typedef struct _ATOM_PPLIB_STATE
7710{
7711 UCHAR ucNonClockStateIndex;
7712 UCHAR ucClockStateIndices[1]; // variable-sized
7713} ATOM_PPLIB_STATE;
7714
7715
7716typedef struct _ATOM_PPLIB_FANTABLE
7717{
7718 UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same.
7719 UCHAR ucTHyst; // Temperature hysteresis. Integer.
7720 USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
7721 USHORT usTMed; // The middle temperature where we change slopes.
7722 USHORT usTHigh; // The high point above TMed for adjusting the second slope.
7723 USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
7724 USHORT usPWMMed; // The PWM value (in percent) at TMed.
7725 USHORT usPWMHigh; // The PWM value at THigh.
7726} ATOM_PPLIB_FANTABLE;
7727
7728typedef struct _ATOM_PPLIB_FANTABLE2
7729{
7730 ATOM_PPLIB_FANTABLE basicTable;
7731 USHORT usTMax; // The max temperature
7732} ATOM_PPLIB_FANTABLE2;
7733
7734typedef struct _ATOM_PPLIB_EXTENDEDHEADER
7735{
7736 USHORT usSize;
7737 ULONG ulMaxEngineClock; // For Overdrive.
7738 ULONG ulMaxMemoryClock; // For Overdrive.
7739 // Add extra system parameters here, always adjust size to include all fields.
7740 USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
7741 USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table
7742 USHORT usSAMUTableOffset; //points to ATOM_PPLIB_SAMU_Table
7743 USHORT usPPMTableOffset; //points to ATOM_PPLIB_PPM_Table
7744} ATOM_PPLIB_EXTENDEDHEADER;
7745
7746//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
7747#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
7748#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
7749#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
7750#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
7751#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
7752#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
7753#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
7754#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
7755#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
7756#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
7757#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
7758#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
7759#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
7760#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
7761#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
7762#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC.
7763#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
7764#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver supports BACO state.
7765#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE 0x00040000 // Does the driver supports new CAC voltage table.
7766#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY 0x00080000 // Does the driver supports revert GPIO5 polarity.
7767#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17 0x00100000 // Does the driver supports thermal2GPIO17.
7768#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Does the driver supports VR HOT GPIO Configurable.
7769
7770typedef struct _ATOM_PPLIB_POWERPLAYTABLE
7771{
7772 ATOM_COMMON_TABLE_HEADER sHeader;
7773
7774 UCHAR ucDataRevision;
7775
7776 UCHAR ucNumStates;
7777 UCHAR ucStateEntrySize;
7778 UCHAR ucClockInfoSize;
7779 UCHAR ucNonClockSize;
7780
7781 // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
7782 USHORT usStateArrayOffset;
7783
7784 // offset from start of this table to array of ASIC-specific structures,
7785 // currently ATOM_PPLIB_CLOCK_INFO.
7786 USHORT usClockInfoArrayOffset;
7787
7788 // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
7789 USHORT usNonClockInfoArrayOffset;
7790
7791 USHORT usBackbiasTime; // in microseconds
7792 USHORT usVoltageTime; // in microseconds
7793 USHORT usTableSize; //the size of this structure, or the extended structure
7794
7795 ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_*
7796
7797 ATOM_PPLIB_THERMALCONTROLLER sThermalController;
7798
7799 USHORT usBootClockInfoOffset;
7800 USHORT usBootNonClockInfoOffset;
7801
7802} ATOM_PPLIB_POWERPLAYTABLE;
7803
7804typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
7805{
7806 ATOM_PPLIB_POWERPLAYTABLE basicTable;
7807 UCHAR ucNumCustomThermalPolicy;
7808 USHORT usCustomThermalPolicyArrayOffset;
7809}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
7810
7811typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
7812{
7813 ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
7814 USHORT usFormatID; // To be used ONLY by PPGen.
7815 USHORT usFanTableOffset;
7816 USHORT usExtendendedHeaderOffset;
7817} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
7818
7819typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
7820{
7821 ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
7822 ULONG ulGoldenPPID; // PPGen use only
7823 ULONG ulGoldenRevision; // PPGen use only
7824 USHORT usVddcDependencyOnSCLKOffset;
7825 USHORT usVddciDependencyOnMCLKOffset;
7826 USHORT usVddcDependencyOnMCLKOffset;
7827 USHORT usMaxClockVoltageOnDCOffset;
7828 USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
7829 USHORT usMvddDependencyOnMCLKOffset;
7830} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
7831
7832typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
7833{
7834 ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
7835 ULONG ulTDPLimit;
7836 ULONG ulNearTDPLimit;
7837 ULONG ulSQRampingThreshold;
7838 USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
7839 ULONG ulCACLeakage; // The iLeakage for driver calculated CAC leakage table
7840 USHORT usTDPODLimit;
7841 USHORT usLoadLineSlope; // in milliOhms * 100
7842} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
7843
7844//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
7845#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
7846#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
7847#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
7848#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
7849#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
7850#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
7851// 2, 4, 6, 7 are reserved
7852
7853#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
7854#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
7855#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
7856#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
7857#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
7858#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
7859#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
7860#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
7861#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
7862#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
7863#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
7864#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
7865#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
7866
7867//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
7868#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
7869#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
7870#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D)
7871
7872//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
7873#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
7874#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
7875
7876// 0 is 2.5Gb/s, 1 is 5Gb/s
7877#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
7878#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
7879
7880// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
7881#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
7882#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
7883
7884// lookup into reduced refresh-rate table
7885#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
7886#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
7887
7888#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
7889#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
7890// 2-15 TBD as needed.
7891
7892#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
7893#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
7894
7895#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
7896
7897#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
7898
7899//memory related flags
7900#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000
7901
7902//M3 Arb //2bits, current 3 sets of parameters in total
7903#define ATOM_PPLIB_M3ARB_MASK 0x00060000
7904#define ATOM_PPLIB_M3ARB_SHIFT 17
7905
7906#define ATOM_PPLIB_ENABLE_DRR 0x00080000
7907
7908// remaining 16 bits are reserved
7909typedef struct _ATOM_PPLIB_THERMAL_STATE
7910{
7911 UCHAR ucMinTemperature;
7912 UCHAR ucMaxTemperature;
7913 UCHAR ucThermalAction;
7914}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
7915
7916// Contained in an array starting at the offset
7917// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
7918// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
7919#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
7920#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
7921typedef struct _ATOM_PPLIB_NONCLOCK_INFO
7922{
7923 USHORT usClassification;
7924 UCHAR ucMinTemperature;
7925 UCHAR ucMaxTemperature;
7926 ULONG ulCapsAndSettings;
7927 UCHAR ucRequiredPower;
7928 USHORT usClassification2;
7929 ULONG ulVCLK;
7930 ULONG ulDCLK;
7931 UCHAR ucUnused[5];
7932} ATOM_PPLIB_NONCLOCK_INFO;
7933
7934// Contained in an array starting at the offset
7935// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
7936// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
7937typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
7938{
7939 USHORT usEngineClockLow;
7940 UCHAR ucEngineClockHigh;
7941
7942 USHORT usMemoryClockLow;
7943 UCHAR ucMemoryClockHigh;
7944
7945 USHORT usVDDC;
7946 USHORT usUnused1;
7947 USHORT usUnused2;
7948
7949 ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
7950
7951} ATOM_PPLIB_R600_CLOCK_INFO;
7952
7953// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
7954#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
7955#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
7956#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
7957#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
7958#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
7959#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
7960
7961typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
7962{
7963 USHORT usEngineClockLow;
7964 UCHAR ucEngineClockHigh;
7965
7966 USHORT usMemoryClockLow;
7967 UCHAR ucMemoryClockHigh;
7968
7969 USHORT usVDDC;
7970 USHORT usVDDCI;
7971 USHORT usUnused;
7972
7973 ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
7974
7975} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
7976
7977typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
7978{
7979 USHORT usEngineClockLow;
7980 UCHAR ucEngineClockHigh;
7981
7982 USHORT usMemoryClockLow;
7983 UCHAR ucMemoryClockHigh;
7984
7985 USHORT usVDDC;
7986 USHORT usVDDCI;
7987 UCHAR ucPCIEGen;
7988 UCHAR ucUnused1;
7989
7990 ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
7991
7992} ATOM_PPLIB_SI_CLOCK_INFO;
7993
7994typedef struct _ATOM_PPLIB_CI_CLOCK_INFO
7995{
7996 USHORT usEngineClockLow;
7997 UCHAR ucEngineClockHigh;
7998
7999 USHORT usMemoryClockLow;
8000 UCHAR ucMemoryClockHigh;
8001
8002 UCHAR ucPCIEGen;
8003 USHORT usPCIELane;
8004} ATOM_PPLIB_CI_CLOCK_INFO;
8005
8006typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
8007
8008{
8009 USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
8010 UCHAR ucLowEngineClockHigh;
8011 USHORT usHighEngineClockLow; // High Engine clock in MHz.
8012 UCHAR ucHighEngineClockHigh;
8013 USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
 8014 UCHAR ucMemoryClockHigh; // Currently unused.
8015 UCHAR ucPadding; // For proper alignment and size.
8016 USHORT usVDDC; // For the 780, use: None, Low, High, Variable
8017 UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
 8018 UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum downstream width could be bigger per display BW requirement.
8019 USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
8020 ULONG ulFlags;
8021} ATOM_PPLIB_RS780_CLOCK_INFO;
8022
8023#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
8024#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
8025#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
8026#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
8027
8028#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
8029#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
8030#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
8031
8032#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
8033#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
8034#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
8035
8036typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
8037 USHORT usEngineClockLow; //clockfrequency & 0xFFFF. The unit is in 10khz
8038 UCHAR ucEngineClockHigh; //clockfrequency >> 16.
8039 UCHAR vddcIndex; //2-bit vddc index;
8040 USHORT tdpLimit;
 8041 //please initialize to 0
8042 USHORT rsv1;
8043 //please initialize to 0s
8044 ULONG rsv2[2];
8045}ATOM_PPLIB_SUMO_CLOCK_INFO;
8046
8047
8048
8049typedef struct _ATOM_PPLIB_STATE_V2
8050{
8051 //number of valid dpm levels in this state; Driver uses it to calculate the whole
8052 //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
8053 UCHAR ucNumDPMLevels;
8054
 8055 //an index into the array of nonClockInfos
8056 UCHAR nonClockInfoIndex;
8057 /**
8058 * Driver will read the first ucNumDPMLevels in this array
8059 */
8060 UCHAR clockInfoIndex[1];
8061} ATOM_PPLIB_STATE_V2;
8062
8063typedef struct _StateArray{
8064 //how many states we have
8065 UCHAR ucNumEntries;
8066
8067 ATOM_PPLIB_STATE_V2 states[1];
8068}StateArray;
8069
8070
8071typedef struct _ClockInfoArray{
8072 //how many clock levels we have
8073 UCHAR ucNumEntries;
8074
8075 //sizeof(ATOM_PPLIB_CLOCK_INFO)
8076 UCHAR ucEntrySize;
8077
8078 UCHAR clockInfo[1];
8079}ClockInfoArray;
8080
8081typedef struct _NonClockInfoArray{
8082
8083 //how many non-clock levels we have. normally should be same as number of states
8084 UCHAR ucNumEntries;
8085 //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
8086 UCHAR ucEntrySize;
8087
8088 ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
8089}NonClockInfoArray;
8090
8091typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
8092{
8093 USHORT usClockLow;
8094 UCHAR ucClockHigh;
8095 USHORT usVoltage;
8096}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
8097
8098typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
8099{
8100 UCHAR ucNumEntries; // Number of entries.
8101 ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
8102}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
8103
8104typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
8105{
8106 USHORT usSclkLow;
8107 UCHAR ucSclkHigh;
8108 USHORT usMclkLow;
8109 UCHAR ucMclkHigh;
8110 USHORT usVddc;
8111 USHORT usVddci;
8112}ATOM_PPLIB_Clock_Voltage_Limit_Record;
8113
8114typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
8115{
8116 UCHAR ucNumEntries; // Number of entries.
8117 ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
8118}ATOM_PPLIB_Clock_Voltage_Limit_Table;
8119
8120typedef struct _ATOM_PPLIB_CAC_Leakage_Record
8121{
8122 USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations; For CI and newer, we use this as the real VDDC value.
 8123 ULONG ulLeakageValue; // For CI and newer we use this as the "fake" standard VDDC value.
8124}ATOM_PPLIB_CAC_Leakage_Record;
8125
8126typedef struct _ATOM_PPLIB_CAC_Leakage_Table
8127{
8128 UCHAR ucNumEntries; // Number of entries.
8129 ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
8130}ATOM_PPLIB_CAC_Leakage_Table;
8131
8132typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
8133{
8134 USHORT usVoltage;
8135 USHORT usSclkLow;
8136 UCHAR ucSclkHigh;
8137 USHORT usMclkLow;
8138 UCHAR ucMclkHigh;
8139}ATOM_PPLIB_PhaseSheddingLimits_Record;
8140
8141typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
8142{
8143 UCHAR ucNumEntries; // Number of entries.
8144 ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
8145}ATOM_PPLIB_PhaseSheddingLimits_Table;
8146
8147typedef struct _VCEClockInfo{
8148 USHORT usEVClkLow;
8149 UCHAR ucEVClkHigh;
8150 USHORT usECClkLow;
8151 UCHAR ucECClkHigh;
8152}VCEClockInfo;
8153
8154typedef struct _VCEClockInfoArray{
8155 UCHAR ucNumEntries;
8156 VCEClockInfo entries[1];
8157}VCEClockInfoArray;
8158
8159typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
8160{
8161 USHORT usVoltage;
8162 UCHAR ucVCEClockInfoIndex;
8163}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
8164
8165typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
8166{
8167 UCHAR numEntries;
8168 ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
8169}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
8170
8171typedef struct _ATOM_PPLIB_VCE_State_Record
8172{
8173 UCHAR ucVCEClockInfoIndex;
 8174 UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
8175}ATOM_PPLIB_VCE_State_Record;
8176
8177typedef struct _ATOM_PPLIB_VCE_State_Table
8178{
8179 UCHAR numEntries;
8180 ATOM_PPLIB_VCE_State_Record entries[1];
8181}ATOM_PPLIB_VCE_State_Table;
8182
8183
8184typedef struct _ATOM_PPLIB_VCE_Table
8185{
8186 UCHAR revid;
8187// VCEClockInfoArray array;
8188// ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
8189// ATOM_PPLIB_VCE_State_Table states;
8190}ATOM_PPLIB_VCE_Table;
8191
8192
8193typedef struct _UVDClockInfo{
8194 USHORT usVClkLow;
8195 UCHAR ucVClkHigh;
8196 USHORT usDClkLow;
8197 UCHAR ucDClkHigh;
8198}UVDClockInfo;
8199
8200typedef struct _UVDClockInfoArray{
8201 UCHAR ucNumEntries;
8202 UVDClockInfo entries[1];
8203}UVDClockInfoArray;
8204
8205typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
8206{
8207 USHORT usVoltage;
8208 UCHAR ucUVDClockInfoIndex;
8209}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
8210
8211typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
8212{
8213 UCHAR numEntries;
8214 ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
8215}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
8216
8217typedef struct _ATOM_PPLIB_UVD_State_Record
8218{
8219 UCHAR ucUVDClockInfoIndex;
 8220 UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
8221}ATOM_PPLIB_UVD_State_Record;
8222
8223typedef struct _ATOM_PPLIB_UVD_State_Table
8224{
8225 UCHAR numEntries;
8226 ATOM_PPLIB_UVD_State_Record entries[1];
8227}ATOM_PPLIB_UVD_State_Table;
8228
8229
8230typedef struct _ATOM_PPLIB_UVD_Table
8231{
8232 UCHAR revid;
8233// UVDClockInfoArray array;
8234// ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
8235// ATOM_PPLIB_UVD_State_Table states;
8236}ATOM_PPLIB_UVD_Table;
8237
8238
8239typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
8240{
8241 USHORT usVoltage;
8242 USHORT usSAMClockLow;
8243 UCHAR ucSAMClockHigh;
8244}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
8245
8246typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
8247 UCHAR numEntries;
8248 ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
8249}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
8250
8251typedef struct _ATOM_PPLIB_SAMU_Table
8252{
8253 UCHAR revid;
8254 ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits;
8255}ATOM_PPLIB_SAMU_Table;
8256
8257#define ATOM_PPM_A_A 1
8258#define ATOM_PPM_A_I 2
8259typedef struct _ATOM_PPLIB_PPM_Table
8260{
8261 UCHAR ucRevId;
8262 UCHAR ucPpmDesign; //A+I or A+A
8263 USHORT usCpuCoreNumber;
8264 ULONG ulPlatformTDP;
8265 ULONG ulSmallACPlatformTDP;
8266 ULONG ulPlatformTDC;
8267 ULONG ulSmallACPlatformTDC;
8268 ULONG ulApuTDP;
8269 ULONG ulDGpuTDP;
8270 ULONG ulDGpuUlvPower;
8271 ULONG ulTjmax;
8272} ATOM_PPLIB_PPM_Table;
8273
8274/**************************************************************************/
8275
8276 7664
8277// Following definitions are for compatibility issue in different SW components. 7665// Following definitions are for compatibility issue in different SW components.
8278#define ATOM_MASTER_DATA_TABLE_REVISION 0x01 7666#define ATOM_MASTER_DATA_TABLE_REVISION 0x01
@@ -8485,3 +7873,6 @@ typedef struct {
8485 7873
8486 7874
8487#endif /* _ATOMBIOS_H */ 7875#endif /* _ATOMBIOS_H */
7876
7877#include "pptable.h"
7878
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b9d3b43f19c0..bf87f6d435f8 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1910,6 +1910,12 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
1910 int i; 1910 int i;
1911 1911
1912 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 1912 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
1913 /* disable the GRPH */
1914 if (ASIC_IS_DCE4(rdev))
1915 WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
1916 else if (ASIC_IS_AVIVO(rdev))
1917 WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
1918
1913 if (ASIC_IS_DCE6(rdev)) 1919 if (ASIC_IS_DCE6(rdev))
1914 atombios_powergate_crtc(crtc, ATOM_ENABLE); 1920 atombios_powergate_crtc(crtc, ATOM_ENABLE);
1915 1921
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 064023bed480..00885417ffff 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -44,6 +44,41 @@ static char *pre_emph_names[] = {
44}; 44};
45 45
46/***** radeon AUX functions *****/ 46/***** radeon AUX functions *****/
47
48/* Atom needs data in little endian format
49 * so swap as appropriate when copying data to
50 * or from atom. Note that atom operates on
51 * dw units.
52 */
53void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
54{
55#ifdef __BIG_ENDIAN
56 u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
57 u32 *dst32, *src32;
58 int i;
59
60 memcpy(src_tmp, src, num_bytes);
61 src32 = (u32 *)src_tmp;
62 dst32 = (u32 *)dst_tmp;
63 if (to_le) {
64 for (i = 0; i < ((num_bytes + 3) / 4); i++)
65 dst32[i] = cpu_to_le32(src32[i]);
66 memcpy(dst, dst_tmp, num_bytes);
67 } else {
68 u8 dws = num_bytes & ~3;
69 for (i = 0; i < ((num_bytes + 3) / 4); i++)
70 dst32[i] = le32_to_cpu(src32[i]);
71 memcpy(dst, dst_tmp, dws);
72 if (num_bytes % 4) {
73 for (i = 0; i < (num_bytes % 4); i++)
74 dst[dws+i] = dst_tmp[dws+i];
75 }
76 }
77#else
78 memcpy(dst, src, num_bytes);
79#endif
80}
81
47union aux_channel_transaction { 82union aux_channel_transaction {
48 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; 83 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
49 PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; 84 PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
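The two call sites changed below show the intended pairing: data headed into
the atom scratch area is swapped to little endian, data read back is swapped
to CPU order, and on little-endian kernels both calls reduce to a plain
memcpy():

	radeon_atom_copy_swap(base, send, send_bytes, true);        /* CPU -> atom */
	radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);  /* atom -> CPU */

Note that the 20-byte temporaries inside the helper bound num_bytes to 20,
which is enough for the AUX transactions handled here.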
@@ -65,10 +100,10 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
65 100
66 base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); 101 base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
67 102
68 memcpy(base, send, send_bytes); 103 radeon_atom_copy_swap(base, send, send_bytes, true);
69 104
70 args.v1.lpAuxRequest = 0 + 4; 105 args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
71 args.v1.lpDataOut = 16 + 4; 106 args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
72 args.v1.ucDataOutLen = 0; 107 args.v1.ucDataOutLen = 0;
73 args.v1.ucChannelID = chan->rec.i2c_id; 108 args.v1.ucChannelID = chan->rec.i2c_id;
74 args.v1.ucDelay = delay / 10; 109 args.v1.ucDelay = delay / 10;
@@ -102,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
102 recv_bytes = recv_size; 137 recv_bytes = recv_size;
103 138
104 if (recv && recv_size) 139 if (recv && recv_size)
105 memcpy(recv, base + 16, recv_bytes); 140 radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);
106 141
107 return recv_bytes; 142 return recv_bytes;
108} 143}
@@ -550,7 +585,7 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
550 return false; 585 return false;
551 } 586 }
552 587
553 DRM_DEBUG_KMS("link status %*ph\n", 6, link_status); 588 DRM_DEBUG_KMS("link status %6ph\n", link_status);
554 return true; 589 return true;
555} 590}
556 591
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 092275d53d4a..dfac7965ea28 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -682,8 +682,6 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
682int 682int
683atombios_get_encoder_mode(struct drm_encoder *encoder) 683atombios_get_encoder_mode(struct drm_encoder *encoder)
684{ 684{
685 struct drm_device *dev = encoder->dev;
686 struct radeon_device *rdev = dev->dev_private;
687 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 685 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
688 struct drm_connector *connector; 686 struct drm_connector *connector;
689 struct radeon_connector *radeon_connector; 687 struct radeon_connector *radeon_connector;
@@ -710,8 +708,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
710 case DRM_MODE_CONNECTOR_DVII: 708 case DRM_MODE_CONNECTOR_DVII:
711 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 709 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
712 if (drm_detect_hdmi_monitor(radeon_connector->edid) && 710 if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
713 radeon_audio && 711 radeon_audio)
714 !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
715 return ATOM_ENCODER_MODE_HDMI; 712 return ATOM_ENCODER_MODE_HDMI;
716 else if (radeon_connector->use_digital) 713 else if (radeon_connector->use_digital)
717 return ATOM_ENCODER_MODE_DVI; 714 return ATOM_ENCODER_MODE_DVI;
@@ -722,8 +719,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
722 case DRM_MODE_CONNECTOR_HDMIA: 719 case DRM_MODE_CONNECTOR_HDMIA:
723 default: 720 default:
724 if (drm_detect_hdmi_monitor(radeon_connector->edid) && 721 if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
725 radeon_audio && 722 radeon_audio)
726 !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
727 return ATOM_ENCODER_MODE_HDMI; 723 return ATOM_ENCODER_MODE_HDMI;
728 else 724 else
729 return ATOM_ENCODER_MODE_DVI; 725 return ATOM_ENCODER_MODE_DVI;
@@ -737,8 +733,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
737 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 733 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
738 return ATOM_ENCODER_MODE_DP; 734 return ATOM_ENCODER_MODE_DP;
739 else if (drm_detect_hdmi_monitor(radeon_connector->edid) && 735 else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
740 radeon_audio && 736 radeon_audio)
741 !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
742 return ATOM_ENCODER_MODE_HDMI; 737 return ATOM_ENCODER_MODE_HDMI;
743 else 738 else
744 return ATOM_ENCODER_MODE_DVI; 739 return ATOM_ENCODER_MODE_DVI;
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index 082338df708a..deaf98cdca3a 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -27,10 +27,12 @@
27#include "radeon.h" 27#include "radeon.h"
28#include "atom.h" 28#include "atom.h"
29 29
30extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
31
30#define TARGET_HW_I2C_CLOCK 50 32#define TARGET_HW_I2C_CLOCK 50
31 33
32/* these are a limitation of ProcessI2cChannelTransaction not the hw */ 34/* these are a limitation of ProcessI2cChannelTransaction not the hw */
33#define ATOM_MAX_HW_I2C_WRITE 2 35#define ATOM_MAX_HW_I2C_WRITE 3
34#define ATOM_MAX_HW_I2C_READ 255 36#define ATOM_MAX_HW_I2C_READ 255
35 37
36static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, 38static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
@@ -50,20 +52,24 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
50 52
51 if (flags & HW_I2C_WRITE) { 53 if (flags & HW_I2C_WRITE) {
52 if (num > ATOM_MAX_HW_I2C_WRITE) { 54 if (num > ATOM_MAX_HW_I2C_WRITE) {
53 DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num); 55 DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
54 return -EINVAL; 56 return -EINVAL;
55 } 57 }
56 memcpy(&out, buf, num); 58 args.ucRegIndex = buf[0];
59 if (num > 1)
60 memcpy(&out, &buf[1], num - 1);
57 args.lpI2CDataOut = cpu_to_le16(out); 61 args.lpI2CDataOut = cpu_to_le16(out);
58 } else { 62 } else {
59 if (num > ATOM_MAX_HW_I2C_READ) { 63 if (num > ATOM_MAX_HW_I2C_READ) {
60 DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num); 64 DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
61 return -EINVAL; 65 return -EINVAL;
62 } 66 }
67 args.ucRegIndex = 0;
68 args.lpI2CDataOut = 0;
63 } 69 }
64 70
71 args.ucFlag = flags;
65 args.ucI2CSpeed = TARGET_HW_I2C_CLOCK; 72 args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
66 args.ucRegIndex = 0;
67 args.ucTransBytes = num; 73 args.ucTransBytes = num;
68 args.ucSlaveAddr = slave_addr << 1; 74 args.ucSlaveAddr = slave_addr << 1;
69 args.ucLineNumber = chan->rec.i2c_id; 75 args.ucLineNumber = chan->rec.i2c_id;
@@ -77,7 +83,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
77 } 83 }
78 84
79 if (!(flags & HW_I2C_WRITE)) 85 if (!(flags & HW_I2C_WRITE))
80 memcpy(buf, base, num); 86 radeon_atom_copy_swap(buf, base, num, false);
81 87
82 return 0; 88 return 0;
83} 89}
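The rework above reflects how the atom table consumes a write: the first
buffer byte goes into the separate register-index field and only the rest is
payload, which is why the write limit grows from 2 to 3 bytes. Packing a
3-byte write, illustratively:

	u8 buf[3] = { 0x10, 0xAB, 0xCD };  /* register index + 2 data bytes */
	u16 out = 0;

	args.ucRegIndex = buf[0];          /* 0x10 goes in the index field */
	memcpy(&out, &buf[1], 2);          /* payload: 0xAB, 0xCD */
	args.lpI2CDataOut = cpu_to_le16(out);
	args.ucTransBytes = 3;             /* still counts all three bytes */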
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 0bfd55e08820..084e69414fd1 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -2548,9 +2548,6 @@ int btc_dpm_init(struct radeon_device *rdev)
2548{ 2548{
2549 struct rv7xx_power_info *pi; 2549 struct rv7xx_power_info *pi;
2550 struct evergreen_power_info *eg_pi; 2550 struct evergreen_power_info *eg_pi;
2551 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
2552 u16 data_offset, size;
2553 u8 frev, crev;
2554 struct atom_clock_dividers dividers; 2551 struct atom_clock_dividers dividers;
2555 int ret; 2552 int ret;
2556 2553
@@ -2633,16 +2630,7 @@ int btc_dpm_init(struct radeon_device *rdev)
2633 eg_pi->vddci_control = 2630 eg_pi->vddci_control =
2634 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); 2631 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
2635 2632
2636 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, 2633 rv770_get_engine_memory_ss(rdev);
2637 &frev, &crev, &data_offset)) {
2638 pi->sclk_ss = true;
2639 pi->mclk_ss = true;
2640 pi->dynamic_ss = true;
2641 } else {
2642 pi->sclk_ss = false;
2643 pi->mclk_ss = false;
2644 pi->dynamic_ss = true;
2645 }
2646 2634
2647 pi->asi = RV770_ASI_DFLT; 2635 pi->asi = RV770_ASI_DFLT;
2648 pi->pasi = CYPRESS_HASI_DFLT; 2636 pi->pasi = CYPRESS_HASI_DFLT;
@@ -2659,8 +2647,7 @@ int btc_dpm_init(struct radeon_device *rdev)
2659 2647
2660 pi->dynamic_pcie_gen2 = true; 2648 pi->dynamic_pcie_gen2 = true;
2661 2649
2662 if (pi->gfx_clock_gating && 2650 if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
2663 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
2664 pi->thermal_protection = true; 2651 pi->thermal_protection = true;
2665 else 2652 else
2666 pi->thermal_protection = false; 2653 pi->thermal_protection = false;
@@ -2712,6 +2699,12 @@ int btc_dpm_init(struct radeon_device *rdev)
2712 else 2699 else
2713 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 10000; 2700 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 10000;
2714 2701
2702 /* make sure dc limits are valid */
2703 if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
2704 (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
2705 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
2706 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
2707
2715 return 0; 2708 return 0;
2716} 2709}
2717 2710
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
index 19a0114d2e3b..98d009e154bf 100644
--- a/drivers/gpu/drm/radeon/cayman_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
@@ -317,58 +317,4 @@ const u32 cayman_default_state[] =
317 0x00000010, /* */ 317 0x00000010, /* */
318}; 318};
319 319
320const u32 cayman_vs[] =
321{
322 0x00000004,
323 0x80400400,
324 0x0000a03c,
325 0x95000688,
326 0x00004000,
327 0x15000688,
328 0x00000000,
329 0x88000000,
330 0x04000000,
331 0x67961001,
332#ifdef __BIG_ENDIAN
333 0x00020000,
334#else
335 0x00000000,
336#endif
337 0x00000000,
338 0x04000000,
339 0x67961000,
340#ifdef __BIG_ENDIAN
341 0x00020008,
342#else
343 0x00000008,
344#endif
345 0x00000000,
346};
347
348const u32 cayman_ps[] =
349{
350 0x00000004,
351 0xa00c0000,
352 0x00000008,
353 0x80400000,
354 0x00000000,
355 0x95000688,
356 0x00000000,
357 0x88000000,
358 0x00380400,
359 0x00146b10,
360 0x00380000,
361 0x20146b10,
362 0x00380400,
363 0x40146b00,
364 0x80380000,
365 0x60146b00,
366 0x00000010,
367 0x000d1000,
368 0xb0800000,
369 0x00000000,
370};
371
372const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps);
373const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs);
374const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state); 320const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
new file mode 100644
index 000000000000..916630fdc796
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -0,0 +1,5239 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "cikd.h"
27#include "r600_dpm.h"
28#include "ci_dpm.h"
29#include "atom.h"
30#include <linux/seq_file.h>
31
32#define MC_CG_ARB_FREQ_F0 0x0a
33#define MC_CG_ARB_FREQ_F1 0x0b
34#define MC_CG_ARB_FREQ_F2 0x0c
35#define MC_CG_ARB_FREQ_F3 0x0d
36
37#define SMC_RAM_END 0x40000
38
39#define VOLTAGE_SCALE 4
40#define VOLTAGE_VID_OFFSET_SCALE1 625
41#define VOLTAGE_VID_OFFSET_SCALE2 100
42
43static const struct ci_pt_defaults defaults_bonaire_xt =
44{
45 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
46 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
47 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
48};
49
50static const struct ci_pt_defaults defaults_bonaire_pro =
51{
52 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
53 { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
54 { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
55};
56
57static const struct ci_pt_defaults defaults_saturn_xt =
58{
59 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
60 { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
61 { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
62};
63
64static const struct ci_pt_defaults defaults_saturn_pro =
65{
66 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
67 { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
68 { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
69};
70
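/* Layout note (inferred from the ci_pt_config_reg structure, so treat the
 * column names as an assumption): each didt_config_ci row is
 *   { register offset, field mask, field shift, value, register space },
 * where CISLANDS_CONFIGREG_DIDT_IND marks an indirect DIDT register and
 * the lone 0xFFFFFFFF offset terminates the table.
 */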
71static const struct ci_pt_config_reg didt_config_ci[] =
72{
73 { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
74 { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
75 { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
76 { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
77 { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
78 { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
79 { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
80 { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
81 { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
82 { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
83 { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
84 { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
85 { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
86 { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
87 { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
88 { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
89 { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
90 { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
91 { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
92 { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
93 { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
94 { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
95 { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
96 { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
97 { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
98 { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
99 { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
100 { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
101 { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
102 { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
103 { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
104 { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
105 { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
106 { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
107 { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
108 { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
109 { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
110 { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
111 { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
112 { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
113 { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
114 { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
115 { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
116 { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
117 { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
118 { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
119 { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
120 { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
121 { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
122 { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
123 { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
124 { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
125 { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
126 { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
127 { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
128 { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
129 { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
130 { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
131 { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
132 { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
133 { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
134 { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
135 { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
136 { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
137 { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
138 { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
139 { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
140 { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
141 { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
142 { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
143 { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
144 { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
145 { 0xFFFFFFFF }
146};
147
148extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
149extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
150 u32 arb_freq_src, u32 arb_freq_dest);
151extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
152extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
153extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
154 u32 max_voltage_steps,
155 struct atom_voltage_table *voltage_table);
156extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
157extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
158extern void cik_update_cg(struct radeon_device *rdev,
159 u32 block, bool enable);
160
161static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
162 struct atom_voltage_table_entry *voltage_table,
163 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
164static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
165static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
166 u32 target_tdp);
167static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
168
169static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
170{
171 struct ci_power_info *pi = rdev->pm.dpm.priv;
172
173 return pi;
174}
175
176static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
177{
178 struct ci_ps *ps = rps->ps_priv;
179
180 return ps;
181}
182
183static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
184{
185 struct ci_power_info *pi = ci_get_pi(rdev);
186
187 switch (rdev->pdev->device) {
188 case 0x6650:
189 case 0x6658:
190 case 0x665C:
191 default:
192 pi->powertune_defaults = &defaults_bonaire_xt;
193 break;
194 case 0x6651:
195 case 0x665D:
196 pi->powertune_defaults = &defaults_bonaire_pro;
197 break;
198 case 0x6640:
199 pi->powertune_defaults = &defaults_saturn_xt;
200 break;
201 case 0x6641:
202 pi->powertune_defaults = &defaults_saturn_pro;
203 break;
204 }
205
206 pi->dte_tj_offset = 0;
207
208 pi->caps_power_containment = true;
209 pi->caps_cac = false;
210 pi->caps_sq_ramping = false;
211 pi->caps_db_ramping = false;
212 pi->caps_td_ramping = false;
213 pi->caps_tcp_ramping = false;
214
215 if (pi->caps_power_containment) {
216 pi->caps_cac = true;
217 pi->enable_bapm_feature = true;
218 pi->enable_tdc_limit_feature = true;
219 pi->enable_pkg_pwr_tracking_feature = true;
220 }
221}
222
223static u8 ci_convert_to_vid(u16 vddc)
224{
225 return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
226}
227
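/* Worked example: the constants line up with the SVI2 VID encoding, where
 * VID 0 is 1.55 V and each step is 6.25 mV; everything is scaled by
 * VOLTAGE_SCALE == 4 to stay in integer math.
 *
 *   vddc = 1100 mV  ->  (6200 - 1100 * 4) / 25 = 72
 *   check: 1.55 V - 72 * 6.25 mV = 1.10 V
 */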
228static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
229{
230 struct ci_power_info *pi = ci_get_pi(rdev);
231 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
232 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
233 u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
234 u32 i;
235
236 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
237 return -EINVAL;
238 if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
239 return -EINVAL;
240 if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
241 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
242 return -EINVAL;
243
244 for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
245 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
246 lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
247 hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
248 hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
249 } else {
250 lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
251 hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
252 }
253 }
254 return 0;
255}
256
257static int ci_populate_vddc_vid(struct radeon_device *rdev)
258{
259 struct ci_power_info *pi = ci_get_pi(rdev);
260 u8 *vid = pi->smc_powertune_table.VddCVid;
261 u32 i;
262
263 if (pi->vddc_voltage_table.count > 8)
264 return -EINVAL;
265
266 for (i = 0; i < pi->vddc_voltage_table.count; i++)
267 vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
268
269 return 0;
270}
271
272static int ci_populate_svi_load_line(struct radeon_device *rdev)
273{
274 struct ci_power_info *pi = ci_get_pi(rdev);
275 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
276
277 pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
278 pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
279 pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
280 pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
281
282 return 0;
283}
284
285static int ci_populate_tdc_limit(struct radeon_device *rdev)
286{
287 struct ci_power_info *pi = ci_get_pi(rdev);
288 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
289 u16 tdc_limit;
290
291 tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
292 pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
293 pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
294 pt_defaults->tdc_vddc_throttle_release_limit_perc;
295 pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
296
297 return 0;
298}
299
300static int ci_populate_dw8(struct radeon_device *rdev)
301{
302 struct ci_power_info *pi = ci_get_pi(rdev);
303 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
304 int ret;
305
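	/*
	 * Note: the value read here is immediately overwritten with the
	 * per-board default below; the read appears to serve only as a
	 * check that the PmFuses region in SMC SRAM is reachable.
	 */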
306 ret = ci_read_smc_sram_dword(rdev,
307 SMU7_FIRMWARE_HEADER_LOCATION +
308 offsetof(SMU7_Firmware_Header, PmFuseTable) +
309 offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
310 (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
311 pi->sram_end);
	if (ret)
		return -EINVAL;

	pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
316
317 return 0;
318}
319
320static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
321{
322 struct ci_power_info *pi = ci_get_pi(rdev);
323 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
324 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
325 int i, min, max;
326
327 min = max = hi_vid[0];
328 for (i = 0; i < 8; i++) {
329 if (0 != hi_vid[i]) {
330 if (min > hi_vid[i])
331 min = hi_vid[i];
332 if (max < hi_vid[i])
333 max = hi_vid[i];
334 }
335
336 if (0 != lo_vid[i]) {
337 if (min > lo_vid[i])
338 min = lo_vid[i];
339 if (max < lo_vid[i])
340 max = lo_vid[i];
341 }
342 }
343
344 if ((min == 0) || (max == 0))
345 return -EINVAL;
346 pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
347 pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
348
349 return 0;
350}
351
352static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
353{
354 struct ci_power_info *pi = ci_get_pi(rdev);
	u16 hi_sidd;
	u16 lo_sidd;
357 struct radeon_cac_tdp_table *cac_tdp_table =
358 rdev->pm.dpm.dyn_state.cac_tdp_table;
359
360 hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
361 lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
362
363 pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
364 pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
365
366 return 0;
367}
368
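/*
 * Power and temperature values handed to the SMU below are in 8.8 fixed
 * point, hence the "* 256" scaling (e.g. a 95 W TDP is encoded as
 * 95 * 256 = 24320); this is inferred from the surrounding conversions.
 */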
369static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
370{
371 struct ci_power_info *pi = ci_get_pi(rdev);
372 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
373 SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
374 struct radeon_cac_tdp_table *cac_tdp_table =
375 rdev->pm.dpm.dyn_state.cac_tdp_table;
376 struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
377 int i, j, k;
378 const u16 *def1;
379 const u16 *def2;
380
381 dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
382 dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
383
384 dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
385 dpm_table->GpuTjMax =
386 (u8)(pi->thermal_temp_setting.temperature_high / 1000);
387 dpm_table->GpuTjHyst = 8;
388
389 dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
390
391 if (ppm) {
392 dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
393 dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
394 } else {
395 dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
396 dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
397 }
398
399 dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
400 def1 = pt_defaults->bapmti_r;
401 def2 = pt_defaults->bapmti_rc;
402
403 for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
404 for (j = 0; j < SMU7_DTE_SOURCES; j++) {
405 for (k = 0; k < SMU7_DTE_SINKS; k++) {
406 dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
407 dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
408 def1++;
409 def2++;
410 }
411 }
412 }
413
414 return 0;
415}
416
417static int ci_populate_pm_base(struct radeon_device *rdev)
418{
419 struct ci_power_info *pi = ci_get_pi(rdev);
420 u32 pm_fuse_table_offset;
421 int ret;
422
423 if (pi->caps_power_containment) {
424 ret = ci_read_smc_sram_dword(rdev,
425 SMU7_FIRMWARE_HEADER_LOCATION +
426 offsetof(SMU7_Firmware_Header, PmFuseTable),
427 &pm_fuse_table_offset, pi->sram_end);
428 if (ret)
429 return ret;
430 ret = ci_populate_bapm_vddc_vid_sidd(rdev);
431 if (ret)
432 return ret;
433 ret = ci_populate_vddc_vid(rdev);
434 if (ret)
435 return ret;
436 ret = ci_populate_svi_load_line(rdev);
437 if (ret)
438 return ret;
439 ret = ci_populate_tdc_limit(rdev);
440 if (ret)
441 return ret;
442 ret = ci_populate_dw8(rdev);
443 if (ret)
444 return ret;
445 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
446 if (ret)
447 return ret;
448 ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
449 if (ret)
450 return ret;
451 ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
452 (u8 *)&pi->smc_powertune_table,
453 sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
454 if (ret)
455 return ret;
456 }
457
458 return 0;
459}
460
461static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
462{
463 struct ci_power_info *pi = ci_get_pi(rdev);
464 u32 data;
465
466 if (pi->caps_sq_ramping) {
467 data = RREG32_DIDT(DIDT_SQ_CTRL0);
468 if (enable)
469 data |= DIDT_CTRL_EN;
470 else
471 data &= ~DIDT_CTRL_EN;
472 WREG32_DIDT(DIDT_SQ_CTRL0, data);
473 }
474
475 if (pi->caps_db_ramping) {
476 data = RREG32_DIDT(DIDT_DB_CTRL0);
477 if (enable)
478 data |= DIDT_CTRL_EN;
479 else
480 data &= ~DIDT_CTRL_EN;
481 WREG32_DIDT(DIDT_DB_CTRL0, data);
482 }
483
484 if (pi->caps_td_ramping) {
485 data = RREG32_DIDT(DIDT_TD_CTRL0);
486 if (enable)
487 data |= DIDT_CTRL_EN;
488 else
489 data &= ~DIDT_CTRL_EN;
490 WREG32_DIDT(DIDT_TD_CTRL0, data);
491 }
492
493 if (pi->caps_tcp_ramping) {
494 data = RREG32_DIDT(DIDT_TCP_CTRL0);
495 if (enable)
496 data |= DIDT_CTRL_EN;
497 else
498 data &= ~DIDT_CTRL_EN;
499 WREG32_DIDT(DIDT_TCP_CTRL0, data);
500 }
501}
502
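/*
 * Walk a ci_pt_config_reg table until the 0xFFFFFFFF offset sentinel.
 * CACHE entries only accumulate shifted/masked bits; the next non-CACHE
 * entry ORs the cached bits into its own read-modify-write, so one
 * register can be composed from several table rows.
 */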
503static int ci_program_pt_config_registers(struct radeon_device *rdev,
504 const struct ci_pt_config_reg *cac_config_regs)
505{
506 const struct ci_pt_config_reg *config_regs = cac_config_regs;
507 u32 data;
508 u32 cache = 0;
509
510 if (config_regs == NULL)
511 return -EINVAL;
512
513 while (config_regs->offset != 0xFFFFFFFF) {
514 if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
515 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
516 } else {
517 switch (config_regs->type) {
518 case CISLANDS_CONFIGREG_SMC_IND:
519 data = RREG32_SMC(config_regs->offset);
520 break;
521 case CISLANDS_CONFIGREG_DIDT_IND:
522 data = RREG32_DIDT(config_regs->offset);
523 break;
524 default:
525 data = RREG32(config_regs->offset << 2);
526 break;
527 }
528
529 data &= ~config_regs->mask;
530 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
531 data |= cache;
532
533 switch (config_regs->type) {
534 case CISLANDS_CONFIGREG_SMC_IND:
535 WREG32_SMC(config_regs->offset, data);
536 break;
537 case CISLANDS_CONFIGREG_DIDT_IND:
538 WREG32_DIDT(config_regs->offset, data);
539 break;
540 default:
541 WREG32(config_regs->offset << 2, data);
542 break;
543 }
544 cache = 0;
545 }
546 config_regs++;
547 }
548 return 0;
549}
550
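/*
 * The DIDT (di/dt) registers are only touched while the RLC is in safe
 * mode: program the config table first, then flip the per-block enable
 * bits via ci_do_enable_didt().
 */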
551static int ci_enable_didt(struct radeon_device *rdev, bool enable)
552{
553 struct ci_power_info *pi = ci_get_pi(rdev);
554 int ret;
555
556 if (pi->caps_sq_ramping || pi->caps_db_ramping ||
557 pi->caps_td_ramping || pi->caps_tcp_ramping) {
558 cik_enter_rlc_safe_mode(rdev);
559
560 if (enable) {
561 ret = ci_program_pt_config_registers(rdev, didt_config_ci);
562 if (ret) {
563 cik_exit_rlc_safe_mode(rdev);
564 return ret;
565 }
566 }
567
568 ci_do_enable_didt(rdev, enable);
569
570 cik_exit_rlc_safe_mode(rdev);
571 }
572
573 return 0;
574}
575
576static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
577{
578 struct ci_power_info *pi = ci_get_pi(rdev);
579 PPSMC_Result smc_result;
580 int ret = 0;
581
582 if (enable) {
583 pi->power_containment_features = 0;
584 if (pi->caps_power_containment) {
585 if (pi->enable_bapm_feature) {
586 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
587 if (smc_result != PPSMC_Result_OK)
588 ret = -EINVAL;
589 else
590 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
591 }
592
593 if (pi->enable_tdc_limit_feature) {
594 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
595 if (smc_result != PPSMC_Result_OK)
596 ret = -EINVAL;
597 else
598 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
599 }
600
601 if (pi->enable_pkg_pwr_tracking_feature) {
602 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
603 if (smc_result != PPSMC_Result_OK) {
604 ret = -EINVAL;
605 } else {
606 struct radeon_cac_tdp_table *cac_tdp_table =
607 rdev->pm.dpm.dyn_state.cac_tdp_table;
608 u32 default_pwr_limit =
609 (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
610
611 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
612
613 ci_set_power_limit(rdev, default_pwr_limit);
614 }
615 }
616 }
617 } else {
618 if (pi->caps_power_containment && pi->power_containment_features) {
619 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
620 ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);
621
622 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
623 ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);
624
625 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
626 ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
627 pi->power_containment_features = 0;
628 }
629 }
630
631 return ret;
632}
633
634static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
635{
636 struct ci_power_info *pi = ci_get_pi(rdev);
637 PPSMC_Result smc_result;
638 int ret = 0;
639
640 if (pi->caps_cac) {
641 if (enable) {
642 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
643 if (smc_result != PPSMC_Result_OK) {
644 ret = -EINVAL;
645 pi->cac_enabled = false;
646 } else {
647 pi->cac_enabled = true;
648 }
649 } else if (pi->cac_enabled) {
650 ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
651 pi->cac_enabled = false;
652 }
653 }
654
655 return ret;
656}
657
658static int ci_power_control_set_level(struct radeon_device *rdev)
659{
660 struct ci_power_info *pi = ci_get_pi(rdev);
661 struct radeon_cac_tdp_table *cac_tdp_table =
662 rdev->pm.dpm.dyn_state.cac_tdp_table;
663 s32 adjust_percent;
664 s32 target_tdp;
665 int ret = 0;
666 bool adjust_polarity = false; /* ??? */
667
668 if (pi->caps_power_containment &&
669 (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
670 adjust_percent = adjust_polarity ?
671 rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
672 target_tdp = ((100 + adjust_percent) *
673 (s32)cac_tdp_table->configurable_tdp) / 100;
674 target_tdp *= 256;
675
676 ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
677 }
678
679 return ret;
680}
681
682void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
683{
684 struct ci_power_info *pi = ci_get_pi(rdev);
685
686 if (pi->uvd_power_gated == gate)
687 return;
688
689 pi->uvd_power_gated = gate;
690
691 ci_update_uvd_dpm(rdev, gate);
692}
693
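/*
 * A memory clock switch has to complete within the vblank interval;
 * GDDR5 retraining takes longer, hence the larger limit. The values are
 * in microseconds, matching r600_dpm_get_vblank_time() (an assumption
 * based on the display-gap code below).
 */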
694bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
695{
696 struct ci_power_info *pi = ci_get_pi(rdev);
697 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
698 u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
699
	return vblank_time < switch_limit;
705}
706
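/*
 * Clamp the requested state to the AC/DC limits and, when mclk switching
 * is unsafe (more than one active crtc or a too-short vblank), pin the
 * memory clock at the highest level so it never changes at runtime.
 */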
707static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
708 struct radeon_ps *rps)
709{
710 struct ci_ps *ps = ci_get_ps(rps);
711 struct ci_power_info *pi = ci_get_pi(rdev);
712 struct radeon_clock_and_voltage_limits *max_limits;
713 bool disable_mclk_switching;
714 u32 sclk, mclk;
715 int i;
716
717 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
718 ci_dpm_vblank_too_short(rdev))
719 disable_mclk_switching = true;
720 else
721 disable_mclk_switching = false;
722
723 if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
724 pi->battery_state = true;
725 else
726 pi->battery_state = false;
727
728 if (rdev->pm.dpm.ac_power)
729 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
730 else
731 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
732
733 if (rdev->pm.dpm.ac_power == false) {
734 for (i = 0; i < ps->performance_level_count; i++) {
735 if (ps->performance_levels[i].mclk > max_limits->mclk)
736 ps->performance_levels[i].mclk = max_limits->mclk;
737 if (ps->performance_levels[i].sclk > max_limits->sclk)
738 ps->performance_levels[i].sclk = max_limits->sclk;
739 }
740 }
741
742 /* XXX validate the min clocks required for display */
743
744 if (disable_mclk_switching) {
745 mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
746 sclk = ps->performance_levels[0].sclk;
747 } else {
748 mclk = ps->performance_levels[0].mclk;
749 sclk = ps->performance_levels[0].sclk;
750 }
751
752 ps->performance_levels[0].sclk = sclk;
753 ps->performance_levels[0].mclk = mclk;
754
755 if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
756 ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
757
758 if (disable_mclk_switching) {
759 if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
760 ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
761 } else {
762 if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
763 ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
764 }
765}
766
767static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
768 int min_temp, int max_temp)
769{
770 int low_temp = 0 * 1000;
771 int high_temp = 255 * 1000;
772 u32 tmp;
773
774 if (low_temp < min_temp)
775 low_temp = min_temp;
776 if (high_temp > max_temp)
777 high_temp = max_temp;
778 if (high_temp < low_temp) {
779 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
780 return -EINVAL;
781 }
782
783 tmp = RREG32_SMC(CG_THERMAL_INT);
784 tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
785 tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
786 CI_DIG_THERM_INTL(low_temp / 1000);
787 WREG32_SMC(CG_THERMAL_INT, tmp);
788
789#if 0
790 /* XXX: need to figure out how to handle this properly */
791 tmp = RREG32_SMC(CG_THERMAL_CTRL);
792 tmp &= DIG_THERM_DPM_MASK;
793 tmp |= DIG_THERM_DPM(high_temp / 1000);
794 WREG32_SMC(CG_THERMAL_CTRL, tmp);
795#endif
796
797 return 0;
798}
799
800#if 0
801static int ci_read_smc_soft_register(struct radeon_device *rdev,
802 u16 reg_offset, u32 *value)
803{
804 struct ci_power_info *pi = ci_get_pi(rdev);
805
806 return ci_read_smc_sram_dword(rdev,
807 pi->soft_regs_start + reg_offset,
808 value, pi->sram_end);
809}
810#endif
811
812static int ci_write_smc_soft_register(struct radeon_device *rdev,
813 u16 reg_offset, u32 value)
814{
815 struct ci_power_info *pi = ci_get_pi(rdev);
816
817 return ci_write_smc_sram_dword(rdev,
818 pi->soft_regs_start + reg_offset,
819 value, pi->sram_end);
820}
821
822static void ci_init_fps_limits(struct radeon_device *rdev)
823{
824 struct ci_power_info *pi = ci_get_pi(rdev);
825 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
826
827 if (pi->caps_fps) {
828 u16 tmp;
829
830 tmp = 45;
831 table->FpsHighT = cpu_to_be16(tmp);
832
833 tmp = 30;
834 table->FpsLowT = cpu_to_be16(tmp);
835 }
836}
837
838static int ci_update_sclk_t(struct radeon_device *rdev)
839{
840 struct ci_power_info *pi = ci_get_pi(rdev);
841 int ret = 0;
842 u32 low_sclk_interrupt_t = 0;
843
844 if (pi->caps_sclk_throttle_low_notification) {
845 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
846
847 ret = ci_copy_bytes_to_smc(rdev,
848 pi->dpm_table_start +
849 offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
850 (u8 *)&low_sclk_interrupt_t,
851 sizeof(u32), pi->sram_end);
852
853 }
854
855 return ret;
856}
857
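/*
 * Ask the VBIOS for the real voltages behind the virtual leakage IDs
 * (ATOM_VIRTUAL_VOLTAGE_ID0 + n) and record the id -> voltage pairs so
 * later table construction can substitute them.
 */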
858static void ci_get_leakage_voltages(struct radeon_device *rdev)
859{
860 struct ci_power_info *pi = ci_get_pi(rdev);
861 u16 leakage_id, virtual_voltage_id;
862 u16 vddc, vddci;
863 int i;
864
865 pi->vddc_leakage.count = 0;
866 pi->vddci_leakage.count = 0;
867
868 if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
869 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
870 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
871 if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
872 virtual_voltage_id,
873 leakage_id) == 0) {
874 if (vddc != 0 && vddc != virtual_voltage_id) {
875 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
876 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
877 pi->vddc_leakage.count++;
878 }
879 if (vddci != 0 && vddci != virtual_voltage_id) {
880 pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
881 pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
882 pi->vddci_leakage.count++;
883 }
884 }
885 }
886 }
887}
888
889static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
890{
891 struct ci_power_info *pi = ci_get_pi(rdev);
892 bool want_thermal_protection;
893 enum radeon_dpm_event_src dpm_event_src;
894 u32 tmp;
895
896 switch (sources) {
897 case 0:
898 default:
899 want_thermal_protection = false;
900 break;
901 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
902 want_thermal_protection = true;
903 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
904 break;
905 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
906 want_thermal_protection = true;
907 dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
908 break;
909 case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
910 (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
911 want_thermal_protection = true;
912 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
913 break;
914 }
915
916 if (want_thermal_protection) {
917#if 0
918 /* XXX: need to figure out how to handle this properly */
919 tmp = RREG32_SMC(CG_THERMAL_CTRL);
920 tmp &= DPM_EVENT_SRC_MASK;
921 tmp |= DPM_EVENT_SRC(dpm_event_src);
922 WREG32_SMC(CG_THERMAL_CTRL, tmp);
923#endif
924
925 tmp = RREG32_SMC(GENERAL_PWRMGT);
926 if (pi->thermal_protection)
927 tmp &= ~THERMAL_PROTECTION_DIS;
928 else
929 tmp |= THERMAL_PROTECTION_DIS;
930 WREG32_SMC(GENERAL_PWRMGT, tmp);
931 } else {
932 tmp = RREG32_SMC(GENERAL_PWRMGT);
933 tmp |= THERMAL_PROTECTION_DIS;
934 WREG32_SMC(GENERAL_PWRMGT, tmp);
935 }
936}
937
938static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
939 enum radeon_dpm_auto_throttle_src source,
940 bool enable)
941{
942 struct ci_power_info *pi = ci_get_pi(rdev);
943
944 if (enable) {
945 if (!(pi->active_auto_throttle_sources & (1 << source))) {
946 pi->active_auto_throttle_sources |= 1 << source;
947 ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
948 }
949 } else {
950 if (pi->active_auto_throttle_sources & (1 << source)) {
951 pi->active_auto_throttle_sources &= ~(1 << source);
952 ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
953 }
954 }
955}
956
957static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
958{
959 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
960 ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
961}
962
963static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
964{
965 struct ci_power_info *pi = ci_get_pi(rdev);
966 PPSMC_Result smc_result;
967
968 if (!pi->need_update_smu7_dpm_table)
969 return 0;
970
971 if ((!pi->sclk_dpm_key_disabled) &&
972 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
973 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
974 if (smc_result != PPSMC_Result_OK)
975 return -EINVAL;
976 }
977
978 if ((!pi->mclk_dpm_key_disabled) &&
979 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
980 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
981 if (smc_result != PPSMC_Result_OK)
982 return -EINVAL;
983 }
984
985 pi->need_update_smu7_dpm_table = 0;
986 return 0;
987}
988
989static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
990{
991 struct ci_power_info *pi = ci_get_pi(rdev);
992 PPSMC_Result smc_result;
993
994 if (enable) {
995 if (!pi->sclk_dpm_key_disabled) {
996 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
997 if (smc_result != PPSMC_Result_OK)
998 return -EINVAL;
999 }
1000
1001 if (!pi->mclk_dpm_key_disabled) {
1002 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
1003 if (smc_result != PPSMC_Result_OK)
1004 return -EINVAL;
1005
1006 WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);
1007
1008 WREG32_SMC(LCAC_MC0_CNTL, 0x05);
1009 WREG32_SMC(LCAC_MC1_CNTL, 0x05);
1010 WREG32_SMC(LCAC_CPL_CNTL, 0x100005);
1011
1012 udelay(10);
1013
1014 WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
1015 WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
1016 WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
1017 }
1018 } else {
1019 if (!pi->sclk_dpm_key_disabled) {
1020 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
1021 if (smc_result != PPSMC_Result_OK)
1022 return -EINVAL;
1023 }
1024
1025 if (!pi->mclk_dpm_key_disabled) {
1026 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
1027 if (smc_result != PPSMC_Result_OK)
1028 return -EINVAL;
1029 }
1030 }
1031
1032 return 0;
1033}
1034
1035static int ci_start_dpm(struct radeon_device *rdev)
1036{
1037 struct ci_power_info *pi = ci_get_pi(rdev);
1038 PPSMC_Result smc_result;
1039 int ret;
1040 u32 tmp;
1041
1042 tmp = RREG32_SMC(GENERAL_PWRMGT);
1043 tmp |= GLOBAL_PWRMGT_EN;
1044 WREG32_SMC(GENERAL_PWRMGT, tmp);
1045
1046 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1047 tmp |= DYNAMIC_PM_EN;
1048 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1049
1050 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1051
1052 WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);
1053
1054 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
1055 if (smc_result != PPSMC_Result_OK)
1056 return -EINVAL;
1057
1058 ret = ci_enable_sclk_mclk_dpm(rdev, true);
1059 if (ret)
1060 return ret;
1061
1062 if (!pi->pcie_dpm_key_disabled) {
1063 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
1064 if (smc_result != PPSMC_Result_OK)
1065 return -EINVAL;
1066 }
1067
1068 return 0;
1069}
1070
1071static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
1072{
1073 struct ci_power_info *pi = ci_get_pi(rdev);
1074 PPSMC_Result smc_result;
1075
1076 if (!pi->need_update_smu7_dpm_table)
1077 return 0;
1078
1079 if ((!pi->sclk_dpm_key_disabled) &&
1080 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1081 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1082 if (smc_result != PPSMC_Result_OK)
1083 return -EINVAL;
1084 }
1085
1086 if ((!pi->mclk_dpm_key_disabled) &&
1087 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1088 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1089 if (smc_result != PPSMC_Result_OK)
1090 return -EINVAL;
1091 }
1092
1093 return 0;
1094}
1095
1096static int ci_stop_dpm(struct radeon_device *rdev)
1097{
1098 struct ci_power_info *pi = ci_get_pi(rdev);
1099 PPSMC_Result smc_result;
1100 int ret;
1101 u32 tmp;
1102
1103 tmp = RREG32_SMC(GENERAL_PWRMGT);
1104 tmp &= ~GLOBAL_PWRMGT_EN;
1105 WREG32_SMC(GENERAL_PWRMGT, tmp);
1106
	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1108 tmp &= ~DYNAMIC_PM_EN;
1109 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1110
1111 if (!pi->pcie_dpm_key_disabled) {
1112 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
1113 if (smc_result != PPSMC_Result_OK)
1114 return -EINVAL;
1115 }
1116
1117 ret = ci_enable_sclk_mclk_dpm(rdev, false);
1118 if (ret)
1119 return ret;
1120
1121 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
1122 if (smc_result != PPSMC_Result_OK)
1123 return -EINVAL;
1124
1125 return 0;
1126}
1127
1128static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
1129{
1130 u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1131
1132 if (enable)
1133 tmp &= ~SCLK_PWRMGT_OFF;
1134 else
1135 tmp |= SCLK_PWRMGT_OFF;
1136 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1137}
1138
1139#if 0
1140static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
1141 bool ac_power)
1142{
1143 struct ci_power_info *pi = ci_get_pi(rdev);
1144 struct radeon_cac_tdp_table *cac_tdp_table =
1145 rdev->pm.dpm.dyn_state.cac_tdp_table;
1146 u32 power_limit;
1147
1148 if (ac_power)
1149 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1150 else
1151 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1152
1153 ci_set_power_limit(rdev, power_limit);
1154
1155 if (pi->caps_automatic_dc_transition) {
1156 if (ac_power)
1157 ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
1158 else
1159 ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
1160 }
1161
1162 return 0;
1163}
1164#endif
1165
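/*
 * SMC mailbox convention: a 32-bit argument is placed in SMC_MSG_ARG_0
 * before the message is posted, and for query messages the SMC leaves
 * its reply in the same register.
 */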
1166static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
1167 PPSMC_Msg msg, u32 parameter)
1168{
1169 WREG32(SMC_MSG_ARG_0, parameter);
1170 return ci_send_msg_to_smc(rdev, msg);
1171}
1172
1173static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
1174 PPSMC_Msg msg, u32 *parameter)
1175{
1176 PPSMC_Result smc_result;
1177
1178 smc_result = ci_send_msg_to_smc(rdev, msg);
1179
1180 if ((smc_result == PPSMC_Result_OK) && parameter)
1181 *parameter = RREG32(SMC_MSG_ARG_0);
1182
1183 return smc_result;
1184}
1185
1186static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
1187{
1188 struct ci_power_info *pi = ci_get_pi(rdev);
1189
1190 if (!pi->sclk_dpm_key_disabled) {
1191 PPSMC_Result smc_result =
1192 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
1193 if (smc_result != PPSMC_Result_OK)
1194 return -EINVAL;
1195 }
1196
1197 return 0;
1198}
1199
1200static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
1201{
1202 struct ci_power_info *pi = ci_get_pi(rdev);
1203
1204 if (!pi->mclk_dpm_key_disabled) {
1205 PPSMC_Result smc_result =
1206 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
1207 if (smc_result != PPSMC_Result_OK)
1208 return -EINVAL;
1209 }
1210
1211 return 0;
1212}
1213
1214static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
1215{
1216 struct ci_power_info *pi = ci_get_pi(rdev);
1217
1218 if (!pi->pcie_dpm_key_disabled) {
1219 PPSMC_Result smc_result =
1220 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1221 if (smc_result != PPSMC_Result_OK)
1222 return -EINVAL;
1223 }
1224
1225 return 0;
1226}
1227
1228static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
1229{
1230 struct ci_power_info *pi = ci_get_pi(rdev);
1231
1232 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1233 PPSMC_Result smc_result =
1234 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
1235 if (smc_result != PPSMC_Result_OK)
1236 return -EINVAL;
1237 }
1238
1239 return 0;
1240}
1241
1242static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
1243 u32 target_tdp)
1244{
1245 PPSMC_Result smc_result =
1246 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1247 if (smc_result != PPSMC_Result_OK)
1248 return -EINVAL;
1249 return 0;
1250}
1251
1252static int ci_set_boot_state(struct radeon_device *rdev)
1253{
1254 return ci_enable_sclk_mclk_dpm(rdev, false);
1255}
1256
1257static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
1258{
1259 u32 sclk_freq;
1260 PPSMC_Result smc_result =
1261 ci_send_msg_to_smc_return_parameter(rdev,
1262 PPSMC_MSG_API_GetSclkFrequency,
1263 &sclk_freq);
1264 if (smc_result != PPSMC_Result_OK)
1265 sclk_freq = 0;
1266
1267 return sclk_freq;
1268}
1269
1270static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
1271{
1272 u32 mclk_freq;
1273 PPSMC_Result smc_result =
1274 ci_send_msg_to_smc_return_parameter(rdev,
1275 PPSMC_MSG_API_GetMclkFrequency,
1276 &mclk_freq);
1277 if (smc_result != PPSMC_Result_OK)
1278 mclk_freq = 0;
1279
1280 return mclk_freq;
1281}
1282
1283static void ci_dpm_start_smc(struct radeon_device *rdev)
1284{
1285 int i;
1286
1287 ci_program_jump_on_start(rdev);
1288 ci_start_smc_clock(rdev);
1289 ci_start_smc(rdev);
1290 for (i = 0; i < rdev->usec_timeout; i++) {
1291 if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
1292 break;
1293 }
1294}
1295
1296static void ci_dpm_stop_smc(struct radeon_device *rdev)
1297{
1298 ci_reset_smc(rdev);
1299 ci_stop_smc_clock(rdev);
1300}
1301
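/*
 * The SMU firmware publishes a SMU7_Firmware_Header at a fixed SRAM
 * location; cache the offsets of the tables we need (DPM table, soft
 * registers, MC register table, fan table, arb timing table) so later
 * SRAM accesses can address them directly.
 */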
1302static int ci_process_firmware_header(struct radeon_device *rdev)
1303{
1304 struct ci_power_info *pi = ci_get_pi(rdev);
1305 u32 tmp;
1306 int ret;
1307
1308 ret = ci_read_smc_sram_dword(rdev,
1309 SMU7_FIRMWARE_HEADER_LOCATION +
1310 offsetof(SMU7_Firmware_Header, DpmTable),
1311 &tmp, pi->sram_end);
1312 if (ret)
1313 return ret;
1314
1315 pi->dpm_table_start = tmp;
1316
1317 ret = ci_read_smc_sram_dword(rdev,
1318 SMU7_FIRMWARE_HEADER_LOCATION +
1319 offsetof(SMU7_Firmware_Header, SoftRegisters),
1320 &tmp, pi->sram_end);
1321 if (ret)
1322 return ret;
1323
1324 pi->soft_regs_start = tmp;
1325
1326 ret = ci_read_smc_sram_dword(rdev,
1327 SMU7_FIRMWARE_HEADER_LOCATION +
1328 offsetof(SMU7_Firmware_Header, mcRegisterTable),
1329 &tmp, pi->sram_end);
1330 if (ret)
1331 return ret;
1332
1333 pi->mc_reg_table_start = tmp;
1334
1335 ret = ci_read_smc_sram_dword(rdev,
1336 SMU7_FIRMWARE_HEADER_LOCATION +
1337 offsetof(SMU7_Firmware_Header, FanTable),
1338 &tmp, pi->sram_end);
1339 if (ret)
1340 return ret;
1341
1342 pi->fan_table_start = tmp;
1343
1344 ret = ci_read_smc_sram_dword(rdev,
1345 SMU7_FIRMWARE_HEADER_LOCATION +
1346 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1347 &tmp, pi->sram_end);
1348 if (ret)
1349 return ret;
1350
1351 pi->arb_table_start = tmp;
1352
1353 return 0;
1354}
1355
1356static void ci_read_clock_registers(struct radeon_device *rdev)
1357{
1358 struct ci_power_info *pi = ci_get_pi(rdev);
1359
1360 pi->clock_registers.cg_spll_func_cntl =
1361 RREG32_SMC(CG_SPLL_FUNC_CNTL);
1362 pi->clock_registers.cg_spll_func_cntl_2 =
1363 RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
1364 pi->clock_registers.cg_spll_func_cntl_3 =
1365 RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
1366 pi->clock_registers.cg_spll_func_cntl_4 =
1367 RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
1368 pi->clock_registers.cg_spll_spread_spectrum =
1369 RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1370 pi->clock_registers.cg_spll_spread_spectrum_2 =
1371 RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
1372 pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
1373 pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
1374 pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
1375 pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
1376 pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
1377 pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
1378 pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
1379 pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
1380 pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
1381}
1382
1383static void ci_init_sclk_t(struct radeon_device *rdev)
1384{
1385 struct ci_power_info *pi = ci_get_pi(rdev);
1386
1387 pi->low_sclk_interrupt_t = 0;
1388}
1389
1390static void ci_enable_thermal_protection(struct radeon_device *rdev,
1391 bool enable)
1392{
1393 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1394
1395 if (enable)
1396 tmp &= ~THERMAL_PROTECTION_DIS;
1397 else
1398 tmp |= THERMAL_PROTECTION_DIS;
1399 WREG32_SMC(GENERAL_PWRMGT, tmp);
1400}
1401
1402static void ci_enable_acpi_power_management(struct radeon_device *rdev)
1403{
1404 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1405
1406 tmp |= STATIC_PM_EN;
1407
1408 WREG32_SMC(GENERAL_PWRMGT, tmp);
1409}
1410
1411#if 0
1412static int ci_enter_ulp_state(struct radeon_device *rdev)
1413{
1415 WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
1416
1417 udelay(25000);
1418
1419 return 0;
1420}
1421
1422static int ci_exit_ulp_state(struct radeon_device *rdev)
1423{
1424 int i;
1425
1426 WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
1427
1428 udelay(7000);
1429
1430 for (i = 0; i < rdev->usec_timeout; i++) {
1431 if (RREG32(SMC_RESP_0) == 1)
1432 break;
1433 udelay(1000);
1434 }
1435
1436 return 0;
1437}
1438#endif
1439
1440static int ci_notify_smc_display_change(struct radeon_device *rdev,
1441 bool has_display)
1442{
1443 PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
1444
1445 return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
1446}
1447
1448static int ci_enable_ds_master_switch(struct radeon_device *rdev,
1449 bool enable)
1450{
1451 struct ci_power_info *pi = ci_get_pi(rdev);
1452
1453 if (enable) {
1454 if (pi->caps_sclk_ds) {
1455 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
1456 return -EINVAL;
1457 } else {
1458 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1459 return -EINVAL;
1460 }
1461 } else {
1462 if (pi->caps_sclk_ds) {
1463 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1464 return -EINVAL;
1465 }
1466 }
1467
1468 return 0;
1469}
1470
1471static void ci_program_display_gap(struct radeon_device *rdev)
1472{
1473 u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1474 u32 pre_vbi_time_in_us;
1475 u32 frame_time_in_us;
1476 u32 ref_clock = rdev->clock.spll.reference_freq;
1477 u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
1478 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
1479
1480 tmp &= ~DISP_GAP_MASK;
1481 if (rdev->pm.dpm.new_active_crtc_count > 0)
1482 tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
1483 else
1484 tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
1485 WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
1486
1487 if (refresh_rate == 0)
1488 refresh_rate = 60;
1489 if (vblank_time == 0xffffffff)
1490 vblank_time = 500;
1491 frame_time_in_us = 1000000 / refresh_rate;
1492 pre_vbi_time_in_us =
1493 frame_time_in_us - 200 - vblank_time;
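	/*
	 * ref_clock is in 10 kHz units, so ref_clock / 100 is ticks per
	 * microsecond; tmp is therefore the pre-vblank interval expressed
	 * in reference clock ticks (inferred from the unit conversions).
	 */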
1494 tmp = pre_vbi_time_in_us * (ref_clock / 100);
1495
1496 WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
1497 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
1498 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));

	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
}
1504
1505static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
1506{
1507 struct ci_power_info *pi = ci_get_pi(rdev);
1508 u32 tmp;
1509
1510 if (enable) {
1511 if (pi->caps_sclk_ss_support) {
1512 tmp = RREG32_SMC(GENERAL_PWRMGT);
1513 tmp |= DYN_SPREAD_SPECTRUM_EN;
1514 WREG32_SMC(GENERAL_PWRMGT, tmp);
1515 }
1516 } else {
1517 tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1518 tmp &= ~SSEN;
1519 WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);
1520
1521 tmp = RREG32_SMC(GENERAL_PWRMGT);
1522 tmp &= ~DYN_SPREAD_SPECTRUM_EN;
1523 WREG32_SMC(GENERAL_PWRMGT, tmp);
1524 }
1525}
1526
1527static void ci_program_sstp(struct radeon_device *rdev)
1528{
1529 WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
1530}
1531
1532static void ci_enable_display_gap(struct radeon_device *rdev)
1533{
1534 u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1535
1536 tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
1537 tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
1538 DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
1539
1540 WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
1541}
1542
1543static void ci_program_vc(struct radeon_device *rdev)
1544{
1545 u32 tmp;
1546
1547 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1548 tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
1549 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1550
1551 WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
1552 WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
1553 WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
1554 WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
1555 WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
1556 WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
1557 WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
1558 WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
1559}
1560
1561static void ci_clear_vc(struct radeon_device *rdev)
1562{
1563 u32 tmp;
1564
1565 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1566 tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
1567 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1568
1569 WREG32_SMC(CG_FTV_0, 0);
1570 WREG32_SMC(CG_FTV_1, 0);
1571 WREG32_SMC(CG_FTV_2, 0);
1572 WREG32_SMC(CG_FTV_3, 0);
1573 WREG32_SMC(CG_FTV_4, 0);
1574 WREG32_SMC(CG_FTV_5, 0);
1575 WREG32_SMC(CG_FTV_6, 0);
1576 WREG32_SMC(CG_FTV_7, 0);
1577}
1578
1579static int ci_upload_firmware(struct radeon_device *rdev)
1580{
1581 struct ci_power_info *pi = ci_get_pi(rdev);
	int i;
1583
1584 for (i = 0; i < rdev->usec_timeout; i++) {
1585 if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
1586 break;
1587 }
1588 WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
1589
1590 ci_stop_smc_clock(rdev);
1591 ci_reset_smc(rdev);
1592
	return ci_load_smc_ucode(rdev, pi->sram_end);
}
1598
1599static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
1600 struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
1601 struct atom_voltage_table *voltage_table)
1602{
1603 u32 i;
1604
1605 if (voltage_dependency_table == NULL)
1606 return -EINVAL;
1607
1608 voltage_table->mask_low = 0;
1609 voltage_table->phase_delay = 0;
1610
1611 voltage_table->count = voltage_dependency_table->count;
1612 for (i = 0; i < voltage_table->count; i++) {
1613 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
1614 voltage_table->entries[i].smio_low = 0;
1615 }
1616
1617 return 0;
1618}
1619
1620static int ci_construct_voltage_tables(struct radeon_device *rdev)
1621{
1622 struct ci_power_info *pi = ci_get_pi(rdev);
1623 int ret;
1624
1625 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1626 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
1627 VOLTAGE_OBJ_GPIO_LUT,
1628 &pi->vddc_voltage_table);
1629 if (ret)
1630 return ret;
1631 } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1632 ret = ci_get_svi2_voltage_table(rdev,
1633 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
1634 &pi->vddc_voltage_table);
1635 if (ret)
1636 return ret;
1637 }
1638
1639 if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
1640 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
1641 &pi->vddc_voltage_table);
1642
1643 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1644 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
1645 VOLTAGE_OBJ_GPIO_LUT,
1646 &pi->vddci_voltage_table);
1647 if (ret)
1648 return ret;
1649 } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1650 ret = ci_get_svi2_voltage_table(rdev,
1651 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
1652 &pi->vddci_voltage_table);
1653 if (ret)
1654 return ret;
1655 }
1656
1657 if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
1658 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
1659 &pi->vddci_voltage_table);
1660
1661 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1662 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
1663 VOLTAGE_OBJ_GPIO_LUT,
1664 &pi->mvdd_voltage_table);
1665 if (ret)
1666 return ret;
1667 } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1668 ret = ci_get_svi2_voltage_table(rdev,
1669 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
1670 &pi->mvdd_voltage_table);
1671 if (ret)
1672 return ret;
1673 }
1674
1675 if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
1676 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
1677 &pi->mvdd_voltage_table);
1678
1679 return 0;
1680}
1681
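/*
 * Fill one SMC voltage level: look up the characterized hi/lo SIDD
 * values, fall back to the raw voltage when the lookup fails, and
 * byte-swap the result into the SMC's big-endian layout.
 */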
1682static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
1683 struct atom_voltage_table_entry *voltage_table,
1684 SMU7_Discrete_VoltageLevel *smc_voltage_table)
1685{
1686 int ret;
1687
1688 ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
1689 &smc_voltage_table->StdVoltageHiSidd,
1690 &smc_voltage_table->StdVoltageLoSidd);
1691
1692 if (ret) {
1693 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
1694 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
1695 }
1696
1697 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
1698 smc_voltage_table->StdVoltageHiSidd =
1699 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
1700 smc_voltage_table->StdVoltageLoSidd =
1701 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
1702}
1703
1704static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
1705 SMU7_Discrete_DpmTable *table)
1706{
1707 struct ci_power_info *pi = ci_get_pi(rdev);
1708 unsigned int count;
1709
1710 table->VddcLevelCount = pi->vddc_voltage_table.count;
1711 for (count = 0; count < table->VddcLevelCount; count++) {
1712 ci_populate_smc_voltage_table(rdev,
1713 &pi->vddc_voltage_table.entries[count],
1714 &table->VddcLevel[count]);
1715
1716 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1717 table->VddcLevel[count].Smio |=
1718 pi->vddc_voltage_table.entries[count].smio_low;
1719 else
1720 table->VddcLevel[count].Smio = 0;
1721 }
1722 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
1723
1724 return 0;
1725}
1726
1727static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
1728 SMU7_Discrete_DpmTable *table)
1729{
1730 unsigned int count;
1731 struct ci_power_info *pi = ci_get_pi(rdev);
1732
1733 table->VddciLevelCount = pi->vddci_voltage_table.count;
1734 for (count = 0; count < table->VddciLevelCount; count++) {
1735 ci_populate_smc_voltage_table(rdev,
1736 &pi->vddci_voltage_table.entries[count],
1737 &table->VddciLevel[count]);
1738
1739 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1740 table->VddciLevel[count].Smio |=
1741 pi->vddci_voltage_table.entries[count].smio_low;
1742 else
1743 table->VddciLevel[count].Smio = 0;
1744 }
1745 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
1746
1747 return 0;
1748}
1749
1750static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
1751 SMU7_Discrete_DpmTable *table)
1752{
1753 struct ci_power_info *pi = ci_get_pi(rdev);
1754 unsigned int count;
1755
1756 table->MvddLevelCount = pi->mvdd_voltage_table.count;
1757 for (count = 0; count < table->MvddLevelCount; count++) {
1758 ci_populate_smc_voltage_table(rdev,
1759 &pi->mvdd_voltage_table.entries[count],
1760 &table->MvddLevel[count]);
1761
1762 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1763 table->MvddLevel[count].Smio |=
1764 pi->mvdd_voltage_table.entries[count].smio_low;
1765 else
1766 table->MvddLevel[count].Smio = 0;
1767 }
1768 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
1769
1770 return 0;
1771}
1772
1773static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
1774 SMU7_Discrete_DpmTable *table)
1775{
1776 int ret;
1777
1778 ret = ci_populate_smc_vddc_table(rdev, table);
1779 if (ret)
1780 return ret;
1781
1782 ret = ci_populate_smc_vddci_table(rdev, table);
1783 if (ret)
1784 return ret;
1785
1786 ret = ci_populate_smc_mvdd_table(rdev, table);
1787 if (ret)
1788 return ret;
1789
1790 return 0;
1791}
1792
1793static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
1794 SMU7_Discrete_VoltageLevel *voltage)
1795{
1796 struct ci_power_info *pi = ci_get_pi(rdev);
1797 u32 i = 0;
1798
1799 if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
1800 for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
1801 if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
1802 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
1803 break;
1804 }
1805 }
1806
		if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
			return -EINVAL;

		return 0;
	}

	/* no MVDD control: report failure so the caller falls back to MinMvdd = 0 */
	return -EINVAL;
1812}
1813
1814static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
1815 struct atom_voltage_table_entry *voltage_table,
1816 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
1817{
1818 u16 v_index, idx;
1819 bool voltage_found = false;
1820 *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
1821 *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
1822
1823 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
1824 return -EINVAL;
1825
1826 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
1827 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1828 if (voltage_table->value ==
1829 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1830 voltage_found = true;
1831 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1832 idx = v_index;
1833 else
1834 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1835 *std_voltage_lo_sidd =
1836 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1837 *std_voltage_hi_sidd =
1838 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1839 break;
1840 }
1841 }
1842
1843 if (!voltage_found) {
1844 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1845 if (voltage_table->value <=
1846 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1847 voltage_found = true;
1848 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1849 idx = v_index;
1850 else
1851 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1852 *std_voltage_lo_sidd =
1853 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1854 *std_voltage_hi_sidd =
1855 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1856 break;
1857 }
1858 }
1859 }
1860 }
1861
1862 return 0;
1863}
1864
1865static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
1866 const struct radeon_phase_shedding_limits_table *limits,
1867 u32 sclk,
1868 u32 *phase_shedding)
1869{
1870 unsigned int i;
1871
1872 *phase_shedding = 1;
1873
1874 for (i = 0; i < limits->count; i++) {
1875 if (sclk < limits->entries[i].sclk) {
1876 *phase_shedding = i;
1877 break;
1878 }
1879 }
1880}
1881
1882static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
1883 const struct radeon_phase_shedding_limits_table *limits,
1884 u32 mclk,
1885 u32 *phase_shedding)
1886{
1887 unsigned int i;
1888
1889 *phase_shedding = 1;
1890
1891 for (i = 0; i < limits->count; i++) {
1892 if (mclk < limits->entries[i].mclk) {
1893 *phase_shedding = i;
1894 break;
1895 }
1896 }
1897}
1898
1899static int ci_init_arb_table_index(struct radeon_device *rdev)
1900{
1901 struct ci_power_info *pi = ci_get_pi(rdev);
1902 u32 tmp;
1903 int ret;
1904
1905 ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
1906 &tmp, pi->sram_end);
1907 if (ret)
1908 return ret;
1909
1910 tmp &= 0x00FFFFFF;
1911 tmp |= MC_CG_ARB_FREQ_F1 << 24;
1912
1913 return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
1914 tmp, pi->sram_end);
1915}
1916
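/*
 * Return the voltage of the lowest table entry whose clock is >= the
 * requested clock, clamping to the highest entry when the request
 * exceeds the table.
 */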
1917static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
1918 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
1919 u32 clock, u32 *voltage)
1920{
1921 u32 i = 0;
1922
1923 if (allowed_clock_voltage_table->count == 0)
1924 return -EINVAL;
1925
1926 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
1927 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
1928 *voltage = allowed_clock_voltage_table->entries[i].v;
1929 return 0;
1930 }
1931 }
1932
1933 *voltage = allowed_clock_voltage_table->entries[i-1].v;
1934
1935 return 0;
1936}
1937
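/*
 * Pick the largest deep-sleep divider id such that sclk / 2^id stays at
 * or above the minimum engine clock. E.g. assuming a floor of 5000
 * (10 kHz units), sclk = 30000 settles on id = 2 (30000 / 4 = 7500).
 */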
1938static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1939 u32 sclk, u32 min_sclk_in_sr)
1940{
1941 u32 i;
1942 u32 tmp;
1943 u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
1944 min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
1945
1946 if (sclk < min)
1947 return 0;
1948
1949 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
1950 tmp = sclk / (1 << i);
1951 if (tmp >= min || i == 0)
1952 break;
1953 }
1954
1955 return (u8)i;
1956}
1957
1958static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
1959{
1960 return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
1961}
1962
1963static int ci_reset_to_default(struct radeon_device *rdev)
1964{
1965 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
1966 0 : -EINVAL;
1967}
1968
1969static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
1970{
1971 u32 tmp;
1972
1973 tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
1974
1975 if (tmp == MC_CG_ARB_FREQ_F0)
1976 return 0;
1977
1978 return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
1979}
1980
1981static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
1982 u32 sclk,
1983 u32 mclk,
1984 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
1985{
1986 u32 dram_timing;
1987 u32 dram_timing2;
1988 u32 burst_time;
1989
1990 radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
1991
1992 dram_timing = RREG32(MC_ARB_DRAM_TIMING);
1993 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
1994 burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
1995
1996 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
1997 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
1998 arb_regs->McArbBurstTime = (u8)burst_time;
1999
2000 return 0;
2001}
2002
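/*
 * Build the full sclk x mclk matrix of memory arbiter timings and push
 * it to SMC SRAM in one copy; entries[i][j] follows the sclk/mclk dpm
 * level ordering.
 */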
2003static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2004{
2005 struct ci_power_info *pi = ci_get_pi(rdev);
2006 SMU7_Discrete_MCArbDramTimingTable arb_regs;
2007 u32 i, j;
2008 int ret = 0;
2009
2010 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2011
2012 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2013 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2014 ret = ci_populate_memory_timing_parameters(rdev,
2015 pi->dpm_table.sclk_table.dpm_levels[i].value,
2016 pi->dpm_table.mclk_table.dpm_levels[j].value,
2017 &arb_regs.entries[i][j]);
2018 if (ret)
2019 break;
2020 }
2021 }
2022
2023 if (ret == 0)
2024 ret = ci_copy_bytes_to_smc(rdev,
2025 pi->arb_table_start,
2026 (u8 *)&arb_regs,
2027 sizeof(SMU7_Discrete_MCArbDramTimingTable),
2028 pi->sram_end);
2029
2030 return ret;
2031}
2032
2033static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2034{
2035 struct ci_power_info *pi = ci_get_pi(rdev);
2036
2037 if (pi->need_update_smu7_dpm_table == 0)
2038 return 0;
2039
2040 return ci_do_program_memory_timing_parameters(rdev);
2041}
2042
2043static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2044 struct radeon_ps *radeon_boot_state)
2045{
2046 struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2047 struct ci_power_info *pi = ci_get_pi(rdev);
2048 u32 level = 0;
2049
2050 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2051 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2052 boot_state->performance_levels[0].sclk) {
2053 pi->smc_state_table.GraphicsBootLevel = level;
2054 break;
2055 }
2056 }
2057
2058 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2059 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2060 boot_state->performance_levels[0].mclk) {
2061 pi->smc_state_table.MemoryBootLevel = level;
2062 break;
2063 }
2064 }
2065}
2066
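/*
 * Build a bitmask with bit n set when dpm_levels[n] is enabled, bit 0
 * being the lowest level; e.g. levels {on, off, on} yield 0x5.
 */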
2067static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2068{
2069 u32 i;
2070 u32 mask_value = 0;
2071
	for (i = dpm_table->count; i > 0; i--) {
		mask_value <<= 1;
		if (dpm_table->dpm_levels[i-1].enabled)
			mask_value |= 0x1;
	}
2079
2080 return mask_value;
2081}
2082
2083static void ci_populate_smc_link_level(struct radeon_device *rdev,
2084 SMU7_Discrete_DpmTable *table)
2085{
2086 struct ci_power_info *pi = ci_get_pi(rdev);
2087 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2088 u32 i;
2089
2090 for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2091 table->LinkLevel[i].PcieGenSpeed =
2092 (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2093 table->LinkLevel[i].PcieLaneCount =
2094 r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2095 table->LinkLevel[i].EnabledForActivity = 1;
2096 table->LinkLevel[i].DownT = cpu_to_be32(5);
2097 table->LinkLevel[i].UpT = cpu_to_be32(30);
2098 }
2099
2100 pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2101 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2102 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2103}
2104
2105static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2106 SMU7_Discrete_DpmTable *table)
2107{
2108 u32 count;
2109 struct atom_clock_dividers dividers;
2110 int ret = -EINVAL;
2111
2112 table->UvdLevelCount =
2113 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2114
2115 for (count = 0; count < table->UvdLevelCount; count++) {
2116 table->UvdLevel[count].VclkFrequency =
2117 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2118 table->UvdLevel[count].DclkFrequency =
2119 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2120 table->UvdLevel[count].MinVddc =
2121 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2122 table->UvdLevel[count].MinVddcPhases = 1;
2123
2124 ret = radeon_atom_get_clock_dividers(rdev,
2125 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2126 table->UvdLevel[count].VclkFrequency, false, &dividers);
2127 if (ret)
2128 return ret;
2129
2130 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2131
2132 ret = radeon_atom_get_clock_dividers(rdev,
2133 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2134 table->UvdLevel[count].DclkFrequency, false, &dividers);
2135 if (ret)
2136 return ret;
2137
2138 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2139
2140 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2141 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2142 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2143 }
2144
2145 return ret;
2146}
2147
2148static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2149 SMU7_Discrete_DpmTable *table)
2150{
2151 u32 count;
2152 struct atom_clock_dividers dividers;
2153 int ret = -EINVAL;
2154
2155 table->VceLevelCount =
2156 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2157
2158 for (count = 0; count < table->VceLevelCount; count++) {
2159 table->VceLevel[count].Frequency =
2160 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2161 table->VceLevel[count].MinVoltage =
2162 (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2163 table->VceLevel[count].MinPhases = 1;
2164
2165 ret = radeon_atom_get_clock_dividers(rdev,
2166 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2167 table->VceLevel[count].Frequency, false, &dividers);
2168 if (ret)
2169 return ret;
2170
2171 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2172
2173 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2174 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2175 }
2176
2177 return ret;
2179}
2180
2181static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2182 SMU7_Discrete_DpmTable *table)
2183{
2184 u32 count;
2185 struct atom_clock_dividers dividers;
2186 int ret = -EINVAL;
2187
2188 table->AcpLevelCount = (u8)
2189 (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2190
2191 for (count = 0; count < table->AcpLevelCount; count++) {
2192 table->AcpLevel[count].Frequency =
2193 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
		/* apply VOLTAGE_SCALE for consistency with the UVD/VCE/SAMU levels */
		table->AcpLevel[count].MinVoltage =
			(u16)rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2196 table->AcpLevel[count].MinPhases = 1;
2197
2198 ret = radeon_atom_get_clock_dividers(rdev,
2199 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2200 table->AcpLevel[count].Frequency, false, &dividers);
2201 if (ret)
2202 return ret;
2203
2204 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2205
2206 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2207 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2208 }
2209
2210 return ret;
2211}
2212
2213static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2214 SMU7_Discrete_DpmTable *table)
2215{
2216 u32 count;
2217 struct atom_clock_dividers dividers;
2218 int ret = -EINVAL;
2219
2220 table->SamuLevelCount =
2221 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2222
2223 for (count = 0; count < table->SamuLevelCount; count++) {
2224 table->SamuLevel[count].Frequency =
2225 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2226 table->SamuLevel[count].MinVoltage =
2227 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2228 table->SamuLevel[count].MinPhases = 1;
2229
2230 ret = radeon_atom_get_clock_dividers(rdev,
2231 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2232 table->SamuLevel[count].Frequency, false, &dividers);
2233 if (ret)
2234 return ret;
2235
2236 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2237
2238 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2239 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2240 }
2241
2242 return ret;
2243}
2244
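/*
 * Translate a target memory clock into the MPLL register image the SMC
 * will program. The dividers come from the VBIOS via
 * radeon_atom_get_memory_pll_dividers(); memory spread spectrum
 * (CLKS/CLKV) is layered on top when supported. Note the nominal PLL
 * frequency is 4x the memory clock for GDDR5 but 2x otherwise.
 */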
2245static int ci_calculate_mclk_params(struct radeon_device *rdev,
2246 u32 memory_clock,
2247 SMU7_Discrete_MemoryLevel *mclk,
2248 bool strobe_mode,
2249 bool dll_state_on)
2250{
2251 struct ci_power_info *pi = ci_get_pi(rdev);
2252 u32 dll_cntl = pi->clock_registers.dll_cntl;
2253 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2254 u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2255 u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2256 u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2257 u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2258 u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2259 u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
2260 u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
2261 struct atom_mpll_param mpll_param;
2262 int ret;
2263
2264 ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
2265 if (ret)
2266 return ret;
2267
2268 mpll_func_cntl &= ~BWCTRL_MASK;
2269 mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
2270
2271 mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
2272 mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
2273 CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
2274
2275 mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
2276 mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
2277
2278 if (pi->mem_gddr5) {
2279 mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
2280 mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
2281 YCLK_POST_DIV(mpll_param.post_div);
2282 }
2283
2284 if (pi->caps_mclk_ss_support) {
2285 struct radeon_atom_ss ss;
2286 u32 freq_nom;
2287 u32 tmp;
2288 u32 reference_clock = rdev->clock.mpll.reference_freq;
2289
2290 if (pi->mem_gddr5)
2291 freq_nom = memory_clock * 4;
2292 else
2293 freq_nom = memory_clock * 2;
2294
2295 tmp = (freq_nom / reference_clock);
2296 tmp = tmp * tmp;
2297 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2298 ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2299 u32 clks = reference_clock * 5 / ss.rate;
2300 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2301
2302 mpll_ss1 &= ~CLKV_MASK;
2303 mpll_ss1 |= CLKV(clkv);
2304
2305 mpll_ss2 &= ~CLKS_MASK;
2306 mpll_ss2 |= CLKS(clks);
2307 }
2308 }
2309
2310 mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
2311 mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
2312
2313 if (dll_state_on)
2314 mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
2315 else
2316 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2317
2318 mclk->MclkFrequency = memory_clock;
2319 mclk->MpllFuncCntl = mpll_func_cntl;
2320 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2321 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2322 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2323 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2324 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2325 mclk->DllCntl = dll_cntl;
2326 mclk->MpllSs1 = mpll_ss1;
2327 mclk->MpllSs2 = mpll_ss2;
2328
2329 return 0;
2330}
2331
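/*
 * Build one SMC memory level for a given mclk: look up the minimum
 * VDDC/VDDCI/MVDD from the dependency tables, enable stutter/strobe/EDC
 * based on the thresholds in ci_power_info, derive the DLL state from
 * the MC_SEQ_MISC straps, then byte-swap the result for the SMC.
 */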
2332static int ci_populate_single_memory_level(struct radeon_device *rdev,
2333 u32 memory_clock,
2334 SMU7_Discrete_MemoryLevel *memory_level)
2335{
2336 struct ci_power_info *pi = ci_get_pi(rdev);
2337 int ret;
2338 bool dll_state_on;
2339
2340 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2341 ret = ci_get_dependency_volt_by_clk(rdev,
2342 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2343 memory_clock, &memory_level->MinVddc);
2344 if (ret)
2345 return ret;
2346 }
2347
2348 if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2349 ret = ci_get_dependency_volt_by_clk(rdev,
2350 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2351 memory_clock, &memory_level->MinVddci);
2352 if (ret)
2353 return ret;
2354 }
2355
2356 if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
2357 ret = ci_get_dependency_volt_by_clk(rdev,
2358 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2359 memory_clock, &memory_level->MinMvdd);
2360 if (ret)
2361 return ret;
2362 }
2363
2364 memory_level->MinVddcPhases = 1;
2365
2366 if (pi->vddc_phase_shed_control)
2367 ci_populate_phase_value_based_on_mclk(rdev,
2368 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2369 memory_clock,
2370 &memory_level->MinVddcPhases);
2371
2372 memory_level->EnabledForThrottle = 1;
2373 memory_level->EnabledForActivity = 1;
2374 memory_level->UpH = 0;
2375 memory_level->DownH = 100;
2376 memory_level->VoltageDownH = 0;
2377 memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
2378
2379 memory_level->StutterEnable = false;
2380 memory_level->StrobeEnable = false;
2381 memory_level->EdcReadEnable = false;
2382 memory_level->EdcWriteEnable = false;
2383 memory_level->RttEnable = false;
2384
2385 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2386
	if (pi->mclk_stutter_mode_threshold &&
	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
	    !pi->uvd_enabled &&
	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
	    (rdev->pm.dpm.new_active_crtc_count <= 2))
		memory_level->StutterEnable = true;

	if (pi->mclk_strobe_mode_threshold &&
	    (memory_clock <= pi->mclk_strobe_mode_threshold))
		memory_level->StrobeEnable = true;
2397
2398 if (pi->mem_gddr5) {
2399 memory_level->StrobeRatio =
2400 si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
2401 if (pi->mclk_edc_enable_threshold &&
2402 (memory_clock > pi->mclk_edc_enable_threshold))
2403 memory_level->EdcReadEnable = true;
2404
2405 if (pi->mclk_edc_wr_enable_threshold &&
2406 (memory_clock > pi->mclk_edc_wr_enable_threshold))
2407 memory_level->EdcWriteEnable = true;
2408
2409 if (memory_level->StrobeEnable) {
2410 if (si_get_mclk_frequency_ratio(memory_clock, true) >=
2411 ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
2412 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
2413 else
2414 dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
2415 } else {
2416 dll_state_on = pi->dll_default_on;
2417 }
2418 } else {
2419 memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
2420 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
2421 }
2422
2423 ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
2424 if (ret)
2425 return ret;
2426
2427 memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
2428 memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
2429 memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
2430 memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
2431
2432 memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
2433 memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
2434 memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
2435 memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
2436 memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
2437 memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
2438 memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
2439 memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
2440 memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
2441 memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
2442 memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
2443
2444 return 0;
2445}
2446
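/*
 * Populate the ACPI level, the state used when DPM is effectively idle:
 * SPLL powered down and held in reset, the sclk mux forced to
 * SCLK_MUX_SEL(4), the memory DLL in power-down, and the boot (or
 * minimum pp table) voltages applied.
 */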
2447static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
2448 SMU7_Discrete_DpmTable *table)
2449{
2450 struct ci_power_info *pi = ci_get_pi(rdev);
2451 struct atom_clock_dividers dividers;
2452 SMU7_Discrete_VoltageLevel voltage_level;
2453 u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
2454 u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
2455 u32 dll_cntl = pi->clock_registers.dll_cntl;
2456 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2457 int ret;
2458
2459 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
2460
2461 if (pi->acpi_vddc)
2462 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
2463 else
2464 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
2465
2466 table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
2467
2468 table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
2469
2470 ret = radeon_atom_get_clock_dividers(rdev,
2471 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2472 table->ACPILevel.SclkFrequency, false, &dividers);
2473 if (ret)
2474 return ret;
2475
2476 table->ACPILevel.SclkDid = (u8)dividers.post_divider;
2477 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2478 table->ACPILevel.DeepSleepDivId = 0;
2479
2480 spll_func_cntl &= ~SPLL_PWRON;
2481 spll_func_cntl |= SPLL_RESET;
2482
2483 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
2484 spll_func_cntl_2 |= SCLK_MUX_SEL(4);
2485
2486 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
2487 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
2488 table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
2489 table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
2490 table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
2491 table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2492 table->ACPILevel.CcPwrDynRm = 0;
2493 table->ACPILevel.CcPwrDynRm1 = 0;
2494
2495 table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
2496 table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
2497 table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
2498 table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
2499 table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
2500 table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
2501 table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
2502 table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
2503 table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
2504 table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
2505 table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
2506
2507 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
2508 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
2509
2510 if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2511 if (pi->acpi_vddci)
2512 table->MemoryACPILevel.MinVddci =
2513 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
2514 else
2515 table->MemoryACPILevel.MinVddci =
2516 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
2517 }
2518
2519 if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
2520 table->MemoryACPILevel.MinMvdd = 0;
2521 else
2522 table->MemoryACPILevel.MinMvdd =
2523 cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
2524
2525 mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
2526 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2527
2528 dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
2529
2530 table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
2531 table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
2532 table->MemoryACPILevel.MpllAdFuncCntl =
2533 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
2534 table->MemoryACPILevel.MpllDqFuncCntl =
2535 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
2536 table->MemoryACPILevel.MpllFuncCntl =
2537 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
2538 table->MemoryACPILevel.MpllFuncCntl_1 =
2539 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
2540 table->MemoryACPILevel.MpllFuncCntl_2 =
2541 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
2542 table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
2543 table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
2544
2545 table->MemoryACPILevel.EnabledForThrottle = 0;
2546 table->MemoryACPILevel.EnabledForActivity = 0;
2547 table->MemoryACPILevel.UpH = 0;
2548 table->MemoryACPILevel.DownH = 100;
2549 table->MemoryACPILevel.VoltageDownH = 0;
2550 table->MemoryACPILevel.ActivityLevel =
2551 cpu_to_be16((u16)pi->mclk_activity_target);
2552
2553 table->MemoryACPILevel.StutterEnable = false;
2554 table->MemoryACPILevel.StrobeEnable = false;
2555 table->MemoryACPILevel.EdcReadEnable = false;
2556 table->MemoryACPILevel.EdcWriteEnable = false;
2557 table->MemoryACPILevel.RttEnable = false;
2558
2559 return 0;
2560}
2561
2563static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
2564{
2565 struct ci_power_info *pi = ci_get_pi(rdev);
2566 struct ci_ulv_parm *ulv = &pi->ulv;
2567
2568 if (ulv->supported) {
2569 if (enable)
2570 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
2571 0 : -EINVAL;
2572 else
2573 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
2574 0 : -EINVAL;
2575 }
2576
2577 return 0;
2578}
2579
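/*
 * ULV (ultra low voltage) is expressed to the SMC as an offset below the
 * lowest sclk DPM voltage; for SVID2 voltage control the offset is
 * converted to VID steps instead. A zero ulv_voltage (read from the
 * reused backbias_response_time field) marks ULV as unsupported.
 */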
2580static int ci_populate_ulv_level(struct radeon_device *rdev,
2581 SMU7_Discrete_Ulv *state)
2582{
2583 struct ci_power_info *pi = ci_get_pi(rdev);
2584 u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
2585
2586 state->CcPwrDynRm = 0;
2587 state->CcPwrDynRm1 = 0;
2588
2589 if (ulv_voltage == 0) {
2590 pi->ulv.supported = false;
2591 return 0;
2592 }
2593
2594 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2595 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2596 state->VddcOffset = 0;
2597 else
2598 state->VddcOffset =
2599 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
2600 } else {
2601 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2602 state->VddcOffsetVid = 0;
2603 else
2604 state->VddcOffsetVid = (u8)
2605 ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
2606 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2607 }
2608 state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
2609
2610 state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
2611 state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
2612 state->VddcOffset = cpu_to_be16(state->VddcOffset);
2613
2614 return 0;
2615}
2616
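/*
 * Compute the engine PLL (SPLL) settings for a target sclk. The feedback
 * divider comes back from the VBIOS as a 26-bit fractional value and is
 * programmed with dithering enabled; engine spread spectrum, when
 * supported, is derived from the VCO frequency (sclk * post divider)
 * rather than from the output clock.
 */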
2617static int ci_calculate_sclk_params(struct radeon_device *rdev,
2618 u32 engine_clock,
2619 SMU7_Discrete_GraphicsLevel *sclk)
2620{
2621 struct ci_power_info *pi = ci_get_pi(rdev);
2622 struct atom_clock_dividers dividers;
2623 u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
2624 u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
2625 u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
2626 u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2627 u32 reference_clock = rdev->clock.spll.reference_freq;
2628 u32 reference_divider;
2629 u32 fbdiv;
2630 int ret;
2631
2632 ret = radeon_atom_get_clock_dividers(rdev,
2633 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2634 engine_clock, false, &dividers);
2635 if (ret)
2636 return ret;
2637
2638 reference_divider = 1 + dividers.ref_div;
2639 fbdiv = dividers.fb_div & 0x3FFFFFF;
2640
2641 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
2642 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
2643 spll_func_cntl_3 |= SPLL_DITHEN;
2644
2645 if (pi->caps_sclk_ss_support) {
2646 struct radeon_atom_ss ss;
2647 u32 vco_freq = engine_clock * dividers.post_div;
2648
2649 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2650 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
2651 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
2652 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
2653
2654 cg_spll_spread_spectrum &= ~CLK_S_MASK;
2655 cg_spll_spread_spectrum |= CLK_S(clk_s);
2656 cg_spll_spread_spectrum |= SSEN;
2657
2658 cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
2659 cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
2660 }
2661 }
2662
2663 sclk->SclkFrequency = engine_clock;
2664 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
2665 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
2666 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
2667 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
2668 sclk->SclkDid = (u8)dividers.post_divider;
2669
2670 return 0;
2671}
2672
2673static int ci_populate_single_graphic_level(struct radeon_device *rdev,
2674 u32 engine_clock,
2675 u16 sclk_activity_level_t,
2676 SMU7_Discrete_GraphicsLevel *graphic_level)
2677{
2678 struct ci_power_info *pi = ci_get_pi(rdev);
2679 int ret;
2680
2681 ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
2682 if (ret)
2683 return ret;
2684
2685 ret = ci_get_dependency_volt_by_clk(rdev,
2686 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2687 engine_clock, &graphic_level->MinVddc);
2688 if (ret)
2689 return ret;
2690
2691 graphic_level->SclkFrequency = engine_clock;
2692
2693 graphic_level->Flags = 0;
2694 graphic_level->MinVddcPhases = 1;
2695
2696 if (pi->vddc_phase_shed_control)
2697 ci_populate_phase_value_based_on_sclk(rdev,
2698 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2699 engine_clock,
2700 &graphic_level->MinVddcPhases);
2701
2702 graphic_level->ActivityLevel = sclk_activity_level_t;
2703
2704 graphic_level->CcPwrDynRm = 0;
2705 graphic_level->CcPwrDynRm1 = 0;
2706 graphic_level->EnabledForActivity = 1;
2707 graphic_level->EnabledForThrottle = 1;
2708 graphic_level->UpH = 0;
2709 graphic_level->DownH = 0;
2710 graphic_level->VoltageDownH = 0;
2711 graphic_level->PowerThrottle = 0;
2712
2713 if (pi->caps_sclk_ds)
2714 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
2715 engine_clock,
2716 CISLAND_MINIMUM_ENGINE_CLOCK);
2717
2718 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2719
2720 graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
2721 graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
2722 graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
2723 graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
2724 graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
2725 graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
2726 graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
2727 graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
2728 graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
2729 graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
2730 graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
2731
2732 return 0;
2733}
2734
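/*
 * Populate every graphics level from the sclk DPM table and upload the
 * whole array into SMC SRAM at dpm_table_start plus the member offset,
 * so the layout of SMU7_Discrete_DpmTable must match the firmware
 * exactly. Only the last level gets the HIGH display watermark.
 */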
2735static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
2736{
2737 struct ci_power_info *pi = ci_get_pi(rdev);
2738 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2739 u32 level_array_address = pi->dpm_table_start +
2740 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2741 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
2742 SMU7_MAX_LEVELS_GRAPHICS;
2743 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
	u32 i;
	int ret;
2745
2746 memset(levels, 0, level_array_size);
2747
2748 for (i = 0; i < dpm_table->sclk_table.count; i++) {
2749 ret = ci_populate_single_graphic_level(rdev,
2750 dpm_table->sclk_table.dpm_levels[i].value,
2751 (u16)pi->activity_target[i],
2752 &pi->smc_state_table.GraphicsLevel[i]);
2753 if (ret)
2754 return ret;
2755 if (i == (dpm_table->sclk_table.count - 1))
2756 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
2757 PPSMC_DISPLAY_WATERMARK_HIGH;
2758 }
2759
2760 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
2761 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
2762 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2763
2764 ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2765 (u8 *)levels, level_array_size,
2766 pi->sram_end);
2767 if (ret)
2768 return ret;
2769
2770 return 0;
2771}
2772
2773static int ci_populate_ulv_state(struct radeon_device *rdev,
2774 SMU7_Discrete_Ulv *ulv_level)
2775{
2776 return ci_populate_ulv_level(rdev, ulv_level);
2777}
2778
2779static int ci_populate_all_memory_levels(struct radeon_device *rdev)
2780{
2781 struct ci_power_info *pi = ci_get_pi(rdev);
2782 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2783 u32 level_array_address = pi->dpm_table_start +
2784 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
2785 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
2786 SMU7_MAX_LEVELS_MEMORY;
2787 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
	u32 i;
	int ret;
2789
2790 memset(levels, 0, level_array_size);
2791
2792 for (i = 0; i < dpm_table->mclk_table.count; i++) {
2793 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
2794 return -EINVAL;
2795 ret = ci_populate_single_memory_level(rdev,
2796 dpm_table->mclk_table.dpm_levels[i].value,
2797 &pi->smc_state_table.MemoryLevel[i]);
2798 if (ret)
2799 return ret;
2800 }
2801
2802 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
2803
2804 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
2805 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
2806 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2807
2808 pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
2809 PPSMC_DISPLAY_WATERMARK_HIGH;
2810
2811 ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2812 (u8 *)levels, level_array_size,
2813 pi->sram_end);
2814 if (ret)
2815 return ret;
2816
2817 return 0;
2818}
2819
2820static void ci_reset_single_dpm_table(struct radeon_device *rdev,
				      struct ci_single_dpm_table *dpm_table,
2822 u32 count)
2823{
2824 u32 i;
2825
2826 dpm_table->count = count;
2827 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
2828 dpm_table->dpm_levels[i].enabled = false;
2829}
2830
static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
2832 u32 index, u32 pcie_gen, u32 pcie_lanes)
2833{
2834 dpm_table->dpm_levels[index].value = pcie_gen;
2835 dpm_table->dpm_levels[index].param1 = pcie_lanes;
2836 dpm_table->dpm_levels[index].enabled = true;
2837}
2838
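/*
 * Lay out the six fixed PCIe DPM entries, alternating between the
 * powersaving and performance gen/lane settings: entry 0 is the
 * powersaving minimum and entry 5 the performance maximum. If only one
 * of the two sets was provided by the platform, it is cloned into the
 * other before the table is built.
 */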
2839static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
2840{
2841 struct ci_power_info *pi = ci_get_pi(rdev);
2842
2843 if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
2844 return -EINVAL;
2845
2846 if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
2847 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
2848 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
2849 } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
2850 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
2851 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
2852 }
2853
2854 ci_reset_single_dpm_table(rdev,
2855 &pi->dpm_table.pcie_speed_table,
2856 SMU7_MAX_LEVELS_LINK);
2857
2858 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
2859 pi->pcie_gen_powersaving.min,
2860 pi->pcie_lane_powersaving.min);
2861 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
2862 pi->pcie_gen_performance.min,
2863 pi->pcie_lane_performance.min);
2864 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
2865 pi->pcie_gen_powersaving.min,
2866 pi->pcie_lane_powersaving.max);
2867 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
2868 pi->pcie_gen_performance.min,
2869 pi->pcie_lane_performance.max);
2870 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
2871 pi->pcie_gen_powersaving.max,
2872 pi->pcie_lane_powersaving.max);
2873 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
2874 pi->pcie_gen_performance.max,
2875 pi->pcie_lane_performance.max);
2876
2877 pi->dpm_table.pcie_speed_table.count = 6;
2878
2879 return 0;
2880}
2881
2882static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
2883{
2884 struct ci_power_info *pi = ci_get_pi(rdev);
2885 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
2886 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2887 struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
2888 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
2889 struct radeon_cac_leakage_table *std_voltage_table =
2890 &rdev->pm.dpm.dyn_state.cac_leakage_table;
2891 u32 i;
2892
2893 if (allowed_sclk_vddc_table == NULL)
2894 return -EINVAL;
2895 if (allowed_sclk_vddc_table->count < 1)
2896 return -EINVAL;
2897 if (allowed_mclk_table == NULL)
2898 return -EINVAL;
2899 if (allowed_mclk_table->count < 1)
2900 return -EINVAL;
2901
2902 memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
2903
2904 ci_reset_single_dpm_table(rdev,
2905 &pi->dpm_table.sclk_table,
2906 SMU7_MAX_LEVELS_GRAPHICS);
2907 ci_reset_single_dpm_table(rdev,
2908 &pi->dpm_table.mclk_table,
2909 SMU7_MAX_LEVELS_MEMORY);
2910 ci_reset_single_dpm_table(rdev,
2911 &pi->dpm_table.vddc_table,
2912 SMU7_MAX_LEVELS_VDDC);
2913 ci_reset_single_dpm_table(rdev,
2914 &pi->dpm_table.vddci_table,
2915 SMU7_MAX_LEVELS_VDDCI);
2916 ci_reset_single_dpm_table(rdev,
2917 &pi->dpm_table.mvdd_table,
2918 SMU7_MAX_LEVELS_MVDD);
2919
2920 pi->dpm_table.sclk_table.count = 0;
2921 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
2922 if ((i == 0) ||
2923 (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
2924 allowed_sclk_vddc_table->entries[i].clk)) {
2925 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
2926 allowed_sclk_vddc_table->entries[i].clk;
2927 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
2928 pi->dpm_table.sclk_table.count++;
2929 }
2930 }
2931
2932 pi->dpm_table.mclk_table.count = 0;
2933 for (i = 0; i < allowed_mclk_table->count; i++) {
		if ((i == 0) ||
2935 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
2936 allowed_mclk_table->entries[i].clk)) {
2937 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
2938 allowed_mclk_table->entries[i].clk;
2939 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
2940 pi->dpm_table.mclk_table.count++;
2941 }
2942 }
2943
2944 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
2945 pi->dpm_table.vddc_table.dpm_levels[i].value =
2946 allowed_sclk_vddc_table->entries[i].v;
2947 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
2948 std_voltage_table->entries[i].leakage;
2949 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
2950 }
2951 pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
2952
2953 allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
2954 if (allowed_mclk_table) {
2955 for (i = 0; i < allowed_mclk_table->count; i++) {
2956 pi->dpm_table.vddci_table.dpm_levels[i].value =
2957 allowed_mclk_table->entries[i].v;
2958 pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
2959 }
2960 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
2961 }
2962
2963 allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
2964 if (allowed_mclk_table) {
2965 for (i = 0; i < allowed_mclk_table->count; i++) {
2966 pi->dpm_table.mvdd_table.dpm_levels[i].value =
2967 allowed_mclk_table->entries[i].v;
2968 pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
2969 }
2970 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
2971 }
2972
2973 ci_setup_default_pcie_tables(rdev);
2974
2975 return 0;
2976}
2977
2978static int ci_find_boot_level(struct ci_single_dpm_table *table,
2979 u32 value, u32 *boot_level)
2980{
2981 u32 i;
2982 int ret = -EINVAL;
2983
	for (i = 0; i < table->count; i++) {
2985 if (value == table->dpm_levels[i].value) {
2986 *boot_level = i;
2987 ret = 0;
2988 }
2989 }
2990
2991 return ret;
2992}
2993
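/*
 * Master init path for the SMC DPM state table: build the default DPM
 * tables from the power-play data, populate every level type (graphics,
 * memory, link, ACPI, VCE, ACP, SAMU, UVD), fill in the global flags and
 * intervals, byte-swap the scalars, and push the result into SMC SRAM.
 * The upload size excludes the trailing SMU7_PIDController blocks. Note
 * that the ci_find_boot_level() return values are ignored (and written
 * via (u32 *) casts); a failed lookup simply leaves the boot level at 0.
 */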
2994static int ci_init_smc_table(struct radeon_device *rdev)
2995{
2996 struct ci_power_info *pi = ci_get_pi(rdev);
2997 struct ci_ulv_parm *ulv = &pi->ulv;
2998 struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
2999 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3000 int ret;
3001
3002 ret = ci_setup_default_dpm_tables(rdev);
3003 if (ret)
3004 return ret;
3005
3006 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3007 ci_populate_smc_voltage_tables(rdev, table);
3008
3009 ci_init_fps_limits(rdev);
3010
3011 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3012 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3013
3014 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3015 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3016
3017 if (pi->mem_gddr5)
3018 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3019
3020 if (ulv->supported) {
3021 ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
3022 if (ret)
3023 return ret;
3024 WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3025 }
3026
3027 ret = ci_populate_all_graphic_levels(rdev);
3028 if (ret)
3029 return ret;
3030
3031 ret = ci_populate_all_memory_levels(rdev);
3032 if (ret)
3033 return ret;
3034
3035 ci_populate_smc_link_level(rdev, table);
3036
3037 ret = ci_populate_smc_acpi_level(rdev, table);
3038 if (ret)
3039 return ret;
3040
3041 ret = ci_populate_smc_vce_level(rdev, table);
3042 if (ret)
3043 return ret;
3044
3045 ret = ci_populate_smc_acp_level(rdev, table);
3046 if (ret)
3047 return ret;
3048
3049 ret = ci_populate_smc_samu_level(rdev, table);
3050 if (ret)
3051 return ret;
3052
3053 ret = ci_do_program_memory_timing_parameters(rdev);
3054 if (ret)
3055 return ret;
3056
3057 ret = ci_populate_smc_uvd_level(rdev, table);
3058 if (ret)
3059 return ret;
3060
3061 table->UvdBootLevel = 0;
3062 table->VceBootLevel = 0;
3063 table->AcpBootLevel = 0;
3064 table->SamuBootLevel = 0;
3065 table->GraphicsBootLevel = 0;
3066 table->MemoryBootLevel = 0;
3067
3068 ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3069 pi->vbios_boot_state.sclk_bootup_value,
3070 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3071
3072 ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3073 pi->vbios_boot_state.mclk_bootup_value,
3074 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3075
3076 table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3077 table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3078 table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3079
3080 ci_populate_smc_initial_state(rdev, radeon_boot_state);
3081
3082 ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
3083 if (ret)
3084 return ret;
3085
3086 table->UVDInterval = 1;
3087 table->VCEInterval = 1;
3088 table->ACPInterval = 1;
3089 table->SAMUInterval = 1;
3090 table->GraphicsVoltageChangeEnable = 1;
3091 table->GraphicsThermThrottleEnable = 1;
3092 table->GraphicsInterval = 1;
3093 table->VoltageInterval = 1;
3094 table->ThermalInterval = 1;
3095 table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3096 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3097 table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3098 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3099 table->MemoryVoltageChangeEnable = 1;
3100 table->MemoryInterval = 1;
3101 table->VoltageResponseTime = 0;
3102 table->VddcVddciDelta = 4000;
3103 table->PhaseResponseTime = 0;
3104 table->MemoryThermThrottleEnable = 1;
3105 table->PCIeBootLinkLevel = 0;
3106 table->PCIeGenInterval = 1;
3107 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3108 table->SVI2Enable = 1;
3109 else
3110 table->SVI2Enable = 0;
3111
3112 table->ThermGpio = 17;
3113 table->SclkStepSize = 0x4000;
3114
3115 table->SystemFlags = cpu_to_be32(table->SystemFlags);
3116 table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3117 table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3118 table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3119 table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3120 table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3121 table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3122 table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3123 table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3124 table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3125 table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3126 table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3127 table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3128 table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3129
3130 ret = ci_copy_bytes_to_smc(rdev,
3131 pi->dpm_table_start +
3132 offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3133 (u8 *)&table->SystemFlags,
3134 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3135 pi->sram_end);
3136 if (ret)
3137 return ret;
3138
3139 return 0;
3140}
3141
3142static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3143 struct ci_single_dpm_table *dpm_table,
3144 u32 low_limit, u32 high_limit)
3145{
3146 u32 i;
3147
3148 for (i = 0; i < dpm_table->count; i++) {
3149 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3150 (dpm_table->dpm_levels[i].value > high_limit))
3151 dpm_table->dpm_levels[i].enabled = false;
3152 else
3153 dpm_table->dpm_levels[i].enabled = true;
3154 }
3155}
3156
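/*
 * Disable PCIe DPM entries outside the requested speed/lane window, then
 * walk the survivors and turn off exact duplicates so every remaining
 * level is unique.
 */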
3157static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3158 u32 speed_low, u32 lanes_low,
3159 u32 speed_high, u32 lanes_high)
3160{
3161 struct ci_power_info *pi = ci_get_pi(rdev);
3162 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3163 u32 i, j;
3164
3165 for (i = 0; i < pcie_table->count; i++) {
3166 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3167 (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3168 (pcie_table->dpm_levels[i].value > speed_high) ||
3169 (pcie_table->dpm_levels[i].param1 > lanes_high))
3170 pcie_table->dpm_levels[i].enabled = false;
3171 else
3172 pcie_table->dpm_levels[i].enabled = true;
3173 }
3174
3175 for (i = 0; i < pcie_table->count; i++) {
3176 if (pcie_table->dpm_levels[i].enabled) {
3177 for (j = i + 1; j < pcie_table->count; j++) {
3178 if (pcie_table->dpm_levels[j].enabled) {
3179 if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3180 (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3181 pcie_table->dpm_levels[j].enabled = false;
3182 }
3183 }
3184 }
3185 }
3186}
3187
3188static int ci_trim_dpm_states(struct radeon_device *rdev,
3189 struct radeon_ps *radeon_state)
3190{
3191 struct ci_ps *state = ci_get_ps(radeon_state);
3192 struct ci_power_info *pi = ci_get_pi(rdev);
3193 u32 high_limit_count;
3194
3195 if (state->performance_level_count < 1)
3196 return -EINVAL;
3197
3198 if (state->performance_level_count == 1)
3199 high_limit_count = 0;
3200 else
3201 high_limit_count = 1;
3202
3203 ci_trim_single_dpm_states(rdev,
3204 &pi->dpm_table.sclk_table,
3205 state->performance_levels[0].sclk,
3206 state->performance_levels[high_limit_count].sclk);
3207
3208 ci_trim_single_dpm_states(rdev,
3209 &pi->dpm_table.mclk_table,
3210 state->performance_levels[0].mclk,
3211 state->performance_levels[high_limit_count].mclk);
3212
3213 ci_trim_pcie_dpm_states(rdev,
3214 state->performance_levels[0].pcie_gen,
3215 state->performance_levels[0].pcie_lane,
3216 state->performance_levels[high_limit_count].pcie_gen,
3217 state->performance_levels[high_limit_count].pcie_lane);
3218
3219 return 0;
3220}
3221
3222static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3223{
3224 struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3225 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3226 struct radeon_clock_voltage_dependency_table *vddc_table =
3227 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3228 u32 requested_voltage = 0;
3229 u32 i;
3230
3231 if (disp_voltage_table == NULL)
3232 return -EINVAL;
3233 if (!disp_voltage_table->count)
3234 return -EINVAL;
3235
3236 for (i = 0; i < disp_voltage_table->count; i++) {
3237 if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3238 requested_voltage = disp_voltage_table->entries[i].v;
3239 }
3240
3241 for (i = 0; i < vddc_table->count; i++) {
3242 if (requested_voltage <= vddc_table->entries[i].v) {
3243 requested_voltage = vddc_table->entries[i].v;
3244 return (ci_send_msg_to_smc_with_parameter(rdev,
3245 PPSMC_MSG_VddC_Request,
3246 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3247 0 : -EINVAL;
3248 }
3249 }
3250
3251 return -EINVAL;
3252}
3253
3254static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3255{
3256 struct ci_power_info *pi = ci_get_pi(rdev);
3257 PPSMC_Result result;
3258
3259 if (!pi->sclk_dpm_key_disabled) {
3260 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3261 result = ci_send_msg_to_smc_with_parameter(rdev,
3262 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3263 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3264 if (result != PPSMC_Result_OK)
3265 return -EINVAL;
3266 }
3267 }
3268
3269 if (!pi->mclk_dpm_key_disabled) {
3270 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3271 result = ci_send_msg_to_smc_with_parameter(rdev,
3272 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3273 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3274 if (result != PPSMC_Result_OK)
3275 return -EINVAL;
3276 }
3277 }
3278
3279 if (!pi->pcie_dpm_key_disabled) {
3280 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3281 result = ci_send_msg_to_smc_with_parameter(rdev,
3282 PPSMC_MSG_PCIeDPM_SetEnabledMask,
3283 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3284 if (result != PPSMC_Result_OK)
3285 return -EINVAL;
3286 }
3287 }
3288
3289 ci_apply_disp_minimum_voltage_request(rdev);
3290
3291 return 0;
3292}
3293
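/*
 * Compare the requested state's top sclk/mclk against the current DPM
 * tables and record what needs rebuilding: DPMTABLE_OD_UPDATE_* when the
 * clock is not in the table at all (overdrive), DPMTABLE_UPDATE_* when
 * only the level programming must be refreshed (e.g. the active crtc
 * count changed).
 */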
3294static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3295 struct radeon_ps *radeon_state)
3296{
3297 struct ci_power_info *pi = ci_get_pi(rdev);
3298 struct ci_ps *state = ci_get_ps(radeon_state);
3299 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3300 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3301 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3302 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3303 u32 i;
3304
3305 pi->need_update_smu7_dpm_table = 0;
3306
3307 for (i = 0; i < sclk_table->count; i++) {
3308 if (sclk == sclk_table->dpm_levels[i].value)
3309 break;
3310 }
3311
3312 if (i >= sclk_table->count) {
3313 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3314 } else {
3315 /* XXX check display min clock requirements */
3316 if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
3317 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3318 }
3319
3320 for (i = 0; i < mclk_table->count; i++) {
3321 if (mclk == mclk_table->dpm_levels[i].value)
3322 break;
3323 }
3324
3325 if (i >= mclk_table->count)
3326 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3327
3328 if (rdev->pm.dpm.current_active_crtc_count !=
3329 rdev->pm.dpm.new_active_crtc_count)
3330 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3331}
3332
3333static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3334 struct radeon_ps *radeon_state)
3335{
3336 struct ci_power_info *pi = ci_get_pi(rdev);
3337 struct ci_ps *state = ci_get_ps(radeon_state);
3338 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3339 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3340 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3341 int ret;
3342
3343 if (!pi->need_update_smu7_dpm_table)
3344 return 0;
3345
3346 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3347 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3348
3349 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3350 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3351
3352 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3353 ret = ci_populate_all_graphic_levels(rdev);
3354 if (ret)
3355 return ret;
3356 }
3357
3358 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3359 ret = ci_populate_all_memory_levels(rdev);
3360 if (ret)
3361 return ret;
3362 }
3363
3364 return 0;
3365}
3366
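/*
 * Enable/disable UVD DPM. While UVD is active, bit 0 is masked out of
 * the mclk enable mask and re-sent to the SMC, presumably to keep the
 * memory clock off its lowest level during decode; the bit is restored
 * when UVD DPM is disabled again.
 */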
3367static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
3368{
3369 struct ci_power_info *pi = ci_get_pi(rdev);
3370 const struct radeon_clock_and_voltage_limits *max_limits;
3371 int i;
3372
3373 if (rdev->pm.dpm.ac_power)
3374 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3375 else
3376 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3377
3378 if (enable) {
3379 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
3380
3381 for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3382 if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3383 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
3384
3385 if (!pi->caps_uvd_dpm)
3386 break;
3387 }
3388 }
3389
3390 ci_send_msg_to_smc_with_parameter(rdev,
3391 PPSMC_MSG_UVDDPM_SetEnabledMask,
3392 pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
3393
3394 if (pi->last_mclk_dpm_enable_mask & 0x1) {
3395 pi->uvd_enabled = true;
3396 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3397 ci_send_msg_to_smc_with_parameter(rdev,
3398 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3399 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3400 }
3401 } else {
3402 if (pi->last_mclk_dpm_enable_mask & 0x1) {
3403 pi->uvd_enabled = false;
3404 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
3405 ci_send_msg_to_smc_with_parameter(rdev,
3406 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3407 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3408 }
3409 }
3410
3411 return (ci_send_msg_to_smc(rdev, enable ?
3412 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
3413 0 : -EINVAL;
3414}
3415
3416#if 0
3417static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
3418{
3419 struct ci_power_info *pi = ci_get_pi(rdev);
3420 const struct radeon_clock_and_voltage_limits *max_limits;
3421 int i;
3422
3423 if (rdev->pm.dpm.ac_power)
3424 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3425 else
3426 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3427
3428 if (enable) {
3429 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
3430 for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3431 if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3432 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
3433
3434 if (!pi->caps_vce_dpm)
3435 break;
3436 }
3437 }
3438
3439 ci_send_msg_to_smc_with_parameter(rdev,
3440 PPSMC_MSG_VCEDPM_SetEnabledMask,
3441 pi->dpm_level_enable_mask.vce_dpm_enable_mask);
3442 }
3443
3444 return (ci_send_msg_to_smc(rdev, enable ?
3445 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
3446 0 : -EINVAL;
3447}
3448
3449static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
3450{
3451 struct ci_power_info *pi = ci_get_pi(rdev);
3452 const struct radeon_clock_and_voltage_limits *max_limits;
3453 int i;
3454
3455 if (rdev->pm.dpm.ac_power)
3456 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3457 else
3458 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3459
3460 if (enable) {
3461 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
3462 for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3463 if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3464 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
3465
3466 if (!pi->caps_samu_dpm)
3467 break;
3468 }
3469 }
3470
3471 ci_send_msg_to_smc_with_parameter(rdev,
3472 PPSMC_MSG_SAMUDPM_SetEnabledMask,
3473 pi->dpm_level_enable_mask.samu_dpm_enable_mask);
3474 }
3475 return (ci_send_msg_to_smc(rdev, enable ?
3476 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
3477 0 : -EINVAL;
3478}
3479
3480static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
3481{
3482 struct ci_power_info *pi = ci_get_pi(rdev);
3483 const struct radeon_clock_and_voltage_limits *max_limits;
3484 int i;
3485
3486 if (rdev->pm.dpm.ac_power)
3487 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3488 else
3489 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3490
3491 if (enable) {
3492 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
3493 for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3494 if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3495 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
3496
3497 if (!pi->caps_acp_dpm)
3498 break;
3499 }
3500 }
3501
3502 ci_send_msg_to_smc_with_parameter(rdev,
3503 PPSMC_MSG_ACPDPM_SetEnabledMask,
3504 pi->dpm_level_enable_mask.acp_dpm_enable_mask);
3505 }
3506
3507 return (ci_send_msg_to_smc(rdev, enable ?
3508 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
3509 0 : -EINVAL;
3510}
3511#endif
3512
3513static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
3514{
3515 struct ci_power_info *pi = ci_get_pi(rdev);
3516 u32 tmp;
3517
3518 if (!gate) {
3519 if (pi->caps_uvd_dpm ||
3520 (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
3521 pi->smc_state_table.UvdBootLevel = 0;
3522 else
3523 pi->smc_state_table.UvdBootLevel =
3524 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
3525
3526 tmp = RREG32_SMC(DPM_TABLE_475);
3527 tmp &= ~UvdBootLevel_MASK;
3528 tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
3529 WREG32_SMC(DPM_TABLE_475, tmp);
3530 }
3531
3532 return ci_enable_uvd_dpm(rdev, !gate);
3533}
3534
3535#if 0
3536static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
3537{
3538 u8 i;
3539 u32 min_evclk = 30000; /* ??? */
3540 struct radeon_vce_clock_voltage_dependency_table *table =
3541 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
3542
3543 for (i = 0; i < table->count; i++) {
3544 if (table->entries[i].evclk >= min_evclk)
3545 return i;
3546 }
3547
3548 return table->count - 1;
3549}
3550
3551static int ci_update_vce_dpm(struct radeon_device *rdev,
3552 struct radeon_ps *radeon_new_state,
3553 struct radeon_ps *radeon_current_state)
3554{
3555 struct ci_power_info *pi = ci_get_pi(rdev);
3556 bool new_vce_clock_non_zero = (radeon_new_state->evclk != 0);
3557 bool old_vce_clock_non_zero = (radeon_current_state->evclk != 0);
3558 int ret = 0;
3559 u32 tmp;
3560
3561 if (new_vce_clock_non_zero != old_vce_clock_non_zero) {
3562 if (new_vce_clock_non_zero) {
3563 pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
3564
3565 tmp = RREG32_SMC(DPM_TABLE_475);
3566 tmp &= ~VceBootLevel_MASK;
3567 tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
3568 WREG32_SMC(DPM_TABLE_475, tmp);
3569
3570 ret = ci_enable_vce_dpm(rdev, true);
3571 } else {
3572 ret = ci_enable_vce_dpm(rdev, false);
3573 }
3574 }
3575 return ret;
3576}
3577
3578static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
3579{
3580 return ci_enable_samu_dpm(rdev, gate);
3581}
3582
3583static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
3584{
3585 struct ci_power_info *pi = ci_get_pi(rdev);
3586 u32 tmp;
3587
3588 if (!gate) {
3589 pi->smc_state_table.AcpBootLevel = 0;
3590
3591 tmp = RREG32_SMC(DPM_TABLE_475);
3592 tmp &= ~AcpBootLevel_MASK;
3593 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
3594 WREG32_SMC(DPM_TABLE_475, tmp);
3595 }
3596
3597 return ci_enable_acp_dpm(rdev, !gate);
3598}
3599#endif
3600
3601static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
3602 struct radeon_ps *radeon_state)
3603{
3604 struct ci_power_info *pi = ci_get_pi(rdev);
3605 int ret;
3606
3607 ret = ci_trim_dpm_states(rdev, radeon_state);
3608 if (ret)
3609 return ret;
3610
3611 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3612 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
3613 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3614 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
3615 pi->last_mclk_dpm_enable_mask =
3616 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3617 if (pi->uvd_enabled) {
3618 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
3619 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3620 }
3621 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
3622 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
3623
3624 return 0;
3625}
3626
3627static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
3628 u32 level_mask)
3629{
3630 u32 level = 0;
3631
3632 while ((level_mask & (1 << level)) == 0)
3633 level++;
3634
3635 return level;
3636}
3638
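/*
 * Force the sclk/mclk/pcie DPM state. For "high" the index of the
 * highest set bit in each enable mask is forced, for "low"
 * ci_get_lowest_enabled_level() picks the lowest set bit (callers only
 * pass non-zero masks, which keeps its scan finite), and "auto" tells
 * the SMC to stop forcing. Each change is polled back from the
 * TARGET_AND_CURRENT_PROFILE_INDEX registers until it takes effect or
 * rdev->usec_timeout expires.
 */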
3639int ci_dpm_force_performance_level(struct radeon_device *rdev,
3640 enum radeon_dpm_forced_level level)
3641{
3642 struct ci_power_info *pi = ci_get_pi(rdev);
3643 PPSMC_Result smc_result;
3644 u32 tmp, levels, i;
3645 int ret;
3646
3647 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
3648 if ((!pi->sclk_dpm_key_disabled) &&
3649 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3650 levels = 0;
3651 tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
3652 while (tmp >>= 1)
3653 levels++;
3654 if (levels) {
3655 ret = ci_dpm_force_state_sclk(rdev, levels);
3656 if (ret)
3657 return ret;
3658 for (i = 0; i < rdev->usec_timeout; i++) {
3659 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3660 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3661 if (tmp == levels)
3662 break;
3663 udelay(1);
3664 }
3665 }
3666 }
3667 if ((!pi->mclk_dpm_key_disabled) &&
3668 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3669 levels = 0;
3670 tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3671 while (tmp >>= 1)
3672 levels++;
3673 if (levels) {
3674 ret = ci_dpm_force_state_mclk(rdev, levels);
3675 if (ret)
3676 return ret;
3677 for (i = 0; i < rdev->usec_timeout; i++) {
3678 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3679 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3680 if (tmp == levels)
3681 break;
3682 udelay(1);
3683 }
3684 }
3685 }
3686 if ((!pi->pcie_dpm_key_disabled) &&
3687 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3688 levels = 0;
3689 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
3690 while (tmp >>= 1)
3691 levels++;
3692 if (levels) {
				ret = ci_dpm_force_state_pcie(rdev, levels);
3694 if (ret)
3695 return ret;
3696 for (i = 0; i < rdev->usec_timeout; i++) {
3697 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3698 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3699 if (tmp == levels)
3700 break;
3701 udelay(1);
3702 }
3703 }
3704 }
3705 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
3706 if ((!pi->sclk_dpm_key_disabled) &&
3707 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3708 levels = ci_get_lowest_enabled_level(rdev,
3709 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3710 ret = ci_dpm_force_state_sclk(rdev, levels);
3711 if (ret)
3712 return ret;
3713 for (i = 0; i < rdev->usec_timeout; i++) {
3714 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3715 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3716 if (tmp == levels)
3717 break;
3718 udelay(1);
3719 }
3720 }
3721 if ((!pi->mclk_dpm_key_disabled) &&
3722 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3723 levels = ci_get_lowest_enabled_level(rdev,
3724 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3725 ret = ci_dpm_force_state_mclk(rdev, levels);
3726 if (ret)
3727 return ret;
3728 for (i = 0; i < rdev->usec_timeout; i++) {
3729 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3730 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3731 if (tmp == levels)
3732 break;
3733 udelay(1);
3734 }
3735 }
3736 if ((!pi->pcie_dpm_key_disabled) &&
3737 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3738 levels = ci_get_lowest_enabled_level(rdev,
3739 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3740 ret = ci_dpm_force_state_pcie(rdev, levels);
3741 if (ret)
3742 return ret;
3743 for (i = 0; i < rdev->usec_timeout; i++) {
3744 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3745 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3746 if (tmp == levels)
3747 break;
3748 udelay(1);
3749 }
3750 }
3751 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
3752 if (!pi->sclk_dpm_key_disabled) {
3753 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
3754 if (smc_result != PPSMC_Result_OK)
3755 return -EINVAL;
3756 }
3757 if (!pi->mclk_dpm_key_disabled) {
3758 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
3759 if (smc_result != PPSMC_Result_OK)
3760 return -EINVAL;
3761 }
3762 if (!pi->pcie_dpm_key_disabled) {
3763 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
3764 if (smc_result != PPSMC_Result_OK)
3765 return -EINVAL;
3766 }
3767 }
3768
3769 rdev->pm.dpm.forced_level = level;
3770
3771 return 0;
3772}
3773
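/*
 * Extend the MC register table with derived entries: an MC_SEQ_MISC1 row
 * spawns EMRS/MRS (plus MC_PMG_AUTO_CMD for DDR3) commands that merge
 * the current register contents with the per-level timing data, and an
 * MC_SEQ_RESERVE_M row spawns an MRS1 command. New rows are appended at
 * table->last, bounded by SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE.
 */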
3774static int ci_set_mc_special_registers(struct radeon_device *rdev,
3775 struct ci_mc_reg_table *table)
3776{
3777 struct ci_power_info *pi = ci_get_pi(rdev);
3778 u8 i, j, k;
3779 u32 temp_reg;
3780
3781 for (i = 0, j = table->last; i < table->last; i++) {
3782 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3783 return -EINVAL;
		switch (table->mc_reg_address[i].s1 << 2) {
3785 case MC_SEQ_MISC1:
3786 temp_reg = RREG32(MC_PMG_CMD_EMRS);
3787 table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
3788 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3789 for (k = 0; k < table->num_entries; k++) {
3790 table->mc_reg_table_entry[k].mc_data[j] =
3791 ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
3792 }
3793 j++;
3794 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3795 return -EINVAL;
3796
3797 temp_reg = RREG32(MC_PMG_CMD_MRS);
3798 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
3799 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3800 for (k = 0; k < table->num_entries; k++) {
3801 table->mc_reg_table_entry[k].mc_data[j] =
3802 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3803 if (!pi->mem_gddr5)
3804 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3805 }
3806 j++;
3807			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3808 return -EINVAL;
3809
3810 if (!pi->mem_gddr5) {
3811 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
3812 table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
3813 for (k = 0; k < table->num_entries; k++) {
3814 table->mc_reg_table_entry[k].mc_data[j] =
3815 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3816 }
3817 j++;
3818				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3819 return -EINVAL;
3820 }
3821 break;
3822 case MC_SEQ_RESERVE_M:
3823 temp_reg = RREG32(MC_PMG_CMD_MRS1);
3824 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
3825 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3826 for (k = 0; k < table->num_entries; k++) {
3827 table->mc_reg_table_entry[k].mc_data[j] =
3828 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3829 }
3830 j++;
3831			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3832 return -EINVAL;
3833 break;
3834 default:
3835 break;
3836 }
3837
3838 }
3839
3840 table->last = j;
3841
3842 return 0;
3843}
3844
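/* Map an MC sequencer register offset to its shadow (_LP) register;
 * returns false if the register has no shadow copy.
 */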
3845static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
3846{
3847 bool result = true;
3848
3849 switch(in_reg) {
3850 case MC_SEQ_RAS_TIMING >> 2:
3851 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
3852 break;
3853 case MC_SEQ_DLL_STBY >> 2:
3854 *out_reg = MC_SEQ_DLL_STBY_LP >> 2;
3855 break;
3856 case MC_SEQ_G5PDX_CMD0 >> 2:
3857 *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
3858 break;
3859 case MC_SEQ_G5PDX_CMD1 >> 2:
3860 *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
3861 break;
3862 case MC_SEQ_G5PDX_CTRL >> 2:
3863 *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
3864 break;
3865 case MC_SEQ_CAS_TIMING >> 2:
3866 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
3867 break;
3868 case MC_SEQ_MISC_TIMING >> 2:
3869 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
3870 break;
3871 case MC_SEQ_MISC_TIMING2 >> 2:
3872 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
3873 break;
3874 case MC_SEQ_PMG_DVS_CMD >> 2:
3875 *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
3876 break;
3877 case MC_SEQ_PMG_DVS_CTL >> 2:
3878 *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
3879 break;
3880 case MC_SEQ_RD_CTL_D0 >> 2:
3881 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
3882 break;
3883 case MC_SEQ_RD_CTL_D1 >> 2:
3884 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
3885 break;
3886 case MC_SEQ_WR_CTL_D0 >> 2:
3887 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
3888 break;
3889 case MC_SEQ_WR_CTL_D1 >> 2:
3890 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
3891 break;
3892 case MC_PMG_CMD_EMRS >> 2:
3893 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3894 break;
3895 case MC_PMG_CMD_MRS >> 2:
3896 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3897 break;
3898 case MC_PMG_CMD_MRS1 >> 2:
3899 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3900 break;
3901 case MC_SEQ_PMG_TIMING >> 2:
3902 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
3903 break;
3904 case MC_PMG_CMD_MRS2 >> 2:
3905 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
3906 break;
3907 case MC_SEQ_WR_CTL_2 >> 2:
3908 *out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
3909 break;
3910 default:
3911 result = false;
3912 break;
3913 }
3914
3915 return result;
3916}
3917
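/*
 * Flag only the register columns whose value differs between mclk
 * entries; constant columns never need reprogramming and are skipped
 * when uploading to the SMC.
 */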
3918static void ci_set_valid_flag(struct ci_mc_reg_table *table)
3919{
3920 u8 i, j;
3921
3922 for (i = 0; i < table->last; i++) {
3923 for (j = 1; j < table->num_entries; j++) {
3924 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
3925 table->mc_reg_table_entry[j].mc_data[i]) {
3926 table->valid_flag |= 1 << i;
3927 break;
3928 }
3929 }
3930 }
3931}
3932
3933static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
3934{
3935 u32 i;
3936 u16 address;
3937
3938 for (i = 0; i < table->last; i++) {
3939 table->mc_reg_address[i].s0 =
3940 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
3941 address : table->mc_reg_address[i].s1;
3942 }
3943}
3944
3945static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
3946 struct ci_mc_reg_table *ci_table)
3947{
3948 u8 i, j;
3949
3950 if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3951 return -EINVAL;
3952 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
3953 return -EINVAL;
3954
3955 for (i = 0; i < table->last; i++)
3956 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
3957
3958 ci_table->last = table->last;
3959
3960 for (i = 0; i < table->num_entries; i++) {
3961 ci_table->mc_reg_table_entry[i].mclk_max =
3962 table->mc_reg_table_entry[i].mclk_max;
3963 for (j = 0; j < table->last; j++)
3964 ci_table->mc_reg_table_entry[i].mc_data[j] =
3965 table->mc_reg_table_entry[i].mc_data[j];
3966 }
3967 ci_table->num_entries = table->num_entries;
3968
3969 return 0;
3970}
3971
3972static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
3973{
3974 struct ci_power_info *pi = ci_get_pi(rdev);
3975 struct atom_mc_reg_table *table;
3976 struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
3977 u8 module_index = rv770_get_memory_module_index(rdev);
3978 int ret;
3979
3980 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
3981 if (!table)
3982 return -ENOMEM;
3983
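	/* seed the shadow (_LP) registers with the current live MC settings */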
3984 WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
3985 WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
3986 WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
3987 WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
3988 WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
3989 WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
3990 WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
3991 WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
3992 WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
3993 WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
3994 WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
3995 WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
3996 WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
3997 WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
3998 WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
3999 WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
4000 WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
4001 WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
4002 WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
4003 WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
4004
4005 ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
4006 if (ret)
4007 goto init_mc_done;
4008
4009 ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4010 if (ret)
4011 goto init_mc_done;
4012
4013 ci_set_s0_mc_reg_index(ci_table);
4014
4015 ret = ci_set_mc_special_registers(rdev, ci_table);
4016 if (ret)
4017 goto init_mc_done;
4018
4019 ci_set_valid_flag(ci_table);
4020
4021init_mc_done:
4022 kfree(table);
4023
4024 return ret;
4025}
4026
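/*
 * Copy the address pairs of the valid register columns into the SMC
 * table (big-endian, as the SMC expects).
 */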
4027static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4028 SMU7_Discrete_MCRegisters *mc_reg_table)
4029{
4030 struct ci_power_info *pi = ci_get_pi(rdev);
4031 u32 i, j;
4032
4033 for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4034 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4035 if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4036 return -EINVAL;
4037 mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4038 mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4039 i++;
4040 }
4041 }
4042
4043 mc_reg_table->last = (u8)i;
4044
4045 return 0;
4046}
4047
4048static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4049 SMU7_Discrete_MCRegisterSet *data,
4050 u32 num_entries, u32 valid_flag)
4051{
4052 u32 i, j;
4053
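	/* pack only the register columns flagged valid, preserving order */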
4054 for (i = 0, j = 0; j < num_entries; j++) {
4055 if (valid_flag & (1 << j)) {
4056 data->value[i] = cpu_to_be32(entry->mc_data[j]);
4057 i++;
4058 }
4059 }
4060}
4061
4062static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4063 const u32 memory_clock,
4064 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4065{
4066 struct ci_power_info *pi = ci_get_pi(rdev);
4067 u32 i = 0;
4068
4069 for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
4070 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4071 break;
4072 }
4073
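	/* no table entry covers this clock; fall back to the highest entry */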
4074 if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4075 --i;
4076
4077 ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4078 mc_reg_table_data, pi->mc_reg_table.last,
4079 pi->mc_reg_table.valid_flag);
4080}
4081
4082static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4083 SMU7_Discrete_MCRegisters *mc_reg_table)
4084{
4085 struct ci_power_info *pi = ci_get_pi(rdev);
4086 u32 i;
4087
4088 for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4089 ci_convert_mc_reg_table_entry_to_smc(rdev,
4090 pi->dpm_table.mclk_table.dpm_levels[i].value,
4091 &mc_reg_table->data[i]);
4092}
4093
4094static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
4095{
4096 struct ci_power_info *pi = ci_get_pi(rdev);
4097 int ret;
4098
4099 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4100
4101 ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
4102 if (ret)
4103 return ret;
4104 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4105
4106 return ci_copy_bytes_to_smc(rdev,
4107 pi->mc_reg_table_start,
4108 (u8 *)&pi->smc_mc_reg_table,
4109 sizeof(SMU7_Discrete_MCRegisters),
4110 pi->sram_end);
4111}
4112
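/*
 * Re-upload only the per-level register data when an overdrive mclk
 * change is pending; the address list itself does not change at runtime.
 */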
4113static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
4114{
4115 struct ci_power_info *pi = ci_get_pi(rdev);
4116
4117 if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4118 return 0;
4119
4120 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4121
4122 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4123
4124 return ci_copy_bytes_to_smc(rdev,
4125 pi->mc_reg_table_start +
4126 offsetof(SMU7_Discrete_MCRegisters, data[0]),
4127 (u8 *)&pi->smc_mc_reg_table.data[0],
4128 sizeof(SMU7_Discrete_MCRegisterSet) *
4129 pi->dpm_table.mclk_table.count,
4130 pi->sram_end);
4131}
4132
4133static void ci_enable_voltage_control(struct radeon_device *rdev)
4134{
4135 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4136
4137 tmp |= VOLT_PWRMGT_EN;
4138 WREG32_SMC(GENERAL_PWRMGT, tmp);
4139}
4140
4141static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4142 struct radeon_ps *radeon_state)
4143{
4144 struct ci_ps *state = ci_get_ps(radeon_state);
4145 int i;
4146 u16 pcie_speed, max_speed = 0;
4147
4148 for (i = 0; i < state->performance_level_count; i++) {
4149 pcie_speed = state->performance_levels[i].pcie_gen;
4150 if (max_speed < pcie_speed)
4151 max_speed = pcie_speed;
4152 }
4153
4154 return max_speed;
4155}
4156
4157static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4158{
4159 u32 speed_cntl = 0;
4160
4161 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4162 speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4163
4164 return (u16)speed_cntl;
4165}
4166
4167static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4168{
4169 u32 link_width = 0;
4170
4171 link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4172 link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4173
4174 switch (link_width) {
4175 case RADEON_PCIE_LC_LINK_WIDTH_X1:
4176 return 1;
4177 case RADEON_PCIE_LC_LINK_WIDTH_X2:
4178 return 2;
4179 case RADEON_PCIE_LC_LINK_WIDTH_X4:
4180 return 4;
4181 case RADEON_PCIE_LC_LINK_WIDTH_X8:
4182 return 8;
4183 case RADEON_PCIE_LC_LINK_WIDTH_X12:
4184 /* not actually supported */
4185 return 12;
4186 case RADEON_PCIE_LC_LINK_WIDTH_X0:
4187 case RADEON_PCIE_LC_LINK_WIDTH_X16:
4188 default:
4189 return 16;
4190 }
4191}
4192
4193static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
4194 struct radeon_ps *radeon_new_state,
4195 struct radeon_ps *radeon_current_state)
4196{
4197 struct ci_power_info *pi = ci_get_pi(rdev);
4198 enum radeon_pcie_gen target_link_speed =
4199 ci_get_maximum_link_speed(rdev, radeon_new_state);
4200 enum radeon_pcie_gen current_link_speed;
4201
4202 if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
4203 current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
4204 else
4205 current_link_speed = pi->force_pcie_gen;
4206
4207 pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4208 pi->pspp_notify_required = false;
4209 if (target_link_speed > current_link_speed) {
4210 switch (target_link_speed) {
4211 case RADEON_PCIE_GEN3:
4212 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4213 break;
4214 pi->force_pcie_gen = RADEON_PCIE_GEN2;
4215 if (current_link_speed == RADEON_PCIE_GEN2)
4216 break;
4217 case RADEON_PCIE_GEN2:
4218 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4219 break;
4220 default:
4221 pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
4222 break;
4223 }
4224 } else {
4225 if (target_link_speed < current_link_speed)
4226 pi->pspp_notify_required = true;
4227 }
4228}
4229
4230static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4231 struct radeon_ps *radeon_new_state,
4232 struct radeon_ps *radeon_current_state)
4233{
4234 struct ci_power_info *pi = ci_get_pi(rdev);
4235 enum radeon_pcie_gen target_link_speed =
4236 ci_get_maximum_link_speed(rdev, radeon_new_state);
4237 u8 request;
4238
4239 if (pi->pspp_notify_required) {
4240 if (target_link_speed == RADEON_PCIE_GEN3)
4241 request = PCIE_PERF_REQ_PECI_GEN3;
4242 else if (target_link_speed == RADEON_PCIE_GEN2)
4243 request = PCIE_PERF_REQ_PECI_GEN2;
4244 else
4245 request = PCIE_PERF_REQ_PECI_GEN1;
4246
4247 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4248 (ci_get_current_pcie_speed(rdev) > 0))
4249 return;
4250
4251 radeon_acpi_pcie_performance_request(rdev, request, false);
4252 }
4253}
4254
4255static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4256{
4257 struct ci_power_info *pi = ci_get_pi(rdev);
4258 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4259 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4260 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4261 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4262 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4263 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4264
4265 if (allowed_sclk_vddc_table == NULL)
4266 return -EINVAL;
4267 if (allowed_sclk_vddc_table->count < 1)
4268 return -EINVAL;
4269 if (allowed_mclk_vddc_table == NULL)
4270 return -EINVAL;
4271 if (allowed_mclk_vddc_table->count < 1)
4272 return -EINVAL;
4273 if (allowed_mclk_vddci_table == NULL)
4274 return -EINVAL;
4275 if (allowed_mclk_vddci_table->count < 1)
4276 return -EINVAL;
4277
4278 pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4279 pi->max_vddc_in_pp_table =
4280 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4281
4282 pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4283 pi->max_vddci_in_pp_table =
4284 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4285
4286 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4287 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4288 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4289 allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4290 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4291 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4292 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4293 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4294
4295 return 0;
4296}
4297
4298static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4299{
4300 struct ci_power_info *pi = ci_get_pi(rdev);
4301 struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4302 u32 leakage_index;
4303
4304 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4305 if (leakage_table->leakage_id[leakage_index] == *vddc) {
4306 *vddc = leakage_table->actual_voltage[leakage_index];
4307 break;
4308 }
4309 }
4310}
4311
4312static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4313{
4314 struct ci_power_info *pi = ci_get_pi(rdev);
4315 struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4316 u32 leakage_index;
4317
4318 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4319 if (leakage_table->leakage_id[leakage_index] == *vddci) {
4320 *vddci = leakage_table->actual_voltage[leakage_index];
4321 break;
4322 }
4323 }
4324}
4325
4326static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4327 struct radeon_clock_voltage_dependency_table *table)
4328{
4329 u32 i;
4330
4331 if (table) {
4332 for (i = 0; i < table->count; i++)
4333 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4334 }
4335}
4336
4337static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4338 struct radeon_clock_voltage_dependency_table *table)
4339{
4340 u32 i;
4341
4342 if (table) {
4343 for (i = 0; i < table->count; i++)
4344 ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4345 }
4346}
4347
4348static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4349 struct radeon_vce_clock_voltage_dependency_table *table)
4350{
4351 u32 i;
4352
4353 if (table) {
4354 for (i = 0; i < table->count; i++)
4355 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4356 }
4357}
4358
4359static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4360 struct radeon_uvd_clock_voltage_dependency_table *table)
4361{
4362 u32 i;
4363
4364 if (table) {
4365 for (i = 0; i < table->count; i++)
4366 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4367 }
4368}
4369
4370static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
4371 struct radeon_phase_shedding_limits_table *table)
4372{
4373 u32 i;
4374
4375 if (table) {
4376 for (i = 0; i < table->count; i++)
4377 ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
4378 }
4379}
4380
4381static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
4382 struct radeon_clock_and_voltage_limits *table)
4383{
4384 if (table) {
4385 ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
4386 ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
4387 }
4388}
4389
4390static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
4391 struct radeon_cac_leakage_table *table)
4392{
4393 u32 i;
4394
4395 if (table) {
4396 for (i = 0; i < table->count; i++)
4397 ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
4398 }
4399}
4400
4401static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
4402{
4404 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4405 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
4406 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4407 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
4408 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4409 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
4410 ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
4411 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
4412 ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4413 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
4414 ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4415 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
4416 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4417 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
4418 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4419 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
4420 ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
4421 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
4422 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4423 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
4424 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4425 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
4426 ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
4427 &rdev->pm.dpm.dyn_state.cac_leakage_table);
4429}
4430
4431static void ci_get_memory_type(struct radeon_device *rdev)
4432{
4433 struct ci_power_info *pi = ci_get_pi(rdev);
4434 u32 tmp;
4435
4436 tmp = RREG32(MC_SEQ_MISC0);
4437
4438 if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
4439 MC_SEQ_MISC0_GDDR5_VALUE)
4440 pi->mem_gddr5 = true;
4441 else
4442 pi->mem_gddr5 = false;
4444}
4445
4446void ci_update_current_ps(struct radeon_device *rdev,
4447 struct radeon_ps *rps)
4448{
4449 struct ci_ps *new_ps = ci_get_ps(rps);
4450 struct ci_power_info *pi = ci_get_pi(rdev);
4451
4452 pi->current_rps = *rps;
4453 pi->current_ps = *new_ps;
4454 pi->current_rps.ps_priv = &pi->current_ps;
4455}
4456
4457void ci_update_requested_ps(struct radeon_device *rdev,
4458 struct radeon_ps *rps)
4459{
4460 struct ci_ps *new_ps = ci_get_ps(rps);
4461 struct ci_power_info *pi = ci_get_pi(rdev);
4462
4463 pi->requested_rps = *rps;
4464 pi->requested_ps = *new_ps;
4465 pi->requested_rps.ps_priv = &pi->requested_ps;
4466}
4467
4468int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
4469{
4470 struct ci_power_info *pi = ci_get_pi(rdev);
4471 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
4472 struct radeon_ps *new_ps = &requested_ps;
4473
4474 ci_update_requested_ps(rdev, new_ps);
4475
4476 ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
4477
4478 return 0;
4479}
4480
4481void ci_dpm_post_set_power_state(struct radeon_device *rdev)
4482{
4483 struct ci_power_info *pi = ci_get_pi(rdev);
4484 struct radeon_ps *new_ps = &pi->requested_rps;
4485
4486 ci_update_current_ps(rdev, new_ps);
4487}
4488
4490void ci_dpm_setup_asic(struct radeon_device *rdev)
4491{
4492 ci_read_clock_registers(rdev);
4493 ci_get_memory_type(rdev);
4494 ci_enable_acpi_power_management(rdev);
4495 ci_init_sclk_t(rdev);
4496}
4497
4498int ci_dpm_enable(struct radeon_device *rdev)
4499{
4500 struct ci_power_info *pi = ci_get_pi(rdev);
4501 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4502 int ret;
4503
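	/* disable clockgating while DPM is reconfigured; re-enabled at the end of this function */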
4504 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
4505 RADEON_CG_BLOCK_MC |
4506 RADEON_CG_BLOCK_SDMA |
4507 RADEON_CG_BLOCK_BIF |
4508 RADEON_CG_BLOCK_UVD |
4509 RADEON_CG_BLOCK_HDP), false);
4510
4511 if (ci_is_smc_running(rdev))
4512 return -EINVAL;
4513 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
4514 ci_enable_voltage_control(rdev);
4515 ret = ci_construct_voltage_tables(rdev);
4516 if (ret) {
4517 DRM_ERROR("ci_construct_voltage_tables failed\n");
4518 return ret;
4519 }
4520 }
4521 if (pi->caps_dynamic_ac_timing) {
4522 ret = ci_initialize_mc_reg_table(rdev);
4523 if (ret)
4524 pi->caps_dynamic_ac_timing = false;
4525 }
4526 if (pi->dynamic_ss)
4527 ci_enable_spread_spectrum(rdev, true);
4528 if (pi->thermal_protection)
4529 ci_enable_thermal_protection(rdev, true);
4530 ci_program_sstp(rdev);
4531 ci_enable_display_gap(rdev);
4532 ci_program_vc(rdev);
4533 ret = ci_upload_firmware(rdev);
4534 if (ret) {
4535 DRM_ERROR("ci_upload_firmware failed\n");
4536 return ret;
4537 }
4538 ret = ci_process_firmware_header(rdev);
4539 if (ret) {
4540 DRM_ERROR("ci_process_firmware_header failed\n");
4541 return ret;
4542 }
4543 ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
4544 if (ret) {
4545 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
4546 return ret;
4547 }
4548 ret = ci_init_smc_table(rdev);
4549 if (ret) {
4550 DRM_ERROR("ci_init_smc_table failed\n");
4551 return ret;
4552 }
4553 ret = ci_init_arb_table_index(rdev);
4554 if (ret) {
4555 DRM_ERROR("ci_init_arb_table_index failed\n");
4556 return ret;
4557 }
4558 if (pi->caps_dynamic_ac_timing) {
4559 ret = ci_populate_initial_mc_reg_table(rdev);
4560 if (ret) {
4561 DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
4562 return ret;
4563 }
4564 }
4565 ret = ci_populate_pm_base(rdev);
4566 if (ret) {
4567 DRM_ERROR("ci_populate_pm_base failed\n");
4568 return ret;
4569 }
4570 ci_dpm_start_smc(rdev);
4571 ci_enable_vr_hot_gpio_interrupt(rdev);
4572 ret = ci_notify_smc_display_change(rdev, false);
4573 if (ret) {
4574 DRM_ERROR("ci_notify_smc_display_change failed\n");
4575 return ret;
4576 }
4577 ci_enable_sclk_control(rdev, true);
4578 ret = ci_enable_ulv(rdev, true);
4579 if (ret) {
4580 DRM_ERROR("ci_enable_ulv failed\n");
4581 return ret;
4582 }
4583 ret = ci_enable_ds_master_switch(rdev, true);
4584 if (ret) {
4585 DRM_ERROR("ci_enable_ds_master_switch failed\n");
4586 return ret;
4587 }
4588 ret = ci_start_dpm(rdev);
4589 if (ret) {
4590 DRM_ERROR("ci_start_dpm failed\n");
4591 return ret;
4592 }
4593 ret = ci_enable_didt(rdev, true);
4594 if (ret) {
4595 DRM_ERROR("ci_enable_didt failed\n");
4596 return ret;
4597 }
4598 ret = ci_enable_smc_cac(rdev, true);
4599 if (ret) {
4600 DRM_ERROR("ci_enable_smc_cac failed\n");
4601 return ret;
4602 }
4603 ret = ci_enable_power_containment(rdev, true);
4604 if (ret) {
4605 DRM_ERROR("ci_enable_power_containment failed\n");
4606 return ret;
4607 }
4608 if (rdev->irq.installed &&
4609 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
4610#if 0
4611 PPSMC_Result result;
4612#endif
4613 ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
4614 if (ret) {
4615 DRM_ERROR("ci_set_thermal_temperature_range failed\n");
4616 return ret;
4617 }
4618 rdev->irq.dpm_thermal = true;
4619 radeon_irq_set(rdev);
4620#if 0
4621 result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
4622
4623 if (result != PPSMC_Result_OK)
4624 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
4625#endif
4626 }
4627
4628 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
4629
4630 ci_dpm_powergate_uvd(rdev, true);
4631
4632 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
4633 RADEON_CG_BLOCK_MC |
4634 RADEON_CG_BLOCK_SDMA |
4635 RADEON_CG_BLOCK_BIF |
4636 RADEON_CG_BLOCK_UVD |
4637 RADEON_CG_BLOCK_HDP), true);
4638
4639 ci_update_current_ps(rdev, boot_ps);
4640
4641 return 0;
4642}
4643
4644void ci_dpm_disable(struct radeon_device *rdev)
4645{
4646 struct ci_power_info *pi = ci_get_pi(rdev);
4647 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4648
4649 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
4650 RADEON_CG_BLOCK_MC |
4651 RADEON_CG_BLOCK_SDMA |
4652 RADEON_CG_BLOCK_UVD |
4653 RADEON_CG_BLOCK_HDP), false);
4654
4655 ci_dpm_powergate_uvd(rdev, false);
4656
4657 if (!ci_is_smc_running(rdev))
4658 return;
4659
4660 if (pi->thermal_protection)
4661 ci_enable_thermal_protection(rdev, false);
4662 ci_enable_power_containment(rdev, false);
4663 ci_enable_smc_cac(rdev, false);
4664 ci_enable_didt(rdev, false);
4665 ci_enable_spread_spectrum(rdev, false);
4666 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
4667 ci_stop_dpm(rdev);
4668 ci_enable_ds_master_switch(rdev, true);
4669 ci_enable_ulv(rdev, false);
4670 ci_clear_vc(rdev);
4671 ci_reset_to_default(rdev);
4672 ci_dpm_stop_smc(rdev);
4673 ci_force_switch_to_arb_f0(rdev);
4674
4675 ci_update_current_ps(rdev, boot_ps);
4676}
4677
4678int ci_dpm_set_power_state(struct radeon_device *rdev)
4679{
4680 struct ci_power_info *pi = ci_get_pi(rdev);
4681 struct radeon_ps *new_ps = &pi->requested_rps;
4682 struct radeon_ps *old_ps = &pi->current_rps;
4683 int ret;
4684
4685 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
4686 RADEON_CG_BLOCK_MC |
4687 RADEON_CG_BLOCK_SDMA |
4688 RADEON_CG_BLOCK_BIF |
4689 RADEON_CG_BLOCK_UVD |
4690 RADEON_CG_BLOCK_HDP), false);
4691
4692 ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
4693 if (pi->pcie_performance_request)
4694 ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
4695 ret = ci_freeze_sclk_mclk_dpm(rdev);
4696 if (ret) {
4697 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
4698 return ret;
4699 }
4700 ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
4701 if (ret) {
4702 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
4703 return ret;
4704 }
4705 ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
4706 if (ret) {
4707 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
4708 return ret;
4709 }
4710#if 0
4711 ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
4712 if (ret) {
4713 DRM_ERROR("ci_update_vce_dpm failed\n");
4714 return ret;
4715 }
4716#endif
4717 ret = ci_update_sclk_t(rdev);
4718 if (ret) {
4719 DRM_ERROR("ci_update_sclk_t failed\n");
4720 return ret;
4721 }
4722 if (pi->caps_dynamic_ac_timing) {
4723 ret = ci_update_and_upload_mc_reg_table(rdev);
4724 if (ret) {
4725 DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
4726 return ret;
4727 }
4728 }
4729 ret = ci_program_memory_timing_parameters(rdev);
4730 if (ret) {
4731 DRM_ERROR("ci_program_memory_timing_parameters failed\n");
4732 return ret;
4733 }
4734 ret = ci_unfreeze_sclk_mclk_dpm(rdev);
4735 if (ret) {
4736 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
4737 return ret;
4738 }
4739 ret = ci_upload_dpm_level_enable_mask(rdev);
4740 if (ret) {
4741 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
4742 return ret;
4743 }
4744 if (pi->pcie_performance_request)
4745 ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
4746
4747 ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
4748 if (ret) {
4749 DRM_ERROR("ci_dpm_force_performance_level failed\n");
4750 return ret;
4751 }
4752
4753 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
4754 RADEON_CG_BLOCK_MC |
4755 RADEON_CG_BLOCK_SDMA |
4756 RADEON_CG_BLOCK_BIF |
4757 RADEON_CG_BLOCK_UVD |
4758 RADEON_CG_BLOCK_HDP), true);
4759
4760 return 0;
4761}
4762
4763int ci_dpm_power_control_set_level(struct radeon_device *rdev)
4764{
4765 return ci_power_control_set_level(rdev);
4766}
4767
4768void ci_dpm_reset_asic(struct radeon_device *rdev)
4769{
4770 ci_set_boot_state(rdev);
4771}
4772
4773void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
4774{
4775 ci_program_display_gap(rdev);
4776}
4777
4778union power_info {
4779 struct _ATOM_POWERPLAY_INFO info;
4780 struct _ATOM_POWERPLAY_INFO_V2 info_2;
4781 struct _ATOM_POWERPLAY_INFO_V3 info_3;
4782 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
4783 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
4784 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
4785};
4786
4787union pplib_clock_info {
4788 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
4789 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
4790 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
4791 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
4792 struct _ATOM_PPLIB_SI_CLOCK_INFO si;
4793 struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
4794};
4795
4796union pplib_power_state {
4797 struct _ATOM_PPLIB_STATE v1;
4798 struct _ATOM_PPLIB_STATE_V2 v2;
4799};
4800
4801static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
4802 struct radeon_ps *rps,
4803 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
4804 u8 table_rev)
4805{
4806 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
4807 rps->class = le16_to_cpu(non_clock_info->usClassification);
4808 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
4809
4810	if (table_rev > ATOM_PPLIB_NONCLOCKINFO_VER1) {
4811 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
4812 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
4813 } else {
4814 rps->vclk = 0;
4815 rps->dclk = 0;
4816 }
4817
4818 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
4819 rdev->pm.dpm.boot_ps = rps;
4820 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
4821 rdev->pm.dpm.uvd_ps = rps;
4822}
4823
4824static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
4825 struct radeon_ps *rps, int index,
4826 union pplib_clock_info *clock_info)
4827{
4828 struct ci_power_info *pi = ci_get_pi(rdev);
4829 struct ci_ps *ps = ci_get_ps(rps);
4830 struct ci_pl *pl = &ps->performance_levels[index];
4831
4832 ps->performance_level_count = index + 1;
4833
4834 pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
4835 pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
4836 pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
4837 pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
4838
4839 pl->pcie_gen = r600_get_pcie_gen_support(rdev,
4840 pi->sys_pcie_mask,
4841 pi->vbios_boot_state.pcie_gen_bootup_value,
4842 clock_info->ci.ucPCIEGen);
4843 pl->pcie_lane = r600_get_pcie_lane_support(rdev,
4844 pi->vbios_boot_state.pcie_lane_bootup_value,
4845 le16_to_cpu(clock_info->ci.usPCIELane));
4846
4847 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
4848 pi->acpi_pcie_gen = pl->pcie_gen;
4849 }
4850
4851 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
4852 pi->ulv.supported = true;
4853 pi->ulv.pl = *pl;
4854 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
4855 }
4856
4857 /* patch up boot state */
4858 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
4859 pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
4860 pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
4861 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
4862 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
4863 }
4864
4865 switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
4866 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
4867 pi->use_pcie_powersaving_levels = true;
4868 if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
4869 pi->pcie_gen_powersaving.max = pl->pcie_gen;
4870 if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
4871 pi->pcie_gen_powersaving.min = pl->pcie_gen;
4872 if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
4873 pi->pcie_lane_powersaving.max = pl->pcie_lane;
4874 if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
4875 pi->pcie_lane_powersaving.min = pl->pcie_lane;
4876 break;
4877 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
4878 pi->use_pcie_performance_levels = true;
4879 if (pi->pcie_gen_performance.max < pl->pcie_gen)
4880 pi->pcie_gen_performance.max = pl->pcie_gen;
4881 if (pi->pcie_gen_performance.min > pl->pcie_gen)
4882 pi->pcie_gen_performance.min = pl->pcie_gen;
4883 if (pi->pcie_lane_performance.max < pl->pcie_lane)
4884 pi->pcie_lane_performance.max = pl->pcie_lane;
4885 if (pi->pcie_lane_performance.min > pl->pcie_lane)
4886 pi->pcie_lane_performance.min = pl->pcie_lane;
4887 break;
4888 default:
4889 break;
4890 }
4891}
4892
4893static int ci_parse_power_table(struct radeon_device *rdev)
4894{
4895 struct radeon_mode_info *mode_info = &rdev->mode_info;
4896 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
4897 union pplib_power_state *power_state;
4898 int i, j, k, non_clock_array_index, clock_array_index;
4899 union pplib_clock_info *clock_info;
4900 struct _StateArray *state_array;
4901 struct _ClockInfoArray *clock_info_array;
4902 struct _NonClockInfoArray *non_clock_info_array;
4903 union power_info *power_info;
4904 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
4905 u16 data_offset;
4906 u8 frev, crev;
4907 u8 *power_state_offset;
4908 struct ci_ps *ps;
4909
4910 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
4911 &frev, &crev, &data_offset))
4912 return -EINVAL;
4913 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
4914
4915 state_array = (struct _StateArray *)
4916 (mode_info->atom_context->bios + data_offset +
4917 le16_to_cpu(power_info->pplib.usStateArrayOffset));
4918 clock_info_array = (struct _ClockInfoArray *)
4919 (mode_info->atom_context->bios + data_offset +
4920 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
4921 non_clock_info_array = (struct _NonClockInfoArray *)
4922 (mode_info->atom_context->bios + data_offset +
4923 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
4924
4925	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
4926				  sizeof(struct radeon_ps), GFP_KERNEL);
4927 if (!rdev->pm.dpm.ps)
4928 return -ENOMEM;
4929 power_state_offset = (u8 *)state_array->states;
4930 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
4931 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
4932 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
4933 for (i = 0; i < state_array->ucNumEntries; i++) {
4934 u8 *idx;
4935 power_state = (union pplib_power_state *)power_state_offset;
4936 non_clock_array_index = power_state->v2.nonClockInfoIndex;
4937 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
4938 &non_clock_info_array->nonClockInfo[non_clock_array_index];
4939 if (!rdev->pm.power_state[i].clock_info)
4940 return -EINVAL;
4941 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
4942 if (ps == NULL) {
4943 kfree(rdev->pm.dpm.ps);
4944 return -ENOMEM;
4945 }
4946 rdev->pm.dpm.ps[i].ps_priv = ps;
4947 ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
4948 non_clock_info,
4949 non_clock_info_array->ucEntrySize);
4950 k = 0;
4951 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
4952 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
4953 clock_array_index = idx[j];
4954 if (clock_array_index >= clock_info_array->ucNumEntries)
4955 continue;
4956 if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
4957 break;
4958 clock_info = (union pplib_clock_info *)
4959 ((u8 *)&clock_info_array->clockInfo[0] +
4960 (clock_array_index * clock_info_array->ucEntrySize));
4961 ci_parse_pplib_clock_info(rdev,
4962 &rdev->pm.dpm.ps[i], k,
4963 clock_info);
4964 k++;
4965 }
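		/* v2 state records are 2 header bytes plus one index per DPM level */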
4966 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
4967 }
4968 rdev->pm.dpm.num_ps = state_array->ucNumEntries;
4969 return 0;
4970}
4971
4972int ci_get_vbios_boot_values(struct radeon_device *rdev,
4973 struct ci_vbios_boot_state *boot_state)
4974{
4975 struct radeon_mode_info *mode_info = &rdev->mode_info;
4976 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
4977 ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
4978 u8 frev, crev;
4979 u16 data_offset;
4980
4981 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
4982 &frev, &crev, &data_offset)) {
4983 firmware_info =
4984 (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
4985 data_offset);
4986 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
4987 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
4988 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
4989 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
4990 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
4991 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
4992 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
4993
4994 return 0;
4995 }
4996 return -EINVAL;
4997}
4998
4999void ci_dpm_fini(struct radeon_device *rdev)
5000{
5001 int i;
5002
5003 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
5004 kfree(rdev->pm.dpm.ps[i].ps_priv);
5005 }
5006 kfree(rdev->pm.dpm.ps);
5007 kfree(rdev->pm.dpm.priv);
5008 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5009 r600_free_extended_power_table(rdev);
5010}
5011
5012int ci_dpm_init(struct radeon_device *rdev)
5013{
5014 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5015 u16 data_offset, size;
5016 u8 frev, crev;
5017 struct ci_power_info *pi;
5018 int ret;
5019 u32 mask;
5020
5021 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5022 if (pi == NULL)
5023 return -ENOMEM;
5024 rdev->pm.dpm.priv = pi;
5025
5026 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
5027 if (ret)
5028 pi->sys_pcie_mask = 0;
5029 else
5030 pi->sys_pcie_mask = mask;
5031 pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
5032
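	/*
	 * Initialize max low and min high so the power-state parser can
	 * widen these ranges as it sees each performance level.
	 */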
5033 pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
5034 pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
5035 pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
5036 pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
5037
5038 pi->pcie_lane_performance.max = 0;
5039 pi->pcie_lane_performance.min = 16;
5040 pi->pcie_lane_powersaving.max = 0;
5041 pi->pcie_lane_powersaving.min = 16;
5042
5043 ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
5044 if (ret) {
5045 ci_dpm_fini(rdev);
5046 return ret;
5047 }
5048 ret = ci_parse_power_table(rdev);
5049 if (ret) {
5050 ci_dpm_fini(rdev);
5051 return ret;
5052 }
5053 ret = r600_parse_extended_power_table(rdev);
5054 if (ret) {
5055 ci_dpm_fini(rdev);
5056 return ret;
5057 }
5058
5059 pi->dll_default_on = false;
5060 pi->sram_end = SMC_RAM_END;
5061
5062 pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5063 pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5064 pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5065 pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5066 pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5067 pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5068 pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5069 pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5070
5071 pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5072
5073 pi->sclk_dpm_key_disabled = 0;
5074 pi->mclk_dpm_key_disabled = 0;
5075 pi->pcie_dpm_key_disabled = 0;
5076
5077 pi->caps_sclk_ds = true;
5078
5079 pi->mclk_strobe_mode_threshold = 40000;
5080 pi->mclk_stutter_mode_threshold = 40000;
5081 pi->mclk_edc_enable_threshold = 40000;
5082 pi->mclk_edc_wr_enable_threshold = 40000;
5083
5084 ci_initialize_powertune_defaults(rdev);
5085
5086 pi->caps_fps = false;
5087
5088 pi->caps_sclk_throttle_low_notification = false;
5089
5090 pi->caps_uvd_dpm = true;
5091
5092 ci_get_leakage_voltages(rdev);
5093 ci_patch_dependency_tables_with_leakage(rdev);
5094 ci_set_private_data_variables_based_on_pptable(rdev);
5095
5096 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5097		kcalloc(4, sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
5098 if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5099 ci_dpm_fini(rdev);
5100 return -ENOMEM;
5101 }
5102 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5103 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5104 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5105 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5106 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5107 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5108 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5109 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5110 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5111
5112 rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5113 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5114 rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5115
5116 rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5117 rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5118 rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5119 rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5120
5121 pi->thermal_temp_setting.temperature_low = 99500;
5122 pi->thermal_temp_setting.temperature_high = 100000;
5123 pi->thermal_temp_setting.temperature_shutdown = 104000;
5124
5125 pi->uvd_enabled = false;
5126
5127 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5128 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5129 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5130 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5131 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5132 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5133 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5134
5135 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5136 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5137 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5138 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5139 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5140 else
5141 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
5142 }
5143
5144 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
5145 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
5146 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5147 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
5148 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5149 else
5150 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
5151 }
5152
5153 pi->vddc_phase_shed_control = true;
5154
5155#if defined(CONFIG_ACPI)
5156 pi->pcie_performance_request =
5157 radeon_acpi_is_pcie_performance_request_supported(rdev);
5158#else
5159 pi->pcie_performance_request = false;
5160#endif
5161
5162 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
5163 &frev, &crev, &data_offset)) {
5164 pi->caps_sclk_ss_support = true;
5165 pi->caps_mclk_ss_support = true;
5166 pi->dynamic_ss = true;
5167 } else {
5168 pi->caps_sclk_ss_support = false;
5169 pi->caps_mclk_ss_support = false;
5170 pi->dynamic_ss = true;
5171 }
5172
5173 if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
5174 pi->thermal_protection = true;
5175 else
5176 pi->thermal_protection = false;
5177
5178 pi->caps_dynamic_ac_timing = true;
5179
5180 pi->uvd_power_gated = false;
5181
5182 /* make sure dc limits are valid */
5183 if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
5184 (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
5185 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
5186 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
5187
5188 return 0;
5189}
5190
5191void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
5192 struct seq_file *m)
5193{
5194 u32 sclk = ci_get_average_sclk_freq(rdev);
5195 u32 mclk = ci_get_average_mclk_freq(rdev);
5196
5197 seq_printf(m, "power level avg sclk: %u mclk: %u\n",
5198 sclk, mclk);
5199}
5200
5201void ci_dpm_print_power_state(struct radeon_device *rdev,
5202 struct radeon_ps *rps)
5203{
5204 struct ci_ps *ps = ci_get_ps(rps);
5205 struct ci_pl *pl;
5206 int i;
5207
5208 r600_dpm_print_class_info(rps->class, rps->class2);
5209 r600_dpm_print_cap_info(rps->caps);
5210	printk("\tuvd vclk: %u dclk: %u\n", rps->vclk, rps->dclk);
5211 for (i = 0; i < ps->performance_level_count; i++) {
5212 pl = &ps->performance_levels[i];
5213 printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
5214 i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
5215 }
5216 r600_dpm_print_ps_status(rdev, rps);
5217}
5218
5219u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
5220{
5221 struct ci_power_info *pi = ci_get_pi(rdev);
5222 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5223
5224 if (low)
5225 return requested_state->performance_levels[0].sclk;
5226 else
5227 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
5228}
5229
5230u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
5231{
5232 struct ci_power_info *pi = ci_get_pi(rdev);
5233 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5234
5235 if (low)
5236 return requested_state->performance_levels[0].mclk;
5237 else
5238 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
5239}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h
new file mode 100644
index 000000000000..93bbed977ffb
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_dpm.h
@@ -0,0 +1,332 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __CI_DPM_H__
24#define __CI_DPM_H__
25
26#include "ppsmc.h"
27
28#define SMU__NUM_SCLK_DPM_STATE 8
29#define SMU__NUM_MCLK_DPM_LEVELS 6
30#define SMU__NUM_LCLK_DPM_LEVELS 8
31#define SMU__NUM_PCIE_DPM_LEVELS 8
32#include "smu7_discrete.h"
33
34#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2
35
36struct ci_pl {
37 u32 mclk;
38 u32 sclk;
39 enum radeon_pcie_gen pcie_gen;
40 u16 pcie_lane;
41};
42
43struct ci_ps {
44 u16 performance_level_count;
45 bool dc_compatible;
46 u32 sclk_t;
47 struct ci_pl performance_levels[CISLANDS_MAX_HARDWARE_POWERLEVELS];
48};
49
50struct ci_dpm_level {
51 bool enabled;
52 u32 value;
53 u32 param1;
54};
55
56#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
57#define MAX_REGULAR_DPM_NUMBER 8
58#define CISLAND_MINIMUM_ENGINE_CLOCK 800
59
60struct ci_single_dpm_table {
61 u32 count;
62 struct ci_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
63};
64
65struct ci_dpm_table {
66 struct ci_single_dpm_table sclk_table;
67 struct ci_single_dpm_table mclk_table;
68 struct ci_single_dpm_table pcie_speed_table;
69 struct ci_single_dpm_table vddc_table;
70 struct ci_single_dpm_table vddci_table;
71 struct ci_single_dpm_table mvdd_table;
72};
73
74struct ci_mc_reg_entry {
75 u32 mclk_max;
76 u32 mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
77};
78
79struct ci_mc_reg_table {
80 u8 last;
81 u8 num_entries;
82 u16 valid_flag;
83 struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
84 SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
85};
86
87struct ci_ulv_parm
88{
89 bool supported;
90 u32 cg_ulv_parameter;
91 u32 volt_change_delay;
92 struct ci_pl pl;
93};
94
95#define CISLANDS_MAX_LEAKAGE_COUNT 8
96
97struct ci_leakage_voltage {
98 u16 count;
99 u16 leakage_id[CISLANDS_MAX_LEAKAGE_COUNT];
100 u16 actual_voltage[CISLANDS_MAX_LEAKAGE_COUNT];
101};
102
103struct ci_dpm_level_enable_mask {
104 u32 uvd_dpm_enable_mask;
105 u32 vce_dpm_enable_mask;
106 u32 acp_dpm_enable_mask;
107 u32 samu_dpm_enable_mask;
108 u32 sclk_dpm_enable_mask;
109 u32 mclk_dpm_enable_mask;
110 u32 pcie_dpm_enable_mask;
111};
112
113struct ci_vbios_boot_state
114{
115 u16 mvdd_bootup_value;
116 u16 vddc_bootup_value;
117 u16 vddci_bootup_value;
118 u32 sclk_bootup_value;
119 u32 mclk_bootup_value;
120 u16 pcie_gen_bootup_value;
121 u16 pcie_lane_bootup_value;
122};
123
124struct ci_clock_registers {
125 u32 cg_spll_func_cntl;
126 u32 cg_spll_func_cntl_2;
127 u32 cg_spll_func_cntl_3;
128 u32 cg_spll_func_cntl_4;
129 u32 cg_spll_spread_spectrum;
130 u32 cg_spll_spread_spectrum_2;
131 u32 dll_cntl;
132 u32 mclk_pwrmgt_cntl;
133 u32 mpll_ad_func_cntl;
134 u32 mpll_dq_func_cntl;
135 u32 mpll_func_cntl;
136 u32 mpll_func_cntl_1;
137 u32 mpll_func_cntl_2;
138 u32 mpll_ss1;
139 u32 mpll_ss2;
140};
141
142struct ci_thermal_temperature_setting {
143 s32 temperature_low;
144 s32 temperature_high;
145 s32 temperature_shutdown;
146};
147
148struct ci_pcie_perf_range {
149 u16 max;
150 u16 min;
151};
152
153enum ci_pt_config_reg_type {
154 CISLANDS_CONFIGREG_MMR = 0,
155 CISLANDS_CONFIGREG_SMC_IND,
156 CISLANDS_CONFIGREG_DIDT_IND,
157 CISLANDS_CONFIGREG_CACHE,
158 CISLANDS_CONFIGREG_MAX
159};
160
161#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001
162#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
163#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
164
165struct ci_pt_config_reg {
166 u32 offset;
167 u32 mask;
168 u32 shift;
169 u32 value;
170 enum ci_pt_config_reg_type type;
171};
172
173struct ci_pt_defaults {
174 u8 svi_load_line_en;
175 u8 svi_load_line_vddc;
176 u8 tdc_vddc_throttle_release_limit_perc;
177 u8 tdc_mawt;
178 u8 tdc_waterfall_ctl;
179 u8 dte_ambient_temp_base;
180 u32 display_cac;
181 u32 bapm_temp_gradient;
182 u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
183 u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
184};
185
186#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
187#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
188#define DPMTABLE_UPDATE_SCLK 0x00000004
189#define DPMTABLE_UPDATE_MCLK 0x00000008
190
191struct ci_power_info {
192 struct ci_dpm_table dpm_table;
193 u32 voltage_control;
194 u32 mvdd_control;
195 u32 vddci_control;
196 u32 active_auto_throttle_sources;
197 struct ci_clock_registers clock_registers;
198 u16 acpi_vddc;
199 u16 acpi_vddci;
200 enum radeon_pcie_gen force_pcie_gen;
201 enum radeon_pcie_gen acpi_pcie_gen;
202 struct ci_leakage_voltage vddc_leakage;
203 struct ci_leakage_voltage vddci_leakage;
204 u16 max_vddc_in_pp_table;
205 u16 min_vddc_in_pp_table;
206 u16 max_vddci_in_pp_table;
207 u16 min_vddci_in_pp_table;
208 u32 mclk_strobe_mode_threshold;
209 u32 mclk_stutter_mode_threshold;
210 u32 mclk_edc_enable_threshold;
211 u32 mclk_edc_wr_enable_threshold;
212 struct ci_vbios_boot_state vbios_boot_state;
213 /* smc offsets */
214 u32 sram_end;
215 u32 dpm_table_start;
216 u32 soft_regs_start;
217 u32 mc_reg_table_start;
218 u32 fan_table_start;
219 u32 arb_table_start;
220 /* smc tables */
221 SMU7_Discrete_DpmTable smc_state_table;
222 SMU7_Discrete_MCRegisters smc_mc_reg_table;
223 SMU7_Discrete_PmFuses smc_powertune_table;
224 /* other stuff */
225 struct ci_mc_reg_table mc_reg_table;
226 struct atom_voltage_table vddc_voltage_table;
227 struct atom_voltage_table vddci_voltage_table;
228 struct atom_voltage_table mvdd_voltage_table;
229 struct ci_ulv_parm ulv;
230 u32 power_containment_features;
231 const struct ci_pt_defaults *powertune_defaults;
232 u32 dte_tj_offset;
233 bool vddc_phase_shed_control;
234 struct ci_thermal_temperature_setting thermal_temp_setting;
235 struct ci_dpm_level_enable_mask dpm_level_enable_mask;
236 u32 need_update_smu7_dpm_table;
237 u32 sclk_dpm_key_disabled;
238 u32 mclk_dpm_key_disabled;
239 u32 pcie_dpm_key_disabled;
240 struct ci_pcie_perf_range pcie_gen_performance;
241 struct ci_pcie_perf_range pcie_lane_performance;
242 struct ci_pcie_perf_range pcie_gen_powersaving;
243 struct ci_pcie_perf_range pcie_lane_powersaving;
244 u32 activity_target[SMU7_MAX_LEVELS_GRAPHICS];
245 u32 mclk_activity_target;
246 u32 low_sclk_interrupt_t;
247 u32 last_mclk_dpm_enable_mask;
248 u32 sys_pcie_mask;
249 /* caps */
250 bool caps_power_containment;
251 bool caps_cac;
252 bool caps_sq_ramping;
253 bool caps_db_ramping;
254 bool caps_td_ramping;
255 bool caps_tcp_ramping;
256 bool caps_fps;
257 bool caps_sclk_ds;
258 bool caps_sclk_ss_support;
259 bool caps_mclk_ss_support;
260 bool caps_uvd_dpm;
261 bool caps_vce_dpm;
262 bool caps_samu_dpm;
263 bool caps_acp_dpm;
264 bool caps_automatic_dc_transition;
265 bool caps_sclk_throttle_low_notification;
266 bool caps_dynamic_ac_timing;
267 /* flags */
268 bool thermal_protection;
269 bool pcie_performance_request;
270 bool dynamic_ss;
271 bool dll_default_on;
272 bool cac_enabled;
273 bool uvd_enabled;
274 bool battery_state;
275 bool pspp_notify_required;
276 bool mem_gddr5;
277 bool enable_bapm_feature;
278 bool enable_tdc_limit_feature;
279 bool enable_pkg_pwr_tracking_feature;
280 bool use_pcie_performance_levels;
281 bool use_pcie_powersaving_levels;
282 bool uvd_power_gated;
283 /* driver states */
284 struct radeon_ps current_rps;
285 struct ci_ps current_ps;
286 struct radeon_ps requested_rps;
287 struct ci_ps requested_ps;
288};
289
290#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0
291#define CISLANDS_VOLTAGE_CONTROL_BY_GPIO 0x1
292#define CISLANDS_VOLTAGE_CONTROL_BY_SVID2 0x2
293
294#define CISLANDS_Q88_FORMAT_CONVERSION_UNIT 256
295
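/* Illustrative sketch (not from the patch): the Q8.8 unit above means
 * fixed-point values are scaled by 256, so e.g. 2.5 encodes as
 *	(u16)(2.5 * CISLANDS_Q88_FORMAT_CONVERSION_UNIT) == 0x0280
 * and decodes by dividing by 256 for the integer part and taking the
 * remainder as 1/256ths.
 */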
296#define CISLANDS_VRC_DFLT0 0x3FFFC000
297#define CISLANDS_VRC_DFLT1 0x000400
298#define CISLANDS_VRC_DFLT2 0xC00080
299#define CISLANDS_VRC_DFLT3 0xC00200
300#define CISLANDS_VRC_DFLT4 0xC01680
301#define CISLANDS_VRC_DFLT5 0xC00033
302#define CISLANDS_VRC_DFLT6 0xC00033
303#define CISLANDS_VRC_DFLT7 0x3FFFC000
304
305#define CISLANDS_CGULVPARAMETER_DFLT 0x00040035
306#define CISLAND_TARGETACTIVITY_DFLT 30
307#define CISLAND_MCLK_TARGETACTIVITY_DFLT 10
308
309#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
310#define PCIE_PERF_REQ_FORCE_LOWPOWER 1
311#define PCIE_PERF_REQ_PECI_GEN1 2
312#define PCIE_PERF_REQ_PECI_GEN2 3
313#define PCIE_PERF_REQ_PECI_GEN3 4
314
315int ci_copy_bytes_to_smc(struct radeon_device *rdev,
316 u32 smc_start_address,
317 const u8 *src, u32 byte_count, u32 limit);
318void ci_start_smc(struct radeon_device *rdev);
319void ci_reset_smc(struct radeon_device *rdev);
320int ci_program_jump_on_start(struct radeon_device *rdev);
321void ci_stop_smc_clock(struct radeon_device *rdev);
322void ci_start_smc_clock(struct radeon_device *rdev);
323bool ci_is_smc_running(struct radeon_device *rdev);
324PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
325PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev);
326int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit);
327int ci_read_smc_sram_dword(struct radeon_device *rdev,
328 u32 smc_address, u32 *value, u32 limit);
329int ci_write_smc_sram_dword(struct radeon_device *rdev,
330 u32 smc_address, u32 value, u32 limit);
331
332#endif
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
new file mode 100644
index 000000000000..53b43dd3cf1e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -0,0 +1,262 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include <linux/firmware.h>
26#include "drmP.h"
27#include "radeon.h"
28#include "cikd.h"
29#include "ppsmc.h"
30#include "radeon_ucode.h"
31
32static int ci_set_smc_sram_address(struct radeon_device *rdev,
33 u32 smc_address, u32 limit)
34{
35 if (smc_address & 3)
36 return -EINVAL;
37 if ((smc_address + 3) > limit)
38 return -EINVAL;
39
40 WREG32(SMC_IND_INDEX_0, smc_address);
41 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
42
43 return 0;
44}
45
46int ci_copy_bytes_to_smc(struct radeon_device *rdev,
47 u32 smc_start_address,
48 const u8 *src, u32 byte_count, u32 limit)
49{
50 u32 data, original_data;
51 u32 addr;
52 u32 extra_shift;
53 int ret;
54
55 if (smc_start_address & 3)
56 return -EINVAL;
57 if ((smc_start_address + byte_count) > limit)
58 return -EINVAL;
59
60 addr = smc_start_address;
61
62 while (byte_count >= 4) {
63 /* SMC address space is BE */
64 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
65
66 ret = ci_set_smc_sram_address(rdev, addr, limit);
67 if (ret)
68 return ret;
69
70 WREG32(SMC_IND_DATA_0, data);
71
72 src += 4;
73 byte_count -= 4;
74 addr += 4;
75 }
76
77 /* RMW for the final bytes */
78 if (byte_count > 0) {
79 data = 0;
80
81 ret = ci_set_smc_sram_address(rdev, addr, limit);
82 if (ret)
83 return ret;
84
85 original_data = RREG32(SMC_IND_DATA_0);
86
87 extra_shift = 8 * (4 - byte_count);
88
89 while (byte_count > 0) {
90 data = (data << 8) + *src++;
91 byte_count--;
92 }
93
94 data <<= extra_shift;
95
96 data |= (original_data & ~((~0UL) << extra_shift));
97
98 ret = ci_set_smc_sram_address(rdev, addr, limit);
99 if (ret)
100 return ret;
101
102 WREG32(SMC_IND_DATA_0, data);
103 }
104 return 0;
105}
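/* A minimal standalone sketch of the tail read-modify-write above
 * (hypothetical helper, not part of the patch); n must be 1..3:
 */
static u32 smc_merge_tail(u32 original, const u8 *src, u32 n)
{
	u32 data = 0;
	u32 shift = 8 * (4 - n);

	while (n--)
		data = (data << 8) | *src++;
	/* the tail lands in the high bytes; the low bytes keep their old value */
	return (data << shift) | (original & ~(~0u << shift));
}
/* e.g. merging { 0xaa, 0xbb } into 0x11223344 yields 0xaabb3344 */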
106
107void ci_start_smc(struct radeon_device *rdev)
108{
109 u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
110
111 tmp &= ~RST_REG;
112 WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
113}
114
115void ci_reset_smc(struct radeon_device *rdev)
116{
117 u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
118
119 tmp |= RST_REG;
120 WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
121}
122
123int ci_program_jump_on_start(struct radeon_device *rdev)
124{
125 static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
126
127 return ci_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
128}
129
130void ci_stop_smc_clock(struct radeon_device *rdev)
131{
132 u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
133
134 tmp |= CK_DISABLE;
135
136 WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
137}
138
139void ci_start_smc_clock(struct radeon_device *rdev)
140{
141 u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
142
143 tmp &= ~CK_DISABLE;
144
145 WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
146}
147
148bool ci_is_smc_running(struct radeon_device *rdev)
149{
150 u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
151 u32 pc_c = RREG32_SMC(SMC_PC_C);
152
153 if (!(clk & CK_DISABLE) && (0x20100 <= pc_c))
154 return true;
155
156 return false;
157}
158
159PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
160{
161 u32 tmp;
162 int i;
163
164 if (!ci_is_smc_running(rdev))
165 return PPSMC_Result_Failed;
166
167 WREG32(SMC_MESSAGE_0, msg);
168
169 for (i = 0; i < rdev->usec_timeout; i++) {
170 tmp = RREG32(SMC_RESP_0);
171 if (tmp != 0)
172 break;
173 udelay(1);
174 }
175 tmp = RREG32(SMC_RESP_0);
176
177 return (PPSMC_Result)tmp;
178}
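/* Typical caller pattern (sketch; PPSMC_MSG_Halt is assumed to be one of
 * the message IDs from ppsmc.h):
 *
 *	PPSMC_Result res = ci_send_msg_to_smc(rdev, PPSMC_MSG_Halt);
 *	if (res != PPSMC_Result_OK)
 *		DRM_ERROR("SMC message failed (0x%x)\n", res);
 */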
179
180PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
181{
182 u32 tmp;
183 int i;
184
185 if (!ci_is_smc_running(rdev))
186 return PPSMC_Result_OK;
187
188 for (i = 0; i < rdev->usec_timeout; i++) {
189 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
190 if ((tmp & CKEN) == 0)
191 break;
192 udelay(1);
193 }
194
195 return PPSMC_Result_OK;
196}
197
198int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
199{
200 u32 ucode_start_address;
201 u32 ucode_size;
202 const u8 *src;
203 u32 data;
204
205 if (!rdev->smc_fw)
206 return -EINVAL;
207
208 switch (rdev->family) {
209 case CHIP_BONAIRE:
210 ucode_start_address = BONAIRE_SMC_UCODE_START;
211 ucode_size = BONAIRE_SMC_UCODE_SIZE;
212 break;
213 default:
214 DRM_ERROR("unknown asic in smc ucode loader\n");
215 BUG();
216 }
217
218 if (ucode_size & 3)
219 return -EINVAL;
220
221 src = (const u8 *)rdev->smc_fw->data;
222 WREG32(SMC_IND_INDEX_0, ucode_start_address);
223 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
224 while (ucode_size >= 4) {
225 /* SMC address space is BE */
226 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
227
228 WREG32(SMC_IND_DATA_0, data);
229
230 src += 4;
231 ucode_size -= 4;
232 }
233 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
234
235 return 0;
236}
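/* The loader above relies on the indirect auto-increment mode: with
 * AUTO_INCREMENT_IND_0 set, SMC_IND_INDEX_0 is written once and every
 * SMC_IND_DATA_0 write advances the SRAM address by 4. A sketch with
 * hypothetical start/buf/n_words:
 *
 *	WREG32(SMC_IND_INDEX_0, start);
 *	WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
 *	for (i = 0; i < n_words; i++)
 *		WREG32(SMC_IND_DATA_0, buf[i]);
 *	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
 */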
237
238int ci_read_smc_sram_dword(struct radeon_device *rdev,
239 u32 smc_address, u32 *value, u32 limit)
240{
241 int ret;
242
243 ret = ci_set_smc_sram_address(rdev, smc_address, limit);
244 if (ret)
245 return ret;
246
247 *value = RREG32(SMC_IND_DATA_0);
248 return 0;
249}
250
251int ci_write_smc_sram_dword(struct radeon_device *rdev,
252 u32 smc_address, u32 value, u32 limit)
253{
254 int ret;
255
256 ret = ci_set_smc_sram_address(rdev, smc_address, limit);
257 if (ret)
258 return ret;
259
260 WREG32(SMC_IND_DATA_0, value);
261 return 0;
262}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 6dacec4e2090..a3bba0587276 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -30,22 +30,8 @@
30#include "cikd.h" 30#include "cikd.h"
31#include "atom.h" 31#include "atom.h"
32#include "cik_blit_shaders.h" 32#include "cik_blit_shaders.h"
33 33#include "radeon_ucode.h"
34/* GFX */ 34#include "clearstate_ci.h"
35#define CIK_PFP_UCODE_SIZE 2144
36#define CIK_ME_UCODE_SIZE 2144
37#define CIK_CE_UCODE_SIZE 2144
38/* compute */
39#define CIK_MEC_UCODE_SIZE 4192
40/* interrupts */
41#define BONAIRE_RLC_UCODE_SIZE 2048
42#define KB_RLC_UCODE_SIZE 2560
43#define KV_RLC_UCODE_SIZE 2560
44/* gddr controller */
45#define CIK_MC_UCODE_SIZE 7866
46/* sdma */
47#define CIK_SDMA_UCODE_SIZE 1050
48#define CIK_SDMA_UCODE_VERSION 64
49 35
50MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin"); 36MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
51MODULE_FIRMWARE("radeon/BONAIRE_me.bin"); 37MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
@@ -54,6 +40,7 @@ MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
54MODULE_FIRMWARE("radeon/BONAIRE_mc.bin"); 40MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
55MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin"); 41MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
56MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin"); 42MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
43MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
57MODULE_FIRMWARE("radeon/KAVERI_pfp.bin"); 44MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
58MODULE_FIRMWARE("radeon/KAVERI_me.bin"); 45MODULE_FIRMWARE("radeon/KAVERI_me.bin");
59MODULE_FIRMWARE("radeon/KAVERI_ce.bin"); 46MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
@@ -72,10 +59,61 @@ extern void r600_ih_ring_fini(struct radeon_device *rdev);
72extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save); 59extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
73extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save); 60extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
74extern bool evergreen_is_display_hung(struct radeon_device *rdev); 61extern bool evergreen_is_display_hung(struct radeon_device *rdev);
62extern void sumo_rlc_fini(struct radeon_device *rdev);
63extern int sumo_rlc_init(struct radeon_device *rdev);
75extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 64extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
76extern void si_rlc_fini(struct radeon_device *rdev); 65extern void si_rlc_reset(struct radeon_device *rdev);
77extern int si_rlc_init(struct radeon_device *rdev); 66extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
67extern int cik_sdma_resume(struct radeon_device *rdev);
68extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
69extern void cik_sdma_fini(struct radeon_device *rdev);
70extern void cik_sdma_vm_set_page(struct radeon_device *rdev,
71 struct radeon_ib *ib,
72 uint64_t pe,
73 uint64_t addr, unsigned count,
74 uint32_t incr, uint32_t flags);
78static void cik_rlc_stop(struct radeon_device *rdev); 75static void cik_rlc_stop(struct radeon_device *rdev);
76static void cik_pcie_gen3_enable(struct radeon_device *rdev);
77static void cik_program_aspm(struct radeon_device *rdev);
78static void cik_init_pg(struct radeon_device *rdev);
79static void cik_init_cg(struct radeon_device *rdev);
80
81/* get temperature in millidegrees */
82int ci_get_temp(struct radeon_device *rdev)
83{
84 u32 temp;
85 int actual_temp = 0;
86
87 temp = (RREG32_SMC(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
88 CTF_TEMP_SHIFT;
89
90 if (temp & 0x200)
91 actual_temp = 255;
92 else
93 actual_temp = temp & 0x1ff;
94
95 actual_temp = actual_temp * 1000;
96
97 return actual_temp;
98}
99
100/* get temperature in millidegrees */
101int kv_get_temp(struct radeon_device *rdev)
102{
103 u32 temp;
104 int actual_temp = 0;
105
106 temp = RREG32_SMC(0xC0300E0C);
107
108 if (temp)
109 actual_temp = (temp / 8) - 49;
110 else
111 actual_temp = 0;
112
113 actual_temp = actual_temp * 1000;
114
115 return actual_temp;
116}
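/* Sketch mirroring the CI decode above: the CTF temperature field is 9 bits,
 * and bit 0x200 clamps the reading to 255 C; the result is in millidegrees: */
static int ci_decode_ctf_temp(u32 field)
{
	int c = (field & 0x200) ? 255 : (int)(field & 0x1ff);
	return c * 1000;	/* e.g. a raw field of 0x31 -> 49000 */
}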
79 117
80/* 118/*
81 * Indirect registers accessor 119 * Indirect registers accessor
@@ -98,6 +136,778 @@ void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
98 (void)RREG32(PCIE_DATA); 136 (void)RREG32(PCIE_DATA);
99} 137}
100 138
139static const u32 spectre_rlc_save_restore_register_list[] =
140{
141 (0x0e00 << 16) | (0xc12c >> 2),
142 0x00000000,
143 (0x0e00 << 16) | (0xc140 >> 2),
144 0x00000000,
145 (0x0e00 << 16) | (0xc150 >> 2),
146 0x00000000,
147 (0x0e00 << 16) | (0xc15c >> 2),
148 0x00000000,
149 (0x0e00 << 16) | (0xc168 >> 2),
150 0x00000000,
151 (0x0e00 << 16) | (0xc170 >> 2),
152 0x00000000,
153 (0x0e00 << 16) | (0xc178 >> 2),
154 0x00000000,
155 (0x0e00 << 16) | (0xc204 >> 2),
156 0x00000000,
157 (0x0e00 << 16) | (0xc2b4 >> 2),
158 0x00000000,
159 (0x0e00 << 16) | (0xc2b8 >> 2),
160 0x00000000,
161 (0x0e00 << 16) | (0xc2bc >> 2),
162 0x00000000,
163 (0x0e00 << 16) | (0xc2c0 >> 2),
164 0x00000000,
165 (0x0e00 << 16) | (0x8228 >> 2),
166 0x00000000,
167 (0x0e00 << 16) | (0x829c >> 2),
168 0x00000000,
169 (0x0e00 << 16) | (0x869c >> 2),
170 0x00000000,
171 (0x0600 << 16) | (0x98f4 >> 2),
172 0x00000000,
173 (0x0e00 << 16) | (0x98f8 >> 2),
174 0x00000000,
175 (0x0e00 << 16) | (0x9900 >> 2),
176 0x00000000,
177 (0x0e00 << 16) | (0xc260 >> 2),
178 0x00000000,
179 (0x0e00 << 16) | (0x90e8 >> 2),
180 0x00000000,
181 (0x0e00 << 16) | (0x3c000 >> 2),
182 0x00000000,
183 (0x0e00 << 16) | (0x3c00c >> 2),
184 0x00000000,
185 (0x0e00 << 16) | (0x8c1c >> 2),
186 0x00000000,
187 (0x0e00 << 16) | (0x9700 >> 2),
188 0x00000000,
189 (0x0e00 << 16) | (0xcd20 >> 2),
190 0x00000000,
191 (0x4e00 << 16) | (0xcd20 >> 2),
192 0x00000000,
193 (0x5e00 << 16) | (0xcd20 >> 2),
194 0x00000000,
195 (0x6e00 << 16) | (0xcd20 >> 2),
196 0x00000000,
197 (0x7e00 << 16) | (0xcd20 >> 2),
198 0x00000000,
199 (0x8e00 << 16) | (0xcd20 >> 2),
200 0x00000000,
201 (0x9e00 << 16) | (0xcd20 >> 2),
202 0x00000000,
203 (0xae00 << 16) | (0xcd20 >> 2),
204 0x00000000,
205 (0xbe00 << 16) | (0xcd20 >> 2),
206 0x00000000,
207 (0x0e00 << 16) | (0x89bc >> 2),
208 0x00000000,
209 (0x0e00 << 16) | (0x8900 >> 2),
210 0x00000000,
211 0x3,
212 (0x0e00 << 16) | (0xc130 >> 2),
213 0x00000000,
214 (0x0e00 << 16) | (0xc134 >> 2),
215 0x00000000,
216 (0x0e00 << 16) | (0xc1fc >> 2),
217 0x00000000,
218 (0x0e00 << 16) | (0xc208 >> 2),
219 0x00000000,
220 (0x0e00 << 16) | (0xc264 >> 2),
221 0x00000000,
222 (0x0e00 << 16) | (0xc268 >> 2),
223 0x00000000,
224 (0x0e00 << 16) | (0xc26c >> 2),
225 0x00000000,
226 (0x0e00 << 16) | (0xc270 >> 2),
227 0x00000000,
228 (0x0e00 << 16) | (0xc274 >> 2),
229 0x00000000,
230 (0x0e00 << 16) | (0xc278 >> 2),
231 0x00000000,
232 (0x0e00 << 16) | (0xc27c >> 2),
233 0x00000000,
234 (0x0e00 << 16) | (0xc280 >> 2),
235 0x00000000,
236 (0x0e00 << 16) | (0xc284 >> 2),
237 0x00000000,
238 (0x0e00 << 16) | (0xc288 >> 2),
239 0x00000000,
240 (0x0e00 << 16) | (0xc28c >> 2),
241 0x00000000,
242 (0x0e00 << 16) | (0xc290 >> 2),
243 0x00000000,
244 (0x0e00 << 16) | (0xc294 >> 2),
245 0x00000000,
246 (0x0e00 << 16) | (0xc298 >> 2),
247 0x00000000,
248 (0x0e00 << 16) | (0xc29c >> 2),
249 0x00000000,
250 (0x0e00 << 16) | (0xc2a0 >> 2),
251 0x00000000,
252 (0x0e00 << 16) | (0xc2a4 >> 2),
253 0x00000000,
254 (0x0e00 << 16) | (0xc2a8 >> 2),
255 0x00000000,
256 (0x0e00 << 16) | (0xc2ac >> 2),
257 0x00000000,
258 (0x0e00 << 16) | (0xc2b0 >> 2),
259 0x00000000,
260 (0x0e00 << 16) | (0x301d0 >> 2),
261 0x00000000,
262 (0x0e00 << 16) | (0x30238 >> 2),
263 0x00000000,
264 (0x0e00 << 16) | (0x30250 >> 2),
265 0x00000000,
266 (0x0e00 << 16) | (0x30254 >> 2),
267 0x00000000,
268 (0x0e00 << 16) | (0x30258 >> 2),
269 0x00000000,
270 (0x0e00 << 16) | (0x3025c >> 2),
271 0x00000000,
272 (0x4e00 << 16) | (0xc900 >> 2),
273 0x00000000,
274 (0x5e00 << 16) | (0xc900 >> 2),
275 0x00000000,
276 (0x6e00 << 16) | (0xc900 >> 2),
277 0x00000000,
278 (0x7e00 << 16) | (0xc900 >> 2),
279 0x00000000,
280 (0x8e00 << 16) | (0xc900 >> 2),
281 0x00000000,
282 (0x9e00 << 16) | (0xc900 >> 2),
283 0x00000000,
284 (0xae00 << 16) | (0xc900 >> 2),
285 0x00000000,
286 (0xbe00 << 16) | (0xc900 >> 2),
287 0x00000000,
288 (0x4e00 << 16) | (0xc904 >> 2),
289 0x00000000,
290 (0x5e00 << 16) | (0xc904 >> 2),
291 0x00000000,
292 (0x6e00 << 16) | (0xc904 >> 2),
293 0x00000000,
294 (0x7e00 << 16) | (0xc904 >> 2),
295 0x00000000,
296 (0x8e00 << 16) | (0xc904 >> 2),
297 0x00000000,
298 (0x9e00 << 16) | (0xc904 >> 2),
299 0x00000000,
300 (0xae00 << 16) | (0xc904 >> 2),
301 0x00000000,
302 (0xbe00 << 16) | (0xc904 >> 2),
303 0x00000000,
304 (0x4e00 << 16) | (0xc908 >> 2),
305 0x00000000,
306 (0x5e00 << 16) | (0xc908 >> 2),
307 0x00000000,
308 (0x6e00 << 16) | (0xc908 >> 2),
309 0x00000000,
310 (0x7e00 << 16) | (0xc908 >> 2),
311 0x00000000,
312 (0x8e00 << 16) | (0xc908 >> 2),
313 0x00000000,
314 (0x9e00 << 16) | (0xc908 >> 2),
315 0x00000000,
316 (0xae00 << 16) | (0xc908 >> 2),
317 0x00000000,
318 (0xbe00 << 16) | (0xc908 >> 2),
319 0x00000000,
320 (0x4e00 << 16) | (0xc90c >> 2),
321 0x00000000,
322 (0x5e00 << 16) | (0xc90c >> 2),
323 0x00000000,
324 (0x6e00 << 16) | (0xc90c >> 2),
325 0x00000000,
326 (0x7e00 << 16) | (0xc90c >> 2),
327 0x00000000,
328 (0x8e00 << 16) | (0xc90c >> 2),
329 0x00000000,
330 (0x9e00 << 16) | (0xc90c >> 2),
331 0x00000000,
332 (0xae00 << 16) | (0xc90c >> 2),
333 0x00000000,
334 (0xbe00 << 16) | (0xc90c >> 2),
335 0x00000000,
336 (0x4e00 << 16) | (0xc910 >> 2),
337 0x00000000,
338 (0x5e00 << 16) | (0xc910 >> 2),
339 0x00000000,
340 (0x6e00 << 16) | (0xc910 >> 2),
341 0x00000000,
342 (0x7e00 << 16) | (0xc910 >> 2),
343 0x00000000,
344 (0x8e00 << 16) | (0xc910 >> 2),
345 0x00000000,
346 (0x9e00 << 16) | (0xc910 >> 2),
347 0x00000000,
348 (0xae00 << 16) | (0xc910 >> 2),
349 0x00000000,
350 (0xbe00 << 16) | (0xc910 >> 2),
351 0x00000000,
352 (0x0e00 << 16) | (0xc99c >> 2),
353 0x00000000,
354 (0x0e00 << 16) | (0x9834 >> 2),
355 0x00000000,
356 (0x0000 << 16) | (0x30f00 >> 2),
357 0x00000000,
358 (0x0001 << 16) | (0x30f00 >> 2),
359 0x00000000,
360 (0x0000 << 16) | (0x30f04 >> 2),
361 0x00000000,
362 (0x0001 << 16) | (0x30f04 >> 2),
363 0x00000000,
364 (0x0000 << 16) | (0x30f08 >> 2),
365 0x00000000,
366 (0x0001 << 16) | (0x30f08 >> 2),
367 0x00000000,
368 (0x0000 << 16) | (0x30f0c >> 2),
369 0x00000000,
370 (0x0001 << 16) | (0x30f0c >> 2),
371 0x00000000,
372 (0x0600 << 16) | (0x9b7c >> 2),
373 0x00000000,
374 (0x0e00 << 16) | (0x8a14 >> 2),
375 0x00000000,
376 (0x0e00 << 16) | (0x8a18 >> 2),
377 0x00000000,
378 (0x0600 << 16) | (0x30a00 >> 2),
379 0x00000000,
380 (0x0e00 << 16) | (0x8bf0 >> 2),
381 0x00000000,
382 (0x0e00 << 16) | (0x8bcc >> 2),
383 0x00000000,
384 (0x0e00 << 16) | (0x8b24 >> 2),
385 0x00000000,
386 (0x0e00 << 16) | (0x30a04 >> 2),
387 0x00000000,
388 (0x0600 << 16) | (0x30a10 >> 2),
389 0x00000000,
390 (0x0600 << 16) | (0x30a14 >> 2),
391 0x00000000,
392 (0x0600 << 16) | (0x30a18 >> 2),
393 0x00000000,
394 (0x0600 << 16) | (0x30a2c >> 2),
395 0x00000000,
396 (0x0e00 << 16) | (0xc700 >> 2),
397 0x00000000,
398 (0x0e00 << 16) | (0xc704 >> 2),
399 0x00000000,
400 (0x0e00 << 16) | (0xc708 >> 2),
401 0x00000000,
402 (0x0e00 << 16) | (0xc768 >> 2),
403 0x00000000,
404 (0x0400 << 16) | (0xc770 >> 2),
405 0x00000000,
406 (0x0400 << 16) | (0xc774 >> 2),
407 0x00000000,
408 (0x0400 << 16) | (0xc778 >> 2),
409 0x00000000,
410 (0x0400 << 16) | (0xc77c >> 2),
411 0x00000000,
412 (0x0400 << 16) | (0xc780 >> 2),
413 0x00000000,
414 (0x0400 << 16) | (0xc784 >> 2),
415 0x00000000,
416 (0x0400 << 16) | (0xc788 >> 2),
417 0x00000000,
418 (0x0400 << 16) | (0xc78c >> 2),
419 0x00000000,
420 (0x0400 << 16) | (0xc798 >> 2),
421 0x00000000,
422 (0x0400 << 16) | (0xc79c >> 2),
423 0x00000000,
424 (0x0400 << 16) | (0xc7a0 >> 2),
425 0x00000000,
426 (0x0400 << 16) | (0xc7a4 >> 2),
427 0x00000000,
428 (0x0400 << 16) | (0xc7a8 >> 2),
429 0x00000000,
430 (0x0400 << 16) | (0xc7ac >> 2),
431 0x00000000,
432 (0x0400 << 16) | (0xc7b0 >> 2),
433 0x00000000,
434 (0x0400 << 16) | (0xc7b4 >> 2),
435 0x00000000,
436 (0x0e00 << 16) | (0x9100 >> 2),
437 0x00000000,
438 (0x0e00 << 16) | (0x3c010 >> 2),
439 0x00000000,
440 (0x0e00 << 16) | (0x92a8 >> 2),
441 0x00000000,
442 (0x0e00 << 16) | (0x92ac >> 2),
443 0x00000000,
444 (0x0e00 << 16) | (0x92b4 >> 2),
445 0x00000000,
446 (0x0e00 << 16) | (0x92b8 >> 2),
447 0x00000000,
448 (0x0e00 << 16) | (0x92bc >> 2),
449 0x00000000,
450 (0x0e00 << 16) | (0x92c0 >> 2),
451 0x00000000,
452 (0x0e00 << 16) | (0x92c4 >> 2),
453 0x00000000,
454 (0x0e00 << 16) | (0x92c8 >> 2),
455 0x00000000,
456 (0x0e00 << 16) | (0x92cc >> 2),
457 0x00000000,
458 (0x0e00 << 16) | (0x92d0 >> 2),
459 0x00000000,
460 (0x0e00 << 16) | (0x8c00 >> 2),
461 0x00000000,
462 (0x0e00 << 16) | (0x8c04 >> 2),
463 0x00000000,
464 (0x0e00 << 16) | (0x8c20 >> 2),
465 0x00000000,
466 (0x0e00 << 16) | (0x8c38 >> 2),
467 0x00000000,
468 (0x0e00 << 16) | (0x8c3c >> 2),
469 0x00000000,
470 (0x0e00 << 16) | (0xae00 >> 2),
471 0x00000000,
472 (0x0e00 << 16) | (0x9604 >> 2),
473 0x00000000,
474 (0x0e00 << 16) | (0xac08 >> 2),
475 0x00000000,
476 (0x0e00 << 16) | (0xac0c >> 2),
477 0x00000000,
478 (0x0e00 << 16) | (0xac10 >> 2),
479 0x00000000,
480 (0x0e00 << 16) | (0xac14 >> 2),
481 0x00000000,
482 (0x0e00 << 16) | (0xac58 >> 2),
483 0x00000000,
484 (0x0e00 << 16) | (0xac68 >> 2),
485 0x00000000,
486 (0x0e00 << 16) | (0xac6c >> 2),
487 0x00000000,
488 (0x0e00 << 16) | (0xac70 >> 2),
489 0x00000000,
490 (0x0e00 << 16) | (0xac74 >> 2),
491 0x00000000,
492 (0x0e00 << 16) | (0xac78 >> 2),
493 0x00000000,
494 (0x0e00 << 16) | (0xac7c >> 2),
495 0x00000000,
496 (0x0e00 << 16) | (0xac80 >> 2),
497 0x00000000,
498 (0x0e00 << 16) | (0xac84 >> 2),
499 0x00000000,
500 (0x0e00 << 16) | (0xac88 >> 2),
501 0x00000000,
502 (0x0e00 << 16) | (0xac8c >> 2),
503 0x00000000,
504 (0x0e00 << 16) | (0x970c >> 2),
505 0x00000000,
506 (0x0e00 << 16) | (0x9714 >> 2),
507 0x00000000,
508 (0x0e00 << 16) | (0x9718 >> 2),
509 0x00000000,
510 (0x0e00 << 16) | (0x971c >> 2),
511 0x00000000,
512 (0x0e00 << 16) | (0x31068 >> 2),
513 0x00000000,
514 (0x4e00 << 16) | (0x31068 >> 2),
515 0x00000000,
516 (0x5e00 << 16) | (0x31068 >> 2),
517 0x00000000,
518 (0x6e00 << 16) | (0x31068 >> 2),
519 0x00000000,
520 (0x7e00 << 16) | (0x31068 >> 2),
521 0x00000000,
522 (0x8e00 << 16) | (0x31068 >> 2),
523 0x00000000,
524 (0x9e00 << 16) | (0x31068 >> 2),
525 0x00000000,
526 (0xae00 << 16) | (0x31068 >> 2),
527 0x00000000,
528 (0xbe00 << 16) | (0x31068 >> 2),
529 0x00000000,
530 (0x0e00 << 16) | (0xcd10 >> 2),
531 0x00000000,
532 (0x0e00 << 16) | (0xcd14 >> 2),
533 0x00000000,
534 (0x0e00 << 16) | (0x88b0 >> 2),
535 0x00000000,
536 (0x0e00 << 16) | (0x88b4 >> 2),
537 0x00000000,
538 (0x0e00 << 16) | (0x88b8 >> 2),
539 0x00000000,
540 (0x0e00 << 16) | (0x88bc >> 2),
541 0x00000000,
542 (0x0400 << 16) | (0x89c0 >> 2),
543 0x00000000,
544 (0x0e00 << 16) | (0x88c4 >> 2),
545 0x00000000,
546 (0x0e00 << 16) | (0x88c8 >> 2),
547 0x00000000,
548 (0x0e00 << 16) | (0x88d0 >> 2),
549 0x00000000,
550 (0x0e00 << 16) | (0x88d4 >> 2),
551 0x00000000,
552 (0x0e00 << 16) | (0x88d8 >> 2),
553 0x00000000,
554 (0x0e00 << 16) | (0x8980 >> 2),
555 0x00000000,
556 (0x0e00 << 16) | (0x30938 >> 2),
557 0x00000000,
558 (0x0e00 << 16) | (0x3093c >> 2),
559 0x00000000,
560 (0x0e00 << 16) | (0x30940 >> 2),
561 0x00000000,
562 (0x0e00 << 16) | (0x89a0 >> 2),
563 0x00000000,
564 (0x0e00 << 16) | (0x30900 >> 2),
565 0x00000000,
566 (0x0e00 << 16) | (0x30904 >> 2),
567 0x00000000,
568 (0x0e00 << 16) | (0x89b4 >> 2),
569 0x00000000,
570 (0x0e00 << 16) | (0x3c210 >> 2),
571 0x00000000,
572 (0x0e00 << 16) | (0x3c214 >> 2),
573 0x00000000,
574 (0x0e00 << 16) | (0x3c218 >> 2),
575 0x00000000,
576 (0x0e00 << 16) | (0x8904 >> 2),
577 0x00000000,
578 0x5,
579 (0x0e00 << 16) | (0x8c28 >> 2),
580 (0x0e00 << 16) | (0x8c2c >> 2),
581 (0x0e00 << 16) | (0x8c30 >> 2),
582 (0x0e00 << 16) | (0x8c34 >> 2),
583 (0x0e00 << 16) | (0x9600 >> 2),
584};
585
586static const u32 kalindi_rlc_save_restore_register_list[] =
587{
588 (0x0e00 << 16) | (0xc12c >> 2),
589 0x00000000,
590 (0x0e00 << 16) | (0xc140 >> 2),
591 0x00000000,
592 (0x0e00 << 16) | (0xc150 >> 2),
593 0x00000000,
594 (0x0e00 << 16) | (0xc15c >> 2),
595 0x00000000,
596 (0x0e00 << 16) | (0xc168 >> 2),
597 0x00000000,
598 (0x0e00 << 16) | (0xc170 >> 2),
599 0x00000000,
600 (0x0e00 << 16) | (0xc204 >> 2),
601 0x00000000,
602 (0x0e00 << 16) | (0xc2b4 >> 2),
603 0x00000000,
604 (0x0e00 << 16) | (0xc2b8 >> 2),
605 0x00000000,
606 (0x0e00 << 16) | (0xc2bc >> 2),
607 0x00000000,
608 (0x0e00 << 16) | (0xc2c0 >> 2),
609 0x00000000,
610 (0x0e00 << 16) | (0x8228 >> 2),
611 0x00000000,
612 (0x0e00 << 16) | (0x829c >> 2),
613 0x00000000,
614 (0x0e00 << 16) | (0x869c >> 2),
615 0x00000000,
616 (0x0600 << 16) | (0x98f4 >> 2),
617 0x00000000,
618 (0x0e00 << 16) | (0x98f8 >> 2),
619 0x00000000,
620 (0x0e00 << 16) | (0x9900 >> 2),
621 0x00000000,
622 (0x0e00 << 16) | (0xc260 >> 2),
623 0x00000000,
624 (0x0e00 << 16) | (0x90e8 >> 2),
625 0x00000000,
626 (0x0e00 << 16) | (0x3c000 >> 2),
627 0x00000000,
628 (0x0e00 << 16) | (0x3c00c >> 2),
629 0x00000000,
630 (0x0e00 << 16) | (0x8c1c >> 2),
631 0x00000000,
632 (0x0e00 << 16) | (0x9700 >> 2),
633 0x00000000,
634 (0x0e00 << 16) | (0xcd20 >> 2),
635 0x00000000,
636 (0x4e00 << 16) | (0xcd20 >> 2),
637 0x00000000,
638 (0x5e00 << 16) | (0xcd20 >> 2),
639 0x00000000,
640 (0x6e00 << 16) | (0xcd20 >> 2),
641 0x00000000,
642 (0x7e00 << 16) | (0xcd20 >> 2),
643 0x00000000,
644 (0x0e00 << 16) | (0x89bc >> 2),
645 0x00000000,
646 (0x0e00 << 16) | (0x8900 >> 2),
647 0x00000000,
648 0x3,
649 (0x0e00 << 16) | (0xc130 >> 2),
650 0x00000000,
651 (0x0e00 << 16) | (0xc134 >> 2),
652 0x00000000,
653 (0x0e00 << 16) | (0xc1fc >> 2),
654 0x00000000,
655 (0x0e00 << 16) | (0xc208 >> 2),
656 0x00000000,
657 (0x0e00 << 16) | (0xc264 >> 2),
658 0x00000000,
659 (0x0e00 << 16) | (0xc268 >> 2),
660 0x00000000,
661 (0x0e00 << 16) | (0xc26c >> 2),
662 0x00000000,
663 (0x0e00 << 16) | (0xc270 >> 2),
664 0x00000000,
665 (0x0e00 << 16) | (0xc274 >> 2),
666 0x00000000,
667 (0x0e00 << 16) | (0xc28c >> 2),
668 0x00000000,
669 (0x0e00 << 16) | (0xc290 >> 2),
670 0x00000000,
671 (0x0e00 << 16) | (0xc294 >> 2),
672 0x00000000,
673 (0x0e00 << 16) | (0xc298 >> 2),
674 0x00000000,
675 (0x0e00 << 16) | (0xc2a0 >> 2),
676 0x00000000,
677 (0x0e00 << 16) | (0xc2a4 >> 2),
678 0x00000000,
679 (0x0e00 << 16) | (0xc2a8 >> 2),
680 0x00000000,
681 (0x0e00 << 16) | (0xc2ac >> 2),
682 0x00000000,
683 (0x0e00 << 16) | (0x301d0 >> 2),
684 0x00000000,
685 (0x0e00 << 16) | (0x30238 >> 2),
686 0x00000000,
687 (0x0e00 << 16) | (0x30250 >> 2),
688 0x00000000,
689 (0x0e00 << 16) | (0x30254 >> 2),
690 0x00000000,
691 (0x0e00 << 16) | (0x30258 >> 2),
692 0x00000000,
693 (0x0e00 << 16) | (0x3025c >> 2),
694 0x00000000,
695 (0x4e00 << 16) | (0xc900 >> 2),
696 0x00000000,
697 (0x5e00 << 16) | (0xc900 >> 2),
698 0x00000000,
699 (0x6e00 << 16) | (0xc900 >> 2),
700 0x00000000,
701 (0x7e00 << 16) | (0xc900 >> 2),
702 0x00000000,
703 (0x4e00 << 16) | (0xc904 >> 2),
704 0x00000000,
705 (0x5e00 << 16) | (0xc904 >> 2),
706 0x00000000,
707 (0x6e00 << 16) | (0xc904 >> 2),
708 0x00000000,
709 (0x7e00 << 16) | (0xc904 >> 2),
710 0x00000000,
711 (0x4e00 << 16) | (0xc908 >> 2),
712 0x00000000,
713 (0x5e00 << 16) | (0xc908 >> 2),
714 0x00000000,
715 (0x6e00 << 16) | (0xc908 >> 2),
716 0x00000000,
717 (0x7e00 << 16) | (0xc908 >> 2),
718 0x00000000,
719 (0x4e00 << 16) | (0xc90c >> 2),
720 0x00000000,
721 (0x5e00 << 16) | (0xc90c >> 2),
722 0x00000000,
723 (0x6e00 << 16) | (0xc90c >> 2),
724 0x00000000,
725 (0x7e00 << 16) | (0xc90c >> 2),
726 0x00000000,
727 (0x4e00 << 16) | (0xc910 >> 2),
728 0x00000000,
729 (0x5e00 << 16) | (0xc910 >> 2),
730 0x00000000,
731 (0x6e00 << 16) | (0xc910 >> 2),
732 0x00000000,
733 (0x7e00 << 16) | (0xc910 >> 2),
734 0x00000000,
735 (0x0e00 << 16) | (0xc99c >> 2),
736 0x00000000,
737 (0x0e00 << 16) | (0x9834 >> 2),
738 0x00000000,
739 (0x0000 << 16) | (0x30f00 >> 2),
740 0x00000000,
741 (0x0000 << 16) | (0x30f04 >> 2),
742 0x00000000,
743 (0x0000 << 16) | (0x30f08 >> 2),
744 0x00000000,
745 (0x0000 << 16) | (0x30f0c >> 2),
746 0x00000000,
747 (0x0600 << 16) | (0x9b7c >> 2),
748 0x00000000,
749 (0x0e00 << 16) | (0x8a14 >> 2),
750 0x00000000,
751 (0x0e00 << 16) | (0x8a18 >> 2),
752 0x00000000,
753 (0x0600 << 16) | (0x30a00 >> 2),
754 0x00000000,
755 (0x0e00 << 16) | (0x8bf0 >> 2),
756 0x00000000,
757 (0x0e00 << 16) | (0x8bcc >> 2),
758 0x00000000,
759 (0x0e00 << 16) | (0x8b24 >> 2),
760 0x00000000,
761 (0x0e00 << 16) | (0x30a04 >> 2),
762 0x00000000,
763 (0x0600 << 16) | (0x30a10 >> 2),
764 0x00000000,
765 (0x0600 << 16) | (0x30a14 >> 2),
766 0x00000000,
767 (0x0600 << 16) | (0x30a18 >> 2),
768 0x00000000,
769 (0x0600 << 16) | (0x30a2c >> 2),
770 0x00000000,
771 (0x0e00 << 16) | (0xc700 >> 2),
772 0x00000000,
773 (0x0e00 << 16) | (0xc704 >> 2),
774 0x00000000,
775 (0x0e00 << 16) | (0xc708 >> 2),
776 0x00000000,
777 (0x0e00 << 16) | (0xc768 >> 2),
778 0x00000000,
779 (0x0400 << 16) | (0xc770 >> 2),
780 0x00000000,
781 (0x0400 << 16) | (0xc774 >> 2),
782 0x00000000,
783 (0x0400 << 16) | (0xc798 >> 2),
784 0x00000000,
785 (0x0400 << 16) | (0xc79c >> 2),
786 0x00000000,
787 (0x0e00 << 16) | (0x9100 >> 2),
788 0x00000000,
789 (0x0e00 << 16) | (0x3c010 >> 2),
790 0x00000000,
791 (0x0e00 << 16) | (0x8c00 >> 2),
792 0x00000000,
793 (0x0e00 << 16) | (0x8c04 >> 2),
794 0x00000000,
795 (0x0e00 << 16) | (0x8c20 >> 2),
796 0x00000000,
797 (0x0e00 << 16) | (0x8c38 >> 2),
798 0x00000000,
799 (0x0e00 << 16) | (0x8c3c >> 2),
800 0x00000000,
801 (0x0e00 << 16) | (0xae00 >> 2),
802 0x00000000,
803 (0x0e00 << 16) | (0x9604 >> 2),
804 0x00000000,
805 (0x0e00 << 16) | (0xac08 >> 2),
806 0x00000000,
807 (0x0e00 << 16) | (0xac0c >> 2),
808 0x00000000,
809 (0x0e00 << 16) | (0xac10 >> 2),
810 0x00000000,
811 (0x0e00 << 16) | (0xac14 >> 2),
812 0x00000000,
813 (0x0e00 << 16) | (0xac58 >> 2),
814 0x00000000,
815 (0x0e00 << 16) | (0xac68 >> 2),
816 0x00000000,
817 (0x0e00 << 16) | (0xac6c >> 2),
818 0x00000000,
819 (0x0e00 << 16) | (0xac70 >> 2),
820 0x00000000,
821 (0x0e00 << 16) | (0xac74 >> 2),
822 0x00000000,
823 (0x0e00 << 16) | (0xac78 >> 2),
824 0x00000000,
825 (0x0e00 << 16) | (0xac7c >> 2),
826 0x00000000,
827 (0x0e00 << 16) | (0xac80 >> 2),
828 0x00000000,
829 (0x0e00 << 16) | (0xac84 >> 2),
830 0x00000000,
831 (0x0e00 << 16) | (0xac88 >> 2),
832 0x00000000,
833 (0x0e00 << 16) | (0xac8c >> 2),
834 0x00000000,
835 (0x0e00 << 16) | (0x970c >> 2),
836 0x00000000,
837 (0x0e00 << 16) | (0x9714 >> 2),
838 0x00000000,
839 (0x0e00 << 16) | (0x9718 >> 2),
840 0x00000000,
841 (0x0e00 << 16) | (0x971c >> 2),
842 0x00000000,
843 (0x0e00 << 16) | (0x31068 >> 2),
844 0x00000000,
845 (0x4e00 << 16) | (0x31068 >> 2),
846 0x00000000,
847 (0x5e00 << 16) | (0x31068 >> 2),
848 0x00000000,
849 (0x6e00 << 16) | (0x31068 >> 2),
850 0x00000000,
851 (0x7e00 << 16) | (0x31068 >> 2),
852 0x00000000,
853 (0x0e00 << 16) | (0xcd10 >> 2),
854 0x00000000,
855 (0x0e00 << 16) | (0xcd14 >> 2),
856 0x00000000,
857 (0x0e00 << 16) | (0x88b0 >> 2),
858 0x00000000,
859 (0x0e00 << 16) | (0x88b4 >> 2),
860 0x00000000,
861 (0x0e00 << 16) | (0x88b8 >> 2),
862 0x00000000,
863 (0x0e00 << 16) | (0x88bc >> 2),
864 0x00000000,
865 (0x0400 << 16) | (0x89c0 >> 2),
866 0x00000000,
867 (0x0e00 << 16) | (0x88c4 >> 2),
868 0x00000000,
869 (0x0e00 << 16) | (0x88c8 >> 2),
870 0x00000000,
871 (0x0e00 << 16) | (0x88d0 >> 2),
872 0x00000000,
873 (0x0e00 << 16) | (0x88d4 >> 2),
874 0x00000000,
875 (0x0e00 << 16) | (0x88d8 >> 2),
876 0x00000000,
877 (0x0e00 << 16) | (0x8980 >> 2),
878 0x00000000,
879 (0x0e00 << 16) | (0x30938 >> 2),
880 0x00000000,
881 (0x0e00 << 16) | (0x3093c >> 2),
882 0x00000000,
883 (0x0e00 << 16) | (0x30940 >> 2),
884 0x00000000,
885 (0x0e00 << 16) | (0x89a0 >> 2),
886 0x00000000,
887 (0x0e00 << 16) | (0x30900 >> 2),
888 0x00000000,
889 (0x0e00 << 16) | (0x30904 >> 2),
890 0x00000000,
891 (0x0e00 << 16) | (0x89b4 >> 2),
892 0x00000000,
893 (0x0e00 << 16) | (0x3e1fc >> 2),
894 0x00000000,
895 (0x0e00 << 16) | (0x3c210 >> 2),
896 0x00000000,
897 (0x0e00 << 16) | (0x3c214 >> 2),
898 0x00000000,
899 (0x0e00 << 16) | (0x3c218 >> 2),
900 0x00000000,
901 (0x0e00 << 16) | (0x8904 >> 2),
902 0x00000000,
903 0x5,
904 (0x0e00 << 16) | (0x8c28 >> 2),
905 (0x0e00 << 16) | (0x8c2c >> 2),
906 (0x0e00 << 16) | (0x8c30 >> 2),
907 (0x0e00 << 16) | (0x8c34 >> 2),
908 (0x0e00 << 16) | (0x9600 >> 2),
909};
910
101static const u32 bonaire_golden_spm_registers[] = 911static const u32 bonaire_golden_spm_registers[] =
102{ 912{
103 0x30800, 0xe0ffffff, 0xe0000000 913 0x30800, 0xe0ffffff, 0xe0000000
@@ -744,7 +1554,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
744 const char *chip_name; 1554 const char *chip_name;
745 size_t pfp_req_size, me_req_size, ce_req_size, 1555 size_t pfp_req_size, me_req_size, ce_req_size,
746 mec_req_size, rlc_req_size, mc_req_size, 1556 mec_req_size, rlc_req_size, mc_req_size,
747 sdma_req_size; 1557 sdma_req_size, smc_req_size;
748 char fw_name[30]; 1558 char fw_name[30];
749 int err; 1559 int err;
750 1560
@@ -760,6 +1570,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
760 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4; 1570 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
761 mc_req_size = CIK_MC_UCODE_SIZE * 4; 1571 mc_req_size = CIK_MC_UCODE_SIZE * 4;
762 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1572 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1573 smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
763 break; 1574 break;
764 case CHIP_KAVERI: 1575 case CHIP_KAVERI:
765 chip_name = "KAVERI"; 1576 chip_name = "KAVERI";
@@ -851,7 +1662,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
851 err = -EINVAL; 1662 err = -EINVAL;
852 } 1663 }
853 1664
854 /* No MC ucode on APUs */ 1665 /* No SMC, MC ucode on APUs */
855 if (!(rdev->flags & RADEON_IS_IGP)) { 1666 if (!(rdev->flags & RADEON_IS_IGP)) {
856 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 1667 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
857 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); 1668 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
@@ -863,6 +1674,21 @@ static int cik_init_microcode(struct radeon_device *rdev)
863 rdev->mc_fw->size, fw_name); 1674 rdev->mc_fw->size, fw_name);
864 err = -EINVAL; 1675 err = -EINVAL;
865 } 1676 }
1677
1678 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
1679 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1680 if (err) {
1681 printk(KERN_ERR
1682 "smc: error loading firmware \"%s\"\n",
1683 fw_name);
1684 release_firmware(rdev->smc_fw);
1685 rdev->smc_fw = NULL;
1686 } else if (rdev->smc_fw->size != smc_req_size) {
1687 printk(KERN_ERR
1688 "cik_smc: Bogus length %zu in firmware \"%s\"\n",
1689 rdev->smc_fw->size, fw_name);
1690 err = -EINVAL;
1691 }
866 } 1692 }
867 1693
868out: 1694out:
@@ -881,6 +1707,8 @@ out:
881 rdev->rlc_fw = NULL; 1707 rdev->rlc_fw = NULL;
882 release_firmware(rdev->mc_fw); 1708 release_firmware(rdev->mc_fw);
883 rdev->mc_fw = NULL; 1709 rdev->mc_fw = NULL;
1710 release_firmware(rdev->smc_fw);
1711 rdev->smc_fw = NULL;
884 } 1712 }
885 return err; 1713 return err;
886} 1714}
@@ -1880,7 +2708,46 @@ static void cik_gpu_init(struct radeon_device *rdev)
1880 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; 2708 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
1881 break; 2709 break;
1882 case CHIP_KAVERI: 2710 case CHIP_KAVERI:
1883 /* TODO */ 2711 rdev->config.cik.max_shader_engines = 1;
2712 rdev->config.cik.max_tile_pipes = 4;
2713 if ((rdev->pdev->device == 0x1304) ||
2714 (rdev->pdev->device == 0x1305) ||
2715 (rdev->pdev->device == 0x130C) ||
2716 (rdev->pdev->device == 0x130F) ||
2717 (rdev->pdev->device == 0x1310) ||
2718 (rdev->pdev->device == 0x1311) ||
2719 (rdev->pdev->device == 0x131C)) {
2720 rdev->config.cik.max_cu_per_sh = 8;
2721 rdev->config.cik.max_backends_per_se = 2;
2722 } else if ((rdev->pdev->device == 0x1309) ||
2723 (rdev->pdev->device == 0x130A) ||
2724 (rdev->pdev->device == 0x130D) ||
2725 (rdev->pdev->device == 0x1313)) {
2726 rdev->config.cik.max_cu_per_sh = 6;
2727 rdev->config.cik.max_backends_per_se = 2;
2728 } else if ((rdev->pdev->device == 0x1306) ||
2729 (rdev->pdev->device == 0x1307) ||
2730 (rdev->pdev->device == 0x130B) ||
2731 (rdev->pdev->device == 0x130E) ||
2732 (rdev->pdev->device == 0x1315) ||
2733 (rdev->pdev->device == 0x131B)) {
2734 rdev->config.cik.max_cu_per_sh = 4;
2735 rdev->config.cik.max_backends_per_se = 1;
2736 } else {
2737 rdev->config.cik.max_cu_per_sh = 3;
2738 rdev->config.cik.max_backends_per_se = 1;
2739 }
2740 rdev->config.cik.max_sh_per_se = 1;
2741 rdev->config.cik.max_texture_channel_caches = 4;
2742 rdev->config.cik.max_gprs = 256;
2743 rdev->config.cik.max_gs_threads = 16;
2744 rdev->config.cik.max_hw_contexts = 8;
2745
2746 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
2747 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
2748 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
2749 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
2750 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
1884 break; 2751 break;
1885 case CHIP_KABINI: 2752 case CHIP_KABINI:
1886 default: 2753 default:
@@ -2535,8 +3402,8 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
2535 /* ring 0 - compute and gfx */ 3402 /* ring 0 - compute and gfx */
2536 /* Set ring buffer size */ 3403 /* Set ring buffer size */
2537 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 3404 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2538 rb_bufsz = drm_order(ring->ring_size / 8); 3405 rb_bufsz = order_base_2(ring->ring_size / 8);
2539 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 3406 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2540#ifdef __BIG_ENDIAN 3407#ifdef __BIG_ENDIAN
2541 tmp |= BUF_SWAP_32BIT; 3408 tmp |= BUF_SWAP_32BIT;
2542#endif 3409#endif
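/* order_base_2() comes from <linux/log2.h>: it rounds its argument up to
 * the next power of two and returns the bit position, the same contract as
 * the drm_order() it replaces, e.g.:
 *
 *	order_base_2(4096) == 12
 *	order_base_2(4097) == 13
 */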
@@ -2587,11 +3454,12 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
2587 if (rdev->wb.enabled) { 3454 if (rdev->wb.enabled) {
2588 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); 3455 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
2589 } else { 3456 } else {
3457 mutex_lock(&rdev->srbm_mutex);
2590 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); 3458 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
2591 rptr = RREG32(CP_HQD_PQ_RPTR); 3459 rptr = RREG32(CP_HQD_PQ_RPTR);
2592 cik_srbm_select(rdev, 0, 0, 0, 0); 3460 cik_srbm_select(rdev, 0, 0, 0, 0);
3461 mutex_unlock(&rdev->srbm_mutex);
2593 } 3462 }
2594 rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
2595 3463
2596 return rptr; 3464 return rptr;
2597} 3465}
@@ -2604,11 +3472,12 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
2604 if (rdev->wb.enabled) { 3472 if (rdev->wb.enabled) {
2605 wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]); 3473 wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
2606 } else { 3474 } else {
3475 mutex_lock(&rdev->srbm_mutex);
2607 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); 3476 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
2608 wptr = RREG32(CP_HQD_PQ_WPTR); 3477 wptr = RREG32(CP_HQD_PQ_WPTR);
2609 cik_srbm_select(rdev, 0, 0, 0, 0); 3478 cik_srbm_select(rdev, 0, 0, 0, 0);
3479 mutex_unlock(&rdev->srbm_mutex);
2610 } 3480 }
2611 wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
2612 3481
2613 return wptr; 3482 return wptr;
2614} 3483}
@@ -2616,10 +3485,8 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
2616void cik_compute_ring_set_wptr(struct radeon_device *rdev, 3485void cik_compute_ring_set_wptr(struct radeon_device *rdev,
2617 struct radeon_ring *ring) 3486 struct radeon_ring *ring)
2618{ 3487{
2619 u32 wptr = (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask; 3488 rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
2620 3489 WDOORBELL32(ring->doorbell_offset, ring->wptr);
2621 rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(wptr);
2622 WDOORBELL32(ring->doorbell_offset, wptr);
2623} 3490}
2624 3491
2625/** 3492/**
@@ -2897,6 +3764,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
2897 WREG32(CP_CPF_DEBUG, tmp); 3764 WREG32(CP_CPF_DEBUG, tmp);
2898 3765
2899 /* init the pipes */ 3766 /* init the pipes */
3767 mutex_lock(&rdev->srbm_mutex);
2900 for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) { 3768 for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
2901 int me = (i < 4) ? 1 : 2; 3769 int me = (i < 4) ? 1 : 2;
2902 int pipe = (i < 4) ? i : (i - 4); 3770 int pipe = (i < 4) ? i : (i - 4);
@@ -2915,10 +3783,11 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
2915 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 3783 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2916 tmp = RREG32(CP_HPD_EOP_CONTROL); 3784 tmp = RREG32(CP_HPD_EOP_CONTROL);
2917 tmp &= ~EOP_SIZE_MASK; 3785 tmp &= ~EOP_SIZE_MASK;
2918 tmp |= drm_order(MEC_HPD_SIZE / 8); 3786 tmp |= order_base_2(MEC_HPD_SIZE / 8);
2919 WREG32(CP_HPD_EOP_CONTROL, tmp); 3787 WREG32(CP_HPD_EOP_CONTROL, tmp);
2920 } 3788 }
2921 cik_srbm_select(rdev, 0, 0, 0, 0); 3789 cik_srbm_select(rdev, 0, 0, 0, 0);
3790 mutex_unlock(&rdev->srbm_mutex);
2922 3791
2923 /* init the queues. Just two for now. */ 3792 /* init the queues. Just two for now. */
2924 for (i = 0; i < 2; i++) { 3793 for (i = 0; i < 2; i++) {
@@ -2972,6 +3841,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
2972 mqd->static_thread_mgmt23[0] = 0xffffffff; 3841 mqd->static_thread_mgmt23[0] = 0xffffffff;
2973 mqd->static_thread_mgmt23[1] = 0xffffffff; 3842 mqd->static_thread_mgmt23[1] = 0xffffffff;
2974 3843
3844 mutex_lock(&rdev->srbm_mutex);
2975 cik_srbm_select(rdev, rdev->ring[idx].me, 3845 cik_srbm_select(rdev, rdev->ring[idx].me,
2976 rdev->ring[idx].pipe, 3846 rdev->ring[idx].pipe,
2977 rdev->ring[idx].queue, 0); 3847 rdev->ring[idx].queue, 0);
@@ -3030,9 +3900,9 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
3030 ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK); 3900 ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
3031 3901
3032 mqd->queue_state.cp_hqd_pq_control |= 3902 mqd->queue_state.cp_hqd_pq_control |=
3033 drm_order(rdev->ring[idx].ring_size / 8); 3903 order_base_2(rdev->ring[idx].ring_size / 8);
3034 mqd->queue_state.cp_hqd_pq_control |= 3904 mqd->queue_state.cp_hqd_pq_control |=
3035 (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8); 3905 (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8);
3036#ifdef __BIG_ENDIAN 3906#ifdef __BIG_ENDIAN
3037 mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT; 3907 mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
3038#endif 3908#endif
@@ -3099,6 +3969,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
3099 WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active); 3969 WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
3100 3970
3101 cik_srbm_select(rdev, 0, 0, 0, 0); 3971 cik_srbm_select(rdev, 0, 0, 0, 0);
3972 mutex_unlock(&rdev->srbm_mutex);
3102 3973
3103 radeon_bo_kunmap(rdev->ring[idx].mqd_obj); 3974 radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
3104 radeon_bo_unreserve(rdev->ring[idx].mqd_obj); 3975 radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
@@ -3142,13 +4013,6 @@ static int cik_cp_resume(struct radeon_device *rdev)
3142{ 4013{
3143 int r; 4014 int r;
3144 4015
3145 /* Reset all cp blocks */
3146 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
3147 RREG32(GRBM_SOFT_RESET);
3148 mdelay(15);
3149 WREG32(GRBM_SOFT_RESET, 0);
3150 RREG32(GRBM_SOFT_RESET);
3151
3152 r = cik_cp_load_microcode(rdev); 4016 r = cik_cp_load_microcode(rdev);
3153 if (r) 4017 if (r)
3154 return r; 4018 return r;
@@ -3163,579 +4027,6 @@ static int cik_cp_resume(struct radeon_device *rdev)
3163 return 0; 4027 return 0;
3164} 4028}
3165 4029
3166/*
3167 * sDMA - System DMA
3168 * Starting with CIK, the GPU has new asynchronous
3169 * DMA engines. These engines are used for compute
3170 * and gfx. There are two DMA engines (SDMA0, SDMA1)
3171 * and each one supports 1 ring buffer used for gfx
3172 * and 2 queues used for compute.
3173 *
3174 * The programming model is very similar to the CP
3175 * (ring buffer, IBs, etc.), but sDMA has its own
3176 * packet format that is different from the PM4 format
3177 * used by the CP. sDMA supports copying data, writing
3178 * embedded data, solid fills, and a number of other
3179 * things. It also has support for tiling/detiling of
3180 * buffers.
3181 */
3182/**
3183 * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
3184 *
3185 * @rdev: radeon_device pointer
3186 * @ib: IB object to schedule
3187 *
3188 * Schedule an IB in the DMA ring (CIK).
3189 */
3190void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
3191 struct radeon_ib *ib)
3192{
3193 struct radeon_ring *ring = &rdev->ring[ib->ring];
3194 u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;
3195
3196 if (rdev->wb.enabled) {
3197 u32 next_rptr = ring->wptr + 5;
3198 while ((next_rptr & 7) != 4)
3199 next_rptr++;
3200 next_rptr += 4;
3201 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
3202 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3203 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
3204 radeon_ring_write(ring, 1); /* number of DWs to follow */
3205 radeon_ring_write(ring, next_rptr);
3206 }
3207
3208 /* IB packet must end on an 8 DW boundary */
3209 while ((ring->wptr & 7) != 4)
3210 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
3211 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
3212 radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
3213 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
3214 radeon_ring_write(ring, ib->length_dw);
3215
3216}
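/* The padding loop above writes NOPs until (wptr & 7) == 4 so the 4-dword
 * INDIRECT_BUFFER packet that follows ends on an 8-dword boundary; e.g.
 * from wptr == 6 it emits six NOPs to reach 12, and 12 + 4 == 16. */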
3217
3218/**
3219 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
3220 *
3221 * @rdev: radeon_device pointer
3222 * @fence: radeon fence object
3223 *
3224 * Add a DMA fence packet to the ring to write
3225 * the fence seq number, and a DMA trap packet to generate
3226 * an interrupt if needed (CIK).
3227 */
3228void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
3229 struct radeon_fence *fence)
3230{
3231 struct radeon_ring *ring = &rdev->ring[fence->ring];
3232 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3233 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
3234 SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
3235 u32 ref_and_mask;
3236
3237 if (fence->ring == R600_RING_TYPE_DMA_INDEX)
3238 ref_and_mask = SDMA0;
3239 else
3240 ref_and_mask = SDMA1;
3241
3242 /* write the fence */
3243 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
3244 radeon_ring_write(ring, addr & 0xffffffff);
3245 radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3246 radeon_ring_write(ring, fence->seq);
3247 /* generate an interrupt */
3248 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
3249 /* flush HDP */
3250 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
3251 radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
3252 radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
3253 radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
3254 radeon_ring_write(ring, ref_and_mask); /* MASK */
3255 radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
3256}
3257
3258/**
3259 * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
3260 *
3261 * @rdev: radeon_device pointer
3262 * @ring: radeon_ring structure holding ring information
3263 * @semaphore: radeon semaphore object
3264 * @emit_wait: wait or signal semaphore
3265 *
3266 * Add a DMA semaphore packet to the ring to wait on or signal
3267 * other rings (CIK).
3268 */
3269void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
3270 struct radeon_ring *ring,
3271 struct radeon_semaphore *semaphore,
3272 bool emit_wait)
3273{
3274 u64 addr = semaphore->gpu_addr;
3275 u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
3276
3277 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
3278 radeon_ring_write(ring, addr & 0xfffffff8);
3279 radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3280}
3281
3282/**
3283 * cik_sdma_gfx_stop - stop the gfx async dma engines
3284 *
3285 * @rdev: radeon_device pointer
3286 *
3287 * Stop the gfx async dma ring buffers (CIK).
3288 */
3289static void cik_sdma_gfx_stop(struct radeon_device *rdev)
3290{
3291 u32 rb_cntl, reg_offset;
3292 int i;
3293
3294 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
3295
3296 for (i = 0; i < 2; i++) {
3297 if (i == 0)
3298 reg_offset = SDMA0_REGISTER_OFFSET;
3299 else
3300 reg_offset = SDMA1_REGISTER_OFFSET;
3301 rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
3302 rb_cntl &= ~SDMA_RB_ENABLE;
3303 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
3304 WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
3305 }
3306}
3307
3308/**
3309 * cik_sdma_rlc_stop - stop the compute async dma engines
3310 *
3311 * @rdev: radeon_device pointer
3312 *
3313 * Stop the compute async dma queues (CIK).
3314 */
3315static void cik_sdma_rlc_stop(struct radeon_device *rdev)
3316{
3317 /* XXX todo */
3318}
3319
3320/**
3321 * cik_sdma_enable - halt or unhalt the async dma engines
3322 *
3323 * @rdev: radeon_device pointer
3324 * @enable: enable/disable the DMA MEs.
3325 *
3326 * Halt or unhalt the async dma engines (CIK).
3327 */
3328static void cik_sdma_enable(struct radeon_device *rdev, bool enable)
3329{
3330 u32 me_cntl, reg_offset;
3331 int i;
3332
3333 for (i = 0; i < 2; i++) {
3334 if (i == 0)
3335 reg_offset = SDMA0_REGISTER_OFFSET;
3336 else
3337 reg_offset = SDMA1_REGISTER_OFFSET;
3338 me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
3339 if (enable)
3340 me_cntl &= ~SDMA_HALT;
3341 else
3342 me_cntl |= SDMA_HALT;
3343 WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
3344 }
3345}
3346
3347/**
3348 * cik_sdma_gfx_resume - setup and start the async dma engines
3349 *
3350 * @rdev: radeon_device pointer
3351 *
3352 * Set up the gfx DMA ring buffers and enable them (CIK).
3353 * Returns 0 for success, error for failure.
3354 */
3355static int cik_sdma_gfx_resume(struct radeon_device *rdev)
3356{
3357 struct radeon_ring *ring;
3358 u32 rb_cntl, ib_cntl;
3359 u32 rb_bufsz;
3360 u32 reg_offset, wb_offset;
3361 int i, r;
3362
3363 for (i = 0; i < 2; i++) {
3364 if (i == 0) {
3365 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
3366 reg_offset = SDMA0_REGISTER_OFFSET;
3367 wb_offset = R600_WB_DMA_RPTR_OFFSET;
3368 } else {
3369 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
3370 reg_offset = SDMA1_REGISTER_OFFSET;
3371 wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
3372 }
3373
3374 WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
3375 WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
3376
3377 /* Set ring buffer size in dwords */
3378 rb_bufsz = drm_order(ring->ring_size / 4);
3379 rb_cntl = rb_bufsz << 1;
3380#ifdef __BIG_ENDIAN
3381 rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
3382#endif
3383 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
3384
3385 /* Initialize the ring buffer's read and write pointers */
3386 WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
3387 WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);
3388
3389 /* set the wb address whether it's enabled or not */
3390 WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
3391 upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
3392 WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
3393 ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
3394
3395 if (rdev->wb.enabled)
3396 rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;
3397
3398 WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
3399 WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);
3400
3401 ring->wptr = 0;
3402 WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
3403
3404 ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;
3405
3406 /* enable DMA RB */
3407 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);
3408
3409 ib_cntl = SDMA_IB_ENABLE;
3410#ifdef __BIG_ENDIAN
3411 ib_cntl |= SDMA_IB_SWAP_ENABLE;
3412#endif
3413 /* enable DMA IBs */
3414 WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);
3415
3416 ring->ready = true;
3417
3418 r = radeon_ring_test(rdev, ring->idx, ring);
3419 if (r) {
3420 ring->ready = false;
3421 return r;
3422 }
3423 }
3424
3425 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
3426
3427 return 0;
3428}
3429
3430/**
3431 * cik_sdma_rlc_resume - setup and start the async dma engines
3432 *
3433 * @rdev: radeon_device pointer
3434 *
3435 * Set up the compute DMA queues and enable them (CIK).
3436 * Returns 0 for success, error for failure.
3437 */
3438static int cik_sdma_rlc_resume(struct radeon_device *rdev)
3439{
3440 /* XXX todo */
3441 return 0;
3442}
3443
3444/**
3445 * cik_sdma_load_microcode - load the sDMA ME ucode
3446 *
3447 * @rdev: radeon_device pointer
3448 *
3449 * Loads the sDMA0/1 ucode.
3450 * Returns 0 for success, -EINVAL if the ucode is not available.
3451 */
3452static int cik_sdma_load_microcode(struct radeon_device *rdev)
3453{
3454 const __be32 *fw_data;
3455 int i;
3456
3457 if (!rdev->sdma_fw)
3458 return -EINVAL;
3459
3460 /* stop the gfx rings and rlc compute queues */
3461 cik_sdma_gfx_stop(rdev);
3462 cik_sdma_rlc_stop(rdev);
3463
3464 /* halt the MEs */
3465 cik_sdma_enable(rdev, false);
3466
3467 /* sdma0 */
3468 fw_data = (const __be32 *)rdev->sdma_fw->data;
3469 WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
3470 for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
3471 WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
3472 WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
3473
3474 /* sdma1 */
3475 fw_data = (const __be32 *)rdev->sdma_fw->data;
3476 WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
3477 for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
3478 WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
3479 WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
3480
3481 WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
3482 WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
3483 return 0;
3484}
3485
3486/**
3487 * cik_sdma_resume - setup and start the async dma engines
3488 *
3489 * @rdev: radeon_device pointer
3490 *
3491 * Set up the DMA engines and enable them (CIK).
3492 * Returns 0 for success, error for failure.
3493 */
3494static int cik_sdma_resume(struct radeon_device *rdev)
3495{
3496 int r;
3497
3498 /* Reset dma */
3499 WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
3500 RREG32(SRBM_SOFT_RESET);
3501 udelay(50);
3502 WREG32(SRBM_SOFT_RESET, 0);
3503 RREG32(SRBM_SOFT_RESET);
3504
3505 r = cik_sdma_load_microcode(rdev);
3506 if (r)
3507 return r;
3508
3509 /* unhalt the MEs */
3510 cik_sdma_enable(rdev, true);
3511
3512 /* start the gfx rings and rlc compute queues */
3513 r = cik_sdma_gfx_resume(rdev);
3514 if (r)
3515 return r;
3516 r = cik_sdma_rlc_resume(rdev);
3517 if (r)
3518 return r;
3519
3520 return 0;
3521}
3522
3523/**
3524 * cik_sdma_fini - tear down the async dma engines
3525 *
3526 * @rdev: radeon_device pointer
3527 *
3528 * Stop the async dma engines and free the rings (CIK).
3529 */
3530static void cik_sdma_fini(struct radeon_device *rdev)
3531{
3532 /* stop the gfx rings and rlc compute queues */
3533 cik_sdma_gfx_stop(rdev);
3534 cik_sdma_rlc_stop(rdev);
3535 /* halt the MEs */
3536 cik_sdma_enable(rdev, false);
3537 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
3538 radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
3539 /* XXX - compute dma queue tear down */
3540}
3541
3542/**
3543 * cik_copy_dma - copy pages using the DMA engine
3544 *
3545 * @rdev: radeon_device pointer
3546 * @src_offset: src GPU address
3547 * @dst_offset: dst GPU address
3548 * @num_gpu_pages: number of GPU pages to xfer
3549 * @fence: radeon fence object
3550 *
3551 * Copy GPU pages using the DMA engine (CIK).
3552 * Used by the radeon ttm implementation to move pages if
3553 * registered as the asic copy callback.
3554 */
3555int cik_copy_dma(struct radeon_device *rdev,
3556 uint64_t src_offset, uint64_t dst_offset,
3557 unsigned num_gpu_pages,
3558 struct radeon_fence **fence)
3559{
3560 struct radeon_semaphore *sem = NULL;
3561 int ring_index = rdev->asic->copy.dma_ring_index;
3562 struct radeon_ring *ring = &rdev->ring[ring_index];
3563 u32 size_in_bytes, cur_size_in_bytes;
3564 int i, num_loops;
3565 int r = 0;
3566
3567 r = radeon_semaphore_create(rdev, &sem);
3568 if (r) {
3569 DRM_ERROR("radeon: moving bo (%d).\n", r);
3570 return r;
3571 }
3572
3573 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
3574 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
3575 r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
3576 if (r) {
3577 DRM_ERROR("radeon: moving bo (%d).\n", r);
3578 radeon_semaphore_free(rdev, &sem, NULL);
3579 return r;
3580 }
3581
3582 if (radeon_fence_need_sync(*fence, ring->idx)) {
3583 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3584 ring->idx);
3585 radeon_fence_note_sync(*fence, ring->idx);
3586 } else {
3587 radeon_semaphore_free(rdev, &sem, NULL);
3588 }
3589
3590 for (i = 0; i < num_loops; i++) {
3591 cur_size_in_bytes = size_in_bytes;
3592 if (cur_size_in_bytes > 0x1fffff)
3593 cur_size_in_bytes = 0x1fffff;
3594 size_in_bytes -= cur_size_in_bytes;
3595 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
3596 radeon_ring_write(ring, cur_size_in_bytes);
3597 radeon_ring_write(ring, 0); /* src/dst endian swap */
3598 radeon_ring_write(ring, src_offset & 0xffffffff);
3599 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
3600 radeon_ring_write(ring, dst_offset & 0xfffffffc);
3601 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
3602 src_offset += cur_size_in_bytes;
3603 dst_offset += cur_size_in_bytes;
3604 }
3605
3606 r = radeon_fence_emit(rdev, fence, ring->idx);
3607 if (r) {
3608 radeon_ring_unlock_undo(rdev, ring);
3609 return r;
3610 }
3611
3612 radeon_ring_unlock_commit(rdev, ring);
3613 radeon_semaphore_free(rdev, &sem, *fence);
3614
3615 return r;
3616}
3617
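/*
 * A worked sketch of the sizing above (assumptions labeled): the SDMA
 * COPY packet carries a 21-bit byte count, so at most 0x1fffff bytes
 * move per packet; each chunk costs 7 ring dwords and the semaphore/
 * fence overhead reserves another 14, hence "num_loops * 7 + 14".
 */
#include <stdint.h>

#define SDMA_MAX_COPY_BYTES 0x1fffffu	/* 21-bit size field */

static uint32_t ring_dwords_for_copy(uint64_t bytes)
{
	/* equivalent of DIV_ROUND_UP(bytes, SDMA_MAX_COPY_BYTES) */
	uint64_t num_loops = (bytes + SDMA_MAX_COPY_BYTES - 1) /
			     SDMA_MAX_COPY_BYTES;

	return (uint32_t)(num_loops * 7 + 14);
}
/* e.g. one 4 KiB GPU page -> 1 loop -> 21 dwords reserved */
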
3618/**
3619 * cik_sdma_ring_test - simple async dma engine test
3620 *
3621 * @rdev: radeon_device pointer
3622 * @ring: radeon_ring structure holding ring information
3623 *
3624 * Test the DMA engine by using it to write a
3625 * value to memory (CIK).
3626 * Returns 0 for success, error for failure.
3627 */
3628int cik_sdma_ring_test(struct radeon_device *rdev,
3629 struct radeon_ring *ring)
3630{
3631 unsigned i;
3632 int r;
3633 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3634 u32 tmp;
3635
3636 if (!ptr) {
3637 DRM_ERROR("invalid vram scratch pointer\n");
3638 return -EINVAL;
3639 }
3640
3641 tmp = 0xCAFEDEAD;
3642 writel(tmp, ptr);
3643
3644 r = radeon_ring_lock(rdev, ring, 4);
3645 if (r) {
3646 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
3647 return r;
3648 }
3649 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
3650 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
3651 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
3652 radeon_ring_write(ring, 1); /* number of DWs to follow */
3653 radeon_ring_write(ring, 0xDEADBEEF);
3654 radeon_ring_unlock_commit(rdev, ring);
3655
3656 for (i = 0; i < rdev->usec_timeout; i++) {
3657 tmp = readl(ptr);
3658 if (tmp == 0xDEADBEEF)
3659 break;
3660 DRM_UDELAY(1);
3661 }
3662
3663 if (i < rdev->usec_timeout) {
3664 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
3665 } else {
3666 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
3667 ring->idx, tmp);
3668 r = -EINVAL;
3669 }
3670 return r;
3671}
3672
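/*
 * The test above follows a write-sentinel-and-poll pattern: seed the
 * scratch word with 0xCAFEDEAD, ask the engine to overwrite it with
 * 0xDEADBEEF, then spin until it flips or the timeout expires. A
 * minimal sketch (delay_us() is a stand-in for DRM_UDELAY):
 */
#include <stdbool.h>
#include <stdint.h>

static bool poll_for_sentinel(volatile uint32_t *scratch, uint32_t expected,
			      unsigned int timeout_us,
			      void (*delay_us)(unsigned int us))
{
	unsigned int i;

	for (i = 0; i < timeout_us; i++) {
		if (*scratch == expected)	/* engine completed the write */
			return true;
		delay_us(1);
	}
	return false;				/* engine never wrote it back */
}
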
3673/**
3674 * cik_sdma_ib_test - test an IB on the DMA engine
3675 *
3676 * @rdev: radeon_device pointer
3677 * @ring: radeon_ring structure holding ring information
3678 *
3679 * Test a simple IB in the DMA ring (CIK).
3680 * Returns 0 on success, error on failure.
3681 */
3682int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3683{
3684 struct radeon_ib ib;
3685 unsigned i;
3686 int r;
3687 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3688 u32 tmp = 0;
3689
3690 if (!ptr) {
3691 DRM_ERROR("invalid vram scratch pointer\n");
3692 return -EINVAL;
3693 }
3694
3695 tmp = 0xCAFEDEAD;
3696 writel(tmp, ptr);
3697
3698 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3699 if (r) {
3700 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3701 return r;
3702 }
3703
3704 ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
3705 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
3706 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
3707 ib.ptr[3] = 1;
3708 ib.ptr[4] = 0xDEADBEEF;
3709 ib.length_dw = 5;
3710
3711 r = radeon_ib_schedule(rdev, &ib, NULL);
3712 if (r) {
3713 radeon_ib_free(rdev, &ib);
3714 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3715 return r;
3716 }
3717 r = radeon_fence_wait(ib.fence, false);
3718 if (r) {
3719 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3720 return r;
3721 }
3722 for (i = 0; i < rdev->usec_timeout; i++) {
3723 tmp = readl(ptr);
3724 if (tmp == 0xDEADBEEF)
3725 break;
3726 DRM_UDELAY(1);
3727 }
3728 if (i < rdev->usec_timeout) {
3729 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3730 } else {
3731 DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
3732 r = -EINVAL;
3733 }
3734 radeon_ib_free(rdev, &ib);
3735 return r;
3736}
3737
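/*
 * The 5-dword IB above is one SDMA linear WRITE packet: header, 64-bit
 * destination (low dword 4-byte aligned), dword count, then payload.
 * A sketch of the header encoding as the radeon SDMA_PACKET() macro
 * builds it - opcode in bits 7:0, sub-opcode in 15:8, extra bits in
 * 31:16 (treat the exact field layout as an assumption here):
 */
#include <stdint.h>

static uint32_t sdma_packet(uint32_t op, uint32_t sub_op, uint32_t extra)
{
	return ((extra & 0xFFFF) << 16) | ((sub_op & 0xFF) << 8) | (op & 0xFF);
}
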
3738
3739static void cik_print_gpu_status_regs(struct radeon_device *rdev)
3740{
3741 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
@@ -3785,7 +4076,7 @@ static void cik_print_gpu_status_regs(struct radeon_device *rdev)
3785 * mask to be used by cik_gpu_soft_reset().
3786 * Returns a mask of the blocks to be reset.
3787 */
3788static u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
4079u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
3789{
3790 u32 reset_mask = 0;
3791 u32 tmp;
@@ -4036,34 +4327,6 @@ bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
4036 return radeon_ring_test_lockup(rdev, ring);
4037}
4038
4039/**
4040 * cik_sdma_is_lockup - Check if the DMA engine is locked up
4041 *
4042 * @rdev: radeon_device pointer
4043 * @ring: radeon_ring structure holding ring information
4044 *
4045 * Check if the async DMA engine is locked up (CIK).
4046 * Returns true if the engine appears to be locked up, false if not.
4047 */
4048bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
4049{
4050 u32 reset_mask = cik_gpu_check_soft_reset(rdev);
4051 u32 mask;
4052
4053 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
4054 mask = RADEON_RESET_DMA;
4055 else
4056 mask = RADEON_RESET_DMA1;
4057
4058 if (!(reset_mask & mask)) {
4059 radeon_ring_lockup_update(ring);
4060 return false;
4061 }
4062 /* force ring activities */
4063 radeon_ring_force_activity(rdev, ring);
4064 return radeon_ring_test_lockup(rdev, ring);
4065}
4066
4067/* MC */
4068/**
4069 * cik_mc_program - program the GPU memory controller
@@ -4320,6 +4583,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
4320
4321 /* XXX SH_MEM regs */
4322 /* where to put LDS, scratch, GPUVM in FSA64 space */
4586 mutex_lock(&rdev->srbm_mutex);
4323 for (i = 0; i < 16; i++) {
4324 cik_srbm_select(rdev, 0, 0, 0, i);
4325 /* CP and shaders */
@@ -4335,6 +4599,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
4335 /* XXX SDMA RLC - todo */
4336 }
4337 cik_srbm_select(rdev, 0, 0, 0, 0);
4602 mutex_unlock(&rdev->srbm_mutex);
4338
4339 cik_pcie_gart_tlb_flush(rdev);
4340 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -4598,131 +4863,8 @@ void cik_vm_set_page(struct radeon_device *rdev,
4598 }
4599 } else {
4600 /* DMA */
4601 if (flags & RADEON_VM_PAGE_SYSTEM) {
4866 cik_sdma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
4602 while (count) {
4603 ndw = count * 2;
4604 if (ndw > 0xFFFFE)
4605 ndw = 0xFFFFE;
4606
4607 /* for non-physically contiguous pages (system) */
4608 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
4609 ib->ptr[ib->length_dw++] = pe;
4610 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
4611 ib->ptr[ib->length_dw++] = ndw;
4612 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
4613 if (flags & RADEON_VM_PAGE_SYSTEM) {
4614 value = radeon_vm_map_gart(rdev, addr);
4615 value &= 0xFFFFFFFFFFFFF000ULL;
4616 } else if (flags & RADEON_VM_PAGE_VALID) {
4617 value = addr;
4618 } else {
4619 value = 0;
4620 }
4621 addr += incr;
4622 value |= r600_flags;
4623 ib->ptr[ib->length_dw++] = value;
4624 ib->ptr[ib->length_dw++] = upper_32_bits(value);
4625 }
4626 }
4627 } else {
4628 while (count) {
4629 ndw = count;
4630 if (ndw > 0x7FFFF)
4631 ndw = 0x7FFFF;
4632
4633 if (flags & RADEON_VM_PAGE_VALID)
4634 value = addr;
4635 else
4636 value = 0;
4637 /* for physically contiguous pages (vram) */
4638 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
4639 ib->ptr[ib->length_dw++] = pe; /* dst addr */
4640 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
4641 ib->ptr[ib->length_dw++] = r600_flags; /* mask */
4642 ib->ptr[ib->length_dw++] = 0;
4643 ib->ptr[ib->length_dw++] = value; /* value */
4644 ib->ptr[ib->length_dw++] = upper_32_bits(value);
4645 ib->ptr[ib->length_dw++] = incr; /* increment size */
4646 ib->ptr[ib->length_dw++] = 0;
4647 ib->ptr[ib->length_dw++] = ndw; /* number of entries */
4648 pe += ndw * 8;
4649 addr += ndw * incr;
4650 count -= ndw;
4651 }
4652 }
4653 while (ib->length_dw & 0x7)
4654 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
4655 }
4656}
4657
4658/**
4659 * cik_dma_vm_flush - cik vm flush using sDMA
4660 *
4661 * @rdev: radeon_device pointer
4662 *
4663 * Update the page table base and flush the VM TLB
4664 * using sDMA (CIK).
4665 */
4666void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
4667{
4668 struct radeon_ring *ring = &rdev->ring[ridx];
4669 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
4670 SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
4671 u32 ref_and_mask;
4672
4673 if (vm == NULL)
4674 return;
4675
4676 if (ridx == R600_RING_TYPE_DMA_INDEX)
4677 ref_and_mask = SDMA0;
4678 else
4679 ref_and_mask = SDMA1;
4680
4681 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4682 if (vm->id < 8) {
4683 radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
4684 } else {
4685 radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
4686 }
4687 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
4688
4689 /* update SH_MEM_* regs */
4690 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4691 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
4692 radeon_ring_write(ring, VMID(vm->id));
4693
4694 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4695 radeon_ring_write(ring, SH_MEM_BASES >> 2);
4696 radeon_ring_write(ring, 0);
4697
4698 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4699 radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
4700 radeon_ring_write(ring, 0);
4701
4702 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4703 radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
4704 radeon_ring_write(ring, 1);
4705
4706 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4707 radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
4708 radeon_ring_write(ring, 0);
4709
4710 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4711 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
4712 radeon_ring_write(ring, VMID(0));
4713
4714 /* flush HDP */
4715 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
4716 radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
4717 radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
4718 radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
4719 radeon_ring_write(ring, ref_and_mask); /* MASK */
4720 radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
4721
4722 /* flush TLB */
4723 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4724 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
4725 radeon_ring_write(ring, 1 << vm->id);
4726}
4727
4728/*
@@ -4731,31 +4873,34 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
4731 * variety of functions, the most important of which is
4732 * the interrupt controller.
4733 */
4734/**
4735 * cik_rlc_stop - stop the RLC ME
4736 *
4737 * @rdev: radeon_device pointer
4738 *
4739 * Halt the RLC ME (MicroEngine) (CIK).
4740 */
4741static void cik_rlc_stop(struct radeon_device *rdev)
4876static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
4877 bool enable)
4742{
4743 int i, j, k;
4744 u32 mask, tmp;
4879 u32 tmp = RREG32(CP_INT_CNTL_RING0);
4745
4746 tmp = RREG32(CP_INT_CNTL_RING0);
4747 tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
4881 if (enable)
4882 tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
4883 else
4884 tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
4748 WREG32(CP_INT_CNTL_RING0, tmp);
4886}
4749
4750 RREG32(CB_CGTT_SCLK_CTRL);
4751 RREG32(CB_CGTT_SCLK_CTRL);
4752 RREG32(CB_CGTT_SCLK_CTRL);
4753 RREG32(CB_CGTT_SCLK_CTRL);
4888static void cik_enable_lbpw(struct radeon_device *rdev, bool enable)
4889{
4890 u32 tmp;
4754
4755 tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
4756 WREG32(RLC_CGCG_CGLS_CTRL, tmp);
4892 tmp = RREG32(RLC_LB_CNTL);
4893 if (enable)
4894 tmp |= LOAD_BALANCE_ENABLE;
4895 else
4896 tmp &= ~LOAD_BALANCE_ENABLE;
4897 WREG32(RLC_LB_CNTL, tmp);
4898}
4757
4758 WREG32(RLC_CNTL, 0);
4900static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
4901{
4902 u32 i, j, k;
4903 u32 mask;
4759
4760 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
4761 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
@@ -4777,6 +4922,84 @@ static void cik_rlc_stop(struct radeon_device *rdev)
4777 }
4778}
4779
4925static void cik_update_rlc(struct radeon_device *rdev, u32 rlc)
4926{
4927 u32 tmp;
4928
4929 tmp = RREG32(RLC_CNTL);
4930 if (tmp != rlc)
4931 WREG32(RLC_CNTL, rlc);
4932}
4933
4934static u32 cik_halt_rlc(struct radeon_device *rdev)
4935{
4936 u32 data, orig;
4937
4938 orig = data = RREG32(RLC_CNTL);
4939
4940 if (data & RLC_ENABLE) {
4941 u32 i;
4942
4943 data &= ~RLC_ENABLE;
4944 WREG32(RLC_CNTL, data);
4945
4946 for (i = 0; i < rdev->usec_timeout; i++) {
4947 if ((RREG32(RLC_GPM_STAT) & RLC_GPM_BUSY) == 0)
4948 break;
4949 udelay(1);
4950 }
4951
4952 cik_wait_for_rlc_serdes(rdev);
4953 }
4954
4955 return orig;
4956}
4957
4958void cik_enter_rlc_safe_mode(struct radeon_device *rdev)
4959{
4960 u32 tmp, i, mask;
4961
4962 tmp = REQ | MESSAGE(MSG_ENTER_RLC_SAFE_MODE);
4963 WREG32(RLC_GPR_REG2, tmp);
4964
4965 mask = GFX_POWER_STATUS | GFX_CLOCK_STATUS;
4966 for (i = 0; i < rdev->usec_timeout; i++) {
4967 if ((RREG32(RLC_GPM_STAT) & mask) == mask)
4968 break;
4969 udelay(1);
4970 }
4971
4972 for (i = 0; i < rdev->usec_timeout; i++) {
4973 if ((RREG32(RLC_GPR_REG2) & REQ) == 0)
4974 break;
4975 udelay(1);
4976 }
4977}
4978
4979void cik_exit_rlc_safe_mode(struct radeon_device *rdev)
4980{
4981 u32 tmp;
4982
4983 tmp = REQ | MESSAGE(MSG_EXIT_RLC_SAFE_MODE);
4984 WREG32(RLC_GPR_REG2, tmp);
4985}
4986
4987/**
4988 * cik_rlc_stop - stop the RLC ME
4989 *
4990 * @rdev: radeon_device pointer
4991 *
4992 * Halt the RLC ME (MicroEngine) (CIK).
4993 */
4994static void cik_rlc_stop(struct radeon_device *rdev)
4995{
4996 WREG32(RLC_CNTL, 0);
4997
4998 cik_enable_gui_idle_interrupt(rdev, false);
4999
5000 cik_wait_for_rlc_serdes(rdev);
5001}
5002
4780/**
4781 * cik_rlc_start - start the RLC ME
4782 *
@@ -4786,13 +5009,9 @@ static void cik_rlc_stop(struct radeon_device *rdev)
4786 */
4787static void cik_rlc_start(struct radeon_device *rdev)
4788{
4789 u32 tmp;
4790
4791 WREG32(RLC_CNTL, RLC_ENABLE);
4792
4793 tmp = RREG32(CP_INT_CNTL_RING0);
4794 tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
4795 WREG32(CP_INT_CNTL_RING0, tmp);
5014 cik_enable_gui_idle_interrupt(rdev, true);
4796
4797 udelay(50);
4798}
@@ -4808,8 +5027,7 @@ static void cik_rlc_start(struct radeon_device *rdev)
4808 */
4809static int cik_rlc_resume(struct radeon_device *rdev)
4810{
4811 u32 i, size;
4812 u32 clear_state_info[3];
5030 u32 i, size, tmp;
4813 const __be32 *fw_data;
4814
4815 if (!rdev->rlc_fw)
@@ -4830,12 +5048,15 @@ static int cik_rlc_resume(struct radeon_device *rdev)
4830
4831 cik_rlc_stop(rdev);
4832
4833 WREG32(GRBM_SOFT_RESET, SOFT_RESET_RLC);
4834 RREG32(GRBM_SOFT_RESET);
4835 udelay(50);
4836 WREG32(GRBM_SOFT_RESET, 0);
4837 RREG32(GRBM_SOFT_RESET);
4838 udelay(50);
5051 /* disable CG */
5052 tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
5053 WREG32(RLC_CGCG_CGLS_CTRL, tmp);
5054
5055 si_rlc_reset(rdev);
5056
5057 cik_init_pg(rdev);
5058
5059 cik_init_cg(rdev);
4839
4840 WREG32(RLC_LB_CNTR_INIT, 0);
4841 WREG32(RLC_LB_CNTR_MAX, 0x00008000);
@@ -4854,20 +5075,757 @@ static int cik_rlc_resume(struct radeon_device *rdev)
4854 WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
4855 WREG32(RLC_GPM_UCODE_ADDR, 0);
4856
4857 /* XXX */
4858 clear_state_info[0] = 0;//upper_32_bits(rdev->rlc.save_restore_gpu_addr);
4859 clear_state_info[1] = 0;//rdev->rlc.save_restore_gpu_addr;
4860 clear_state_info[2] = 0;//cik_default_size;
4861 WREG32(RLC_GPM_SCRATCH_ADDR, 0x3d);
4862 for (i = 0; i < 3; i++)
4863 WREG32(RLC_GPM_SCRATCH_DATA, clear_state_info[i]);
4864 WREG32(RLC_DRIVER_DMA_STATUS, 0);
5078 /* XXX - find out what chips support lbpw */
5079 cik_enable_lbpw(rdev, false);
5080
5081 if (rdev->family == CHIP_BONAIRE)
5082 WREG32(RLC_DRIVER_DMA_STATUS, 0);
4865
4866 cik_rlc_start(rdev);
4867
4868 return 0;
4869}
4870
5089static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
5090{
5091 u32 data, orig, tmp, tmp2;
5092
5093 orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
5094
5095 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
5096 cik_enable_gui_idle_interrupt(rdev, true);
5097
5098 tmp = cik_halt_rlc(rdev);
5099
5100 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5101 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
5102 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
5103 tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
5104 WREG32(RLC_SERDES_WR_CTRL, tmp2);
5105
5106 cik_update_rlc(rdev, tmp);
5107
5108 data |= CGCG_EN | CGLS_EN;
5109 } else {
5110 cik_enable_gui_idle_interrupt(rdev, false);
5111
5112 RREG32(CB_CGTT_SCLK_CTRL);
5113 RREG32(CB_CGTT_SCLK_CTRL);
5114 RREG32(CB_CGTT_SCLK_CTRL);
5115 RREG32(CB_CGTT_SCLK_CTRL);
5116
5117 data &= ~(CGCG_EN | CGLS_EN);
5118 }
5119
5120 if (orig != data)
5121 WREG32(RLC_CGCG_CGLS_CTRL, data);
5122
5123}
5124
5125static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
5126{
5127 u32 data, orig, tmp = 0;
5128
5129 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
5130 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) {
5131 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
5132 orig = data = RREG32(CP_MEM_SLP_CNTL);
5133 data |= CP_MEM_LS_EN;
5134 if (orig != data)
5135 WREG32(CP_MEM_SLP_CNTL, data);
5136 }
5137 }
5138
5139 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
5140 data &= 0xfffffffd;
5141 if (orig != data)
5142 WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
5143
5144 tmp = cik_halt_rlc(rdev);
5145
5146 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5147 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
5148 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
5149 data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
5150 WREG32(RLC_SERDES_WR_CTRL, data);
5151
5152 cik_update_rlc(rdev, tmp);
5153
5154 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS) {
5155 orig = data = RREG32(CGTS_SM_CTRL_REG);
5156 data &= ~SM_MODE_MASK;
5157 data |= SM_MODE(0x2);
5158 data |= SM_MODE_ENABLE;
5159 data &= ~CGTS_OVERRIDE;
5160 if ((rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) &&
5161 (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS_LS))
5162 data &= ~CGTS_LS_OVERRIDE;
5163 data &= ~ON_MONITOR_ADD_MASK;
5164 data |= ON_MONITOR_ADD_EN;
5165 data |= ON_MONITOR_ADD(0x96);
5166 if (orig != data)
5167 WREG32(CGTS_SM_CTRL_REG, data);
5168 }
5169 } else {
5170 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
5171 data |= 0x00000002;
5172 if (orig != data)
5173 WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
5174
5175 data = RREG32(RLC_MEM_SLP_CNTL);
5176 if (data & RLC_MEM_LS_EN) {
5177 data &= ~RLC_MEM_LS_EN;
5178 WREG32(RLC_MEM_SLP_CNTL, data);
5179 }
5180
5181 data = RREG32(CP_MEM_SLP_CNTL);
5182 if (data & CP_MEM_LS_EN) {
5183 data &= ~CP_MEM_LS_EN;
5184 WREG32(CP_MEM_SLP_CNTL, data);
5185 }
5186
5187 orig = data = RREG32(CGTS_SM_CTRL_REG);
5188 data |= CGTS_OVERRIDE | CGTS_LS_OVERRIDE;
5189 if (orig != data)
5190 WREG32(CGTS_SM_CTRL_REG, data);
5191
5192 tmp = cik_halt_rlc(rdev);
5193
5194 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5195 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
5196 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
5197 data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
5198 WREG32(RLC_SERDES_WR_CTRL, data);
5199
5200 cik_update_rlc(rdev, tmp);
5201 }
5202}
5203
5204static const u32 mc_cg_registers[] =
5205{
5206 MC_HUB_MISC_HUB_CG,
5207 MC_HUB_MISC_SIP_CG,
5208 MC_HUB_MISC_VM_CG,
5209 MC_XPB_CLK_GAT,
5210 ATC_MISC_CG,
5211 MC_CITF_MISC_WR_CG,
5212 MC_CITF_MISC_RD_CG,
5213 MC_CITF_MISC_VM_CG,
5214 VM_L2_CG,
5215};
5216
5217static void cik_enable_mc_ls(struct radeon_device *rdev,
5218 bool enable)
5219{
5220 int i;
5221 u32 orig, data;
5222
5223 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5224 orig = data = RREG32(mc_cg_registers[i]);
5225 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
5226 data |= MC_LS_ENABLE;
5227 else
5228 data &= ~MC_LS_ENABLE;
5229 if (data != orig)
5230 WREG32(mc_cg_registers[i], data);
5231 }
5232}
5233
5234static void cik_enable_mc_mgcg(struct radeon_device *rdev,
5235 bool enable)
5236{
5237 int i;
5238 u32 orig, data;
5239
5240 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5241 orig = data = RREG32(mc_cg_registers[i]);
5242 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
5243 data |= MC_CG_ENABLE;
5244 else
5245 data &= ~MC_CG_ENABLE;
5246 if (data != orig)
5247 WREG32(mc_cg_registers[i], data);
5248 }
5249}
5250
5251static void cik_enable_sdma_mgcg(struct radeon_device *rdev,
5252 bool enable)
5253{
5254 u32 orig, data;
5255
5256 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
5257 WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
5258 WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
5259 } else {
5260 orig = data = RREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
5261 data |= 0xff000000;
5262 if (data != orig)
5263 WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
5264
5265 orig = data = RREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
5266 data |= 0xff000000;
5267 if (data != orig)
5268 WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
5269 }
5270}
5271
5272static void cik_enable_sdma_mgls(struct radeon_device *rdev,
5273 bool enable)
5274{
5275 u32 orig, data;
5276
5277 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_LS)) {
5278 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
5279 data |= 0x100;
5280 if (orig != data)
5281 WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
5282
5283 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
5284 data |= 0x100;
5285 if (orig != data)
5286 WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
5287 } else {
5288 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
5289 data &= ~0x100;
5290 if (orig != data)
5291 WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
5292
5293 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
5294 data &= ~0x100;
5295 if (orig != data)
5296 WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
5297 }
5298}
5299
5300static void cik_enable_uvd_mgcg(struct radeon_device *rdev,
5301 bool enable)
5302{
5303 u32 orig, data;
5304
5305 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
5306 data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
5307 data = 0xfff;
5308 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
5309
5310 orig = data = RREG32(UVD_CGC_CTRL);
5311 data |= DCM;
5312 if (orig != data)
5313 WREG32(UVD_CGC_CTRL, data);
5314 } else {
5315 data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
5316 data &= ~0xfff;
5317 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
5318
5319 orig = data = RREG32(UVD_CGC_CTRL);
5320 data &= ~DCM;
5321 if (orig != data)
5322 WREG32(UVD_CGC_CTRL, data);
5323 }
5324}
5325
5326static void cik_enable_bif_mgls(struct radeon_device *rdev,
5327 bool enable)
5328{
5329 u32 orig, data;
5330
5331 orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
5332
5333 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
5334 data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
5335 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
5336 else
5337 data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
5338 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
5339
5340 if (orig != data)
5341 WREG32_PCIE_PORT(PCIE_CNTL2, data);
5342}
5343
5344static void cik_enable_hdp_mgcg(struct radeon_device *rdev,
5345 bool enable)
5346{
5347 u32 orig, data;
5348
5349 orig = data = RREG32(HDP_HOST_PATH_CNTL);
5350
5351 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
5352 data &= ~CLOCK_GATING_DIS;
5353 else
5354 data |= CLOCK_GATING_DIS;
5355
5356 if (orig != data)
5357 WREG32(HDP_HOST_PATH_CNTL, data);
5358}
5359
5360static void cik_enable_hdp_ls(struct radeon_device *rdev,
5361 bool enable)
5362{
5363 u32 orig, data;
5364
5365 orig = data = RREG32(HDP_MEM_POWER_LS);
5366
5367 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
5368 data |= HDP_LS_ENABLE;
5369 else
5370 data &= ~HDP_LS_ENABLE;
5371
5372 if (orig != data)
5373 WREG32(HDP_MEM_POWER_LS, data);
5374}
5375
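/*
 * All of the clockgating helpers above share one read-modify-write
 * idiom: snapshot the register, flip the relevant bits, and write back
 * only when something changed, skipping redundant register writes. A
 * generic sketch with hypothetical read_reg()/write_reg() callbacks:
 */
#include <stdint.h>

static void rmw_reg(uint32_t reg, uint32_t set_bits, uint32_t clear_bits,
		    uint32_t (*read_reg)(uint32_t),
		    void (*write_reg)(uint32_t, uint32_t))
{
	uint32_t orig, data;

	orig = data = read_reg(reg);
	data |= set_bits;
	data &= ~clear_bits;
	if (data != orig)		/* avoid the write if nothing changed */
		write_reg(reg, data);
}
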
5376void cik_update_cg(struct radeon_device *rdev,
5377 u32 block, bool enable)
5378{
5379 if (block & RADEON_CG_BLOCK_GFX) {
5380 /* order matters! */
5381 if (enable) {
5382 cik_enable_mgcg(rdev, true);
5383 cik_enable_cgcg(rdev, true);
5384 } else {
5385 cik_enable_cgcg(rdev, false);
5386 cik_enable_mgcg(rdev, false);
5387 }
5388 }
5389
5390 if (block & RADEON_CG_BLOCK_MC) {
5391 if (!(rdev->flags & RADEON_IS_IGP)) {
5392 cik_enable_mc_mgcg(rdev, enable);
5393 cik_enable_mc_ls(rdev, enable);
5394 }
5395 }
5396
5397 if (block & RADEON_CG_BLOCK_SDMA) {
5398 cik_enable_sdma_mgcg(rdev, enable);
5399 cik_enable_sdma_mgls(rdev, enable);
5400 }
5401
5402 if (block & RADEON_CG_BLOCK_BIF) {
5403 cik_enable_bif_mgls(rdev, enable);
5404 }
5405
5406 if (block & RADEON_CG_BLOCK_UVD) {
5407 if (rdev->has_uvd)
5408 cik_enable_uvd_mgcg(rdev, enable);
5409 }
5410
5411 if (block & RADEON_CG_BLOCK_HDP) {
5412 cik_enable_hdp_mgcg(rdev, enable);
5413 cik_enable_hdp_ls(rdev, enable);
5414 }
5415}
5416
5417static void cik_init_cg(struct radeon_device *rdev)
5418{
5419
5420 cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, true);
5421
5422 if (rdev->has_uvd)
5423 si_init_uvd_internal_cg(rdev);
5424
5425 cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
5426 RADEON_CG_BLOCK_SDMA |
5427 RADEON_CG_BLOCK_BIF |
5428 RADEON_CG_BLOCK_UVD |
5429 RADEON_CG_BLOCK_HDP), true);
5430}
5431
5432static void cik_fini_cg(struct radeon_device *rdev)
5433{
5434 cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
5435 RADEON_CG_BLOCK_SDMA |
5436 RADEON_CG_BLOCK_BIF |
5437 RADEON_CG_BLOCK_UVD |
5438 RADEON_CG_BLOCK_HDP), false);
5439
5440 cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, false);
5441}
5442
5443static void cik_enable_sck_slowdown_on_pu(struct radeon_device *rdev,
5444 bool enable)
5445{
5446 u32 data, orig;
5447
5448 orig = data = RREG32(RLC_PG_CNTL);
5449 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
5450 data |= SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
5451 else
5452 data &= ~SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
5453 if (orig != data)
5454 WREG32(RLC_PG_CNTL, data);
5455}
5456
5457static void cik_enable_sck_slowdown_on_pd(struct radeon_device *rdev,
5458 bool enable)
5459{
5460 u32 data, orig;
5461
5462 orig = data = RREG32(RLC_PG_CNTL);
5463 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
5464 data |= SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
5465 else
5466 data &= ~SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
5467 if (orig != data)
5468 WREG32(RLC_PG_CNTL, data);
5469}
5470
5471static void cik_enable_cp_pg(struct radeon_device *rdev, bool enable)
5472{
5473 u32 data, orig;
5474
5475 orig = data = RREG32(RLC_PG_CNTL);
5476 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_CP))
5477 data &= ~DISABLE_CP_PG;
5478 else
5479 data |= DISABLE_CP_PG;
5480 if (orig != data)
5481 WREG32(RLC_PG_CNTL, data);
5482}
5483
5484static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable)
5485{
5486 u32 data, orig;
5487
5488 orig = data = RREG32(RLC_PG_CNTL);
5489 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GDS))
5490 data &= ~DISABLE_GDS_PG;
5491 else
5492 data |= DISABLE_GDS_PG;
5493 if (orig != data)
5494 WREG32(RLC_PG_CNTL, data);
5495}
5496
5497#define CP_ME_TABLE_SIZE 96
5498#define CP_ME_TABLE_OFFSET 2048
5499#define CP_MEC_TABLE_OFFSET 4096
5500
5501void cik_init_cp_pg_table(struct radeon_device *rdev)
5502{
5503 const __be32 *fw_data;
5504 volatile u32 *dst_ptr;
5505 int me, i, max_me = 4;
5506 u32 bo_offset = 0;
5507 u32 table_offset;
5508
5509 if (rdev->family == CHIP_KAVERI)
5510 max_me = 5;
5511
5512 if (rdev->rlc.cp_table_ptr == NULL)
5513 return;
5514
5515 /* write the cp table buffer */
5516 dst_ptr = rdev->rlc.cp_table_ptr;
5517 for (me = 0; me < max_me; me++) {
5518 if (me == 0) {
5519 fw_data = (const __be32 *)rdev->ce_fw->data;
5520 table_offset = CP_ME_TABLE_OFFSET;
5521 } else if (me == 1) {
5522 fw_data = (const __be32 *)rdev->pfp_fw->data;
5523 table_offset = CP_ME_TABLE_OFFSET;
5524 } else if (me == 2) {
5525 fw_data = (const __be32 *)rdev->me_fw->data;
5526 table_offset = CP_ME_TABLE_OFFSET;
5527 } else {
5528 fw_data = (const __be32 *)rdev->mec_fw->data;
5529 table_offset = CP_MEC_TABLE_OFFSET;
5530 }
5531
5532 for (i = 0; i < CP_ME_TABLE_SIZE; i ++) {
5533 dst_ptr[bo_offset + i] = be32_to_cpu(fw_data[table_offset + i]);
5534 }
5535 bo_offset += CP_ME_TABLE_SIZE;
5536 }
5537}
5538
5539static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
5540 bool enable)
5541{
5542 u32 data, orig;
5543
5544 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
5545 orig = data = RREG32(RLC_PG_CNTL);
5546 data |= GFX_PG_ENABLE;
5547 if (orig != data)
5548 WREG32(RLC_PG_CNTL, data);
5549
5550 orig = data = RREG32(RLC_AUTO_PG_CTRL);
5551 data |= AUTO_PG_EN;
5552 if (orig != data)
5553 WREG32(RLC_AUTO_PG_CTRL, data);
5554 } else {
5555 orig = data = RREG32(RLC_PG_CNTL);
5556 data &= ~GFX_PG_ENABLE;
5557 if (orig != data)
5558 WREG32(RLC_PG_CNTL, data);
5559
5560 orig = data = RREG32(RLC_AUTO_PG_CTRL);
5561 data &= ~AUTO_PG_EN;
5562 if (orig != data)
5563 WREG32(RLC_AUTO_PG_CTRL, data);
5564
5565 data = RREG32(DB_RENDER_CONTROL);
5566 }
5567}
5568
5569static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
5570{
5571 u32 mask = 0, tmp, tmp1;
5572 int i;
5573
5574 cik_select_se_sh(rdev, se, sh);
5575 tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
5576 tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
5577 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5578
5579 tmp &= 0xffff0000;
5580
5581 tmp |= tmp1;
5582 tmp >>= 16;
5583
5584 for (i = 0; i < rdev->config.cik.max_cu_per_sh; i ++) {
5585 mask <<= 1;
5586 mask |= 1;
5587 }
5588
5589 return (~tmp) & mask;
5590}
5591
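/*
 * Worked example for the bitmap math above: the fuse-disabled CUs
 * (CC_GC_SHADER_ARRAY_CONFIG, high 16 bits) are OR'd with the
 * user-disabled CUs, shifted down into the low 16 bits, inverted, and
 * clipped to max_cu_per_sh. Assuming 8 CUs per SH and a combined
 * inactive mask of 0x0003 after the shift (values are illustrative):
 *
 *   mask   = (1 << 8) - 1        = 0x00ff
 *   active = (~0x0003) & 0x00ff  = 0x00fc   -> CUs 2..7 are active
 */
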
5592static void cik_init_ao_cu_mask(struct radeon_device *rdev)
5593{
5594 u32 i, j, k, active_cu_number = 0;
5595 u32 mask, counter, cu_bitmap;
5596 u32 tmp = 0;
5597
5598 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
5599 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
5600 mask = 1;
5601 cu_bitmap = 0;
5602 counter = 0;
5603 for (k = 0; k < rdev->config.cik.max_cu_per_sh; k ++) {
5604 if (cik_get_cu_active_bitmap(rdev, i, j) & mask) {
5605 if (counter < 2)
5606 cu_bitmap |= mask;
5607 counter ++;
5608 }
5609 mask <<= 1;
5610 }
5611
5612 active_cu_number += counter;
5613 tmp |= (cu_bitmap << (i * 16 + j * 8));
5614 }
5615 }
5616
5617 WREG32(RLC_PG_AO_CU_MASK, tmp);
5618
5619 tmp = RREG32(RLC_MAX_PG_CU);
5620 tmp &= ~MAX_PU_CU_MASK;
5621 tmp |= MAX_PU_CU(active_cu_number);
5622 WREG32(RLC_MAX_PG_CU, tmp);
5623}
5624
5625static void cik_enable_gfx_static_mgpg(struct radeon_device *rdev,
5626 bool enable)
5627{
5628 u32 data, orig;
5629
5630 orig = data = RREG32(RLC_PG_CNTL);
5631 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_SMG))
5632 data |= STATIC_PER_CU_PG_ENABLE;
5633 else
5634 data &= ~STATIC_PER_CU_PG_ENABLE;
5635 if (orig != data)
5636 WREG32(RLC_PG_CNTL, data);
5637}
5638
5639static void cik_enable_gfx_dynamic_mgpg(struct radeon_device *rdev,
5640 bool enable)
5641{
5642 u32 data, orig;
5643
5644 orig = data = RREG32(RLC_PG_CNTL);
5645 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_DMG))
5646 data |= DYN_PER_CU_PG_ENABLE;
5647 else
5648 data &= ~DYN_PER_CU_PG_ENABLE;
5649 if (orig != data)
5650 WREG32(RLC_PG_CNTL, data);
5651}
5652
5653#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
5654#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
5655
5656static void cik_init_gfx_cgpg(struct radeon_device *rdev)
5657{
5658 u32 data, orig;
5659 u32 i;
5660
5661 if (rdev->rlc.cs_data) {
5662 WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
5663 WREG32(RLC_GPM_SCRATCH_DATA, upper_32_bits(rdev->rlc.clear_state_gpu_addr));
5664 WREG32(RLC_GPM_SCRATCH_DATA, lower_32_bits(rdev->rlc.clear_state_gpu_addr));
5665 WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.clear_state_size);
5666 } else {
5667 WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
5668 for (i = 0; i < 3; i++)
5669 WREG32(RLC_GPM_SCRATCH_DATA, 0);
5670 }
5671 if (rdev->rlc.reg_list) {
5672 WREG32(RLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
5673 for (i = 0; i < rdev->rlc.reg_list_size; i++)
5674 WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.reg_list[i]);
5675 }
5676
5677 orig = data = RREG32(RLC_PG_CNTL);
5678 data |= GFX_PG_SRC;
5679 if (orig != data)
5680 WREG32(RLC_PG_CNTL, data);
5681
5682 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5683 WREG32(RLC_CP_TABLE_RESTORE, rdev->rlc.cp_table_gpu_addr >> 8);
5684
5685 data = RREG32(CP_RB_WPTR_POLL_CNTL);
5686 data &= ~IDLE_POLL_COUNT_MASK;
5687 data |= IDLE_POLL_COUNT(0x60);
5688 WREG32(CP_RB_WPTR_POLL_CNTL, data);
5689
5690 data = 0x10101010;
5691 WREG32(RLC_PG_DELAY, data);
5692
5693 data = RREG32(RLC_PG_DELAY_2);
5694 data &= ~0xff;
5695 data |= 0x3;
5696 WREG32(RLC_PG_DELAY_2, data);
5697
5698 data = RREG32(RLC_AUTO_PG_CTRL);
5699 data &= ~GRBM_REG_SGIT_MASK;
5700 data |= GRBM_REG_SGIT(0x700);
5701 WREG32(RLC_AUTO_PG_CTRL, data);
5702
5703}
5704
5705static void cik_update_gfx_pg(struct radeon_device *rdev, bool enable)
5706{
5707 cik_enable_gfx_cgpg(rdev, enable);
5708 cik_enable_gfx_static_mgpg(rdev, enable);
5709 cik_enable_gfx_dynamic_mgpg(rdev, enable);
5710}
5711
5712u32 cik_get_csb_size(struct radeon_device *rdev)
5713{
5714 u32 count = 0;
5715 const struct cs_section_def *sect = NULL;
5716 const struct cs_extent_def *ext = NULL;
5717
5718 if (rdev->rlc.cs_data == NULL)
5719 return 0;
5720
5721 /* begin clear state */
5722 count += 2;
5723 /* context control state */
5724 count += 3;
5725
5726 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5727 for (ext = sect->section; ext->extent != NULL; ++ext) {
5728 if (sect->id == SECT_CONTEXT)
5729 count += 2 + ext->reg_count;
5730 else
5731 return 0;
5732 }
5733 }
5734 /* pa_sc_raster_config/pa_sc_raster_config1 */
5735 count += 4;
5736 /* end clear state */
5737 count += 2;
5738 /* clear state */
5739 count += 2;
5740
5741 return count;
5742}
5743
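/*
 * Worked example for the count above: the fixed packets cost
 * 2 (begin clear state) + 3 (context control) + 4 (raster config pair)
 * + 2 (end clear state) + 2 (clear state) = 13 dwords, and every
 * SECT_CONTEXT extent adds a 2-dword SET_CONTEXT_REG header plus its
 * registers. With, say, two extents of 3 and 5 registers:
 *
 *   count = 13 + (2 + 3) + (2 + 5) = 25 dwords
 */
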
5744void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
5745{
5746 u32 count = 0, i;
5747 const struct cs_section_def *sect = NULL;
5748 const struct cs_extent_def *ext = NULL;
5749
5750 if (rdev->rlc.cs_data == NULL)
5751 return;
5752 if (buffer == NULL)
5753 return;
5754
5755 buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
5756 buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
5757
5758 buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
5759 buffer[count++] = 0x80000000;
5760 buffer[count++] = 0x80000000;
5761
5762 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5763 for (ext = sect->section; ext->extent != NULL; ++ext) {
5764 if (sect->id == SECT_CONTEXT) {
5765 buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
5766 buffer[count++] = ext->reg_index - 0xa000;
5767 for (i = 0; i < ext->reg_count; i++)
5768 buffer[count++] = ext->extent[i];
5769 } else {
5770 return;
5771 }
5772 }
5773 }
5774
5775 buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
5776 buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
5777 switch (rdev->family) {
5778 case CHIP_BONAIRE:
5779 buffer[count++] = 0x16000012;
5780 buffer[count++] = 0x00000000;
5781 break;
5782 case CHIP_KAVERI:
5783 buffer[count++] = 0x00000000; /* XXX */
5784 buffer[count++] = 0x00000000;
5785 break;
5786 case CHIP_KABINI:
5787 buffer[count++] = 0x00000000; /* XXX */
5788 buffer[count++] = 0x00000000;
5789 break;
5790 default:
5791 buffer[count++] = 0x00000000;
5792 buffer[count++] = 0x00000000;
5793 break;
5794 }
5795
5796 buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
5797 buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
5798
5799 buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
5800 buffer[count++] = 0;
5801}
5802
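/*
 * The buffer above is assembled from CP type-3 packets. A sketch of
 * the PACKET3() header encoding as used by the radeon driver - packet
 * type 3 in bits 31:30, body dword count minus one in 29:16, opcode in
 * 15:8 (the exact field layout is stated here as an assumption):
 */
#include <stdint.h>

static uint32_t packet3(uint32_t opcode, uint32_t count)
{
	return (3u << 30) | ((count & 0x3FFF) << 16) | ((opcode & 0xFF) << 8);
}
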
5803static void cik_init_pg(struct radeon_device *rdev)
5804{
5805 if (rdev->pg_flags) {
5806 cik_enable_sck_slowdown_on_pu(rdev, true);
5807 cik_enable_sck_slowdown_on_pd(rdev, true);
5808 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
5809 cik_init_gfx_cgpg(rdev);
5810 cik_enable_cp_pg(rdev, true);
5811 cik_enable_gds_pg(rdev, true);
5812 }
5813 cik_init_ao_cu_mask(rdev);
5814 cik_update_gfx_pg(rdev, true);
5815 }
5816}
5817
5818static void cik_fini_pg(struct radeon_device *rdev)
5819{
5820 if (rdev->pg_flags) {
5821 cik_update_gfx_pg(rdev, false);
5822 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
5823 cik_enable_cp_pg(rdev, false);
5824 cik_enable_gds_pg(rdev, false);
5825 }
5826 }
5827}
5828
4871/*
4872 * Interrupts
4873 * Starting with r6xx, interrupts are handled via a ring buffer.
@@ -5030,7 +5988,7 @@ static int cik_irq_init(struct radeon_device *rdev)
5030 WREG32(INTERRUPT_CNTL, interrupt_cntl);
5031
5032 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
5033 rb_bufsz = drm_order(rdev->ih.ring_size / 4);
5991 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
5034
5035 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
5036 IH_WPTR_OVERFLOW_CLEAR |
@@ -5086,6 +6044,7 @@ int cik_irq_set(struct radeon_device *rdev)
5086 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
5087 u32 grbm_int_cntl = 0;
5088 u32 dma_cntl, dma_cntl1;
6047 u32 thermal_int;
5089
5090 if (!rdev->irq.installed) {
5091 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -5118,6 +6077,13 @@ int cik_irq_set(struct radeon_device *rdev)
5118 cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5119 cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5120
6080 if (rdev->flags & RADEON_IS_IGP)
6081 thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
6082 ~(THERM_INTH_MASK | THERM_INTL_MASK);
6083 else
6084 thermal_int = RREG32_SMC(CG_THERMAL_INT) &
6085 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
6086
5121 /* enable CP interrupts on all rings */
5122 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
5123 DRM_DEBUG("cik_irq_set: sw int gfx\n");
@@ -5275,6 +6241,14 @@ int cik_irq_set(struct radeon_device *rdev)
5275 hpd6 |= DC_HPDx_INT_EN;
5276 }
5277
6244 if (rdev->irq.dpm_thermal) {
6245 DRM_DEBUG("dpm thermal\n");
6246 if (rdev->flags & RADEON_IS_IGP)
6247 thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
6248 else
6249 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
6250 }
6251
5278 WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
5279
5280 WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
@@ -5309,6 +6283,11 @@ int cik_irq_set(struct radeon_device *rdev)
5309 WREG32(DC_HPD5_INT_CONTROL, hpd5);
5310 WREG32(DC_HPD6_INT_CONTROL, hpd6);
5311
6286 if (rdev->flags & RADEON_IS_IGP)
6287 WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
6288 else
6289 WREG32_SMC(CG_THERMAL_INT, thermal_int);
6290
5312 return 0;
5313}
5314
@@ -5520,6 +6499,7 @@ int cik_irq_process(struct radeon_device *rdev)
5520 bool queue_hotplug = false;
5521 bool queue_reset = false;
5522 u32 addr, status, mc_client;
6502 bool queue_thermal = false;
5523
5524 if (!rdev->ih.enabled || rdev->shutdown)
5525 return IRQ_NONE;
@@ -5753,6 +6733,10 @@ restart_ih:
5753 break;
5754 }
5755 break;
6736 case 124: /* UVD */
6737 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
6738 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
6739 break;
5756 case 146:
5757 case 147:
5758 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
@@ -5870,6 +6854,19 @@ restart_ih:
5870 break;
5871 }
5872 break;
6857 case 230: /* thermal low to high */
6858 DRM_DEBUG("IH: thermal low to high\n");
6859 rdev->pm.dpm.thermal.high_to_low = false;
6860 queue_thermal = true;
6861 break;
6862 case 231: /* thermal high to low */
6863 DRM_DEBUG("IH: thermal high to low\n");
6864 rdev->pm.dpm.thermal.high_to_low = true;
6865 queue_thermal = true;
6866 break;
6867 case 233: /* GUI IDLE */
6868 DRM_DEBUG("IH: GUI idle\n");
6869 break;
5873 case 241: /* SDMA Privileged inst */
5874 case 247: /* SDMA Privileged inst */
5875 DRM_ERROR("Illegal instruction in SDMA command stream\n");
@@ -5909,9 +6906,6 @@ restart_ih:
5909 break;
5910 }
5911 break;
5912 case 233: /* GUI IDLE */
5913 DRM_DEBUG("IH: GUI idle\n");
5914 break;
5915 default:
5916 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5917 break;
@@ -5925,6 +6919,8 @@ restart_ih:
5925 schedule_work(&rdev->hotplug_work);
5926 if (queue_reset)
5927 schedule_work(&rdev->reset_work);
6922 if (queue_thermal)
6923 schedule_work(&rdev->pm.dpm.thermal.work);
5928 rdev->ih.rptr = rptr;
5929 WREG32(IH_RB_RPTR, rdev->ih.rptr);
5930 atomic_set(&rdev->ih.lock, 0);
@@ -5954,6 +6950,18 @@ static int cik_startup(struct radeon_device *rdev)
5954 struct radeon_ring *ring;
5955 int r;
5956
6953 /* enable pcie gen2/3 link */
6954 cik_pcie_gen3_enable(rdev);
6955 /* enable aspm */
6956 cik_program_aspm(rdev);
6957
6958 /* scratch needs to be initialized before MC */
6959 r = r600_vram_scratch_init(rdev);
6960 if (r)
6961 return r;
6962
6963 cik_mc_program(rdev);
6964
5957 if (rdev->flags & RADEON_IS_IGP) {
5958 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
5959 !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
@@ -5981,18 +6989,26 @@ static int cik_startup(struct radeon_device *rdev)
5981 }
5982 }
5983
5984 r = r600_vram_scratch_init(rdev);
5985 if (r)
5986 return r;
5987
5988 cik_mc_program(rdev);
5989 r = cik_pcie_gart_enable(rdev);
5990 if (r)
5991 return r;
5992 cik_gpu_init(rdev);
5993
5994 /* allocate rlc buffers */
5995 r = si_rlc_init(rdev);
6998 if (rdev->flags & RADEON_IS_IGP) {
6999 if (rdev->family == CHIP_KAVERI) {
7000 rdev->rlc.reg_list = spectre_rlc_save_restore_register_list;
7001 rdev->rlc.reg_list_size =
7002 (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
7003 } else {
7004 rdev->rlc.reg_list = kalindi_rlc_save_restore_register_list;
7005 rdev->rlc.reg_list_size =
7006 (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
7007 }
7008 }
7009 rdev->rlc.cs_data = ci_cs_data;
7010 rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
7011 r = sumo_rlc_init(rdev);
5996 if (r) {
5997 DRM_ERROR("Failed to init rlc BOs!\n");
5998 return r;
@@ -6040,12 +7056,15 @@ static int cik_startup(struct radeon_device *rdev)
6040 return r;
6041 }
6042
6043 r = cik_uvd_resume(rdev);
7059 r = radeon_uvd_resume(rdev);
6044 if (!r) {
6045 r = radeon_fence_driver_start_ring(rdev,
6046 R600_RING_TYPE_UVD_INDEX);
6047 if (r)
6048 dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
7061 r = uvd_v4_2_resume(rdev);
7062 if (!r) {
7063 r = radeon_fence_driver_start_ring(rdev,
7064 R600_RING_TYPE_UVD_INDEX);
7065 if (r)
7066 dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
7067 }
6049 }
6050 if (r)
6051 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
@@ -6068,7 +7087,7 @@ static int cik_startup(struct radeon_device *rdev)
6068 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
6069 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
6070 CP_RB0_RPTR, CP_RB0_WPTR,
6071 0, 0xfffff, RADEON_CP_PACKET2);
7090 RADEON_CP_PACKET2);
6072 if (r)
6073 return r;
6074
@@ -6077,7 +7096,7 @@ static int cik_startup(struct radeon_device *rdev)
6077 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
6078 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
6079 CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
6080 0, 0xfffff, PACKET3(PACKET3_NOP, 0x3FFF));
7099 PACKET3(PACKET3_NOP, 0x3FFF));
6081 if (r)
6082 return r;
6083 ring->me = 1; /* first MEC */
@@ -6089,7 +7108,7 @@ static int cik_startup(struct radeon_device *rdev)
6089 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
6090 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
6091 CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
6092 0, 0xffffffff, PACKET3(PACKET3_NOP, 0x3FFF));
7111 PACKET3(PACKET3_NOP, 0x3FFF));
6093 if (r)
6094 return r;
6095 /* dGPUs only have 1 MEC */
@@ -6102,7 +7121,7 @@ static int cik_startup(struct radeon_device *rdev)
6102 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
6103 SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET,
6104 SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET,
6105 2, 0xfffffffc, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
7124 SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
6106 if (r)
6107 return r;
6108
@@ -6110,7 +7129,7 @@ static int cik_startup(struct radeon_device *rdev)
6110 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
6111 SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET,
6112 SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET,
6113 2, 0xfffffffc, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
7132 SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
6114 if (r)
6115 return r;
6116
@@ -6124,12 +7143,11 @@ static int cik_startup(struct radeon_device *rdev)
6124
6125 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
6126 if (ring->ring_size) {
6127 r = radeon_ring_init(rdev, ring, ring->ring_size,
6128 R600_WB_UVD_RPTR_OFFSET,
7146 r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
6129 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
6130 0, 0xfffff, RADEON_CP_PACKET2);
7148 RADEON_CP_PACKET2);
6131 if (!r)
6132 r = r600_uvd_init(rdev);
7150 r = uvd_v1_0_init(rdev);
6133 if (r)
6134 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
6135 }
@@ -6146,6 +7164,10 @@ static int cik_startup(struct radeon_device *rdev)
6146 return r;
6147 }
6148
7167 r = dce6_audio_init(rdev);
7168 if (r)
7169 return r;
7170
6149 return 0;
6150}
6151
@@ -6191,11 +7213,14 @@ int cik_resume(struct radeon_device *rdev)
6191 */
6192int cik_suspend(struct radeon_device *rdev)
6193{
7216 dce6_audio_fini(rdev);
6194 radeon_vm_manager_fini(rdev);
6195 cik_cp_enable(rdev, false);
6196 cik_sdma_enable(rdev, false);
6197 r600_uvd_rbc_stop(rdev);
7220 uvd_v1_0_fini(rdev);
6198 radeon_uvd_suspend(rdev);
7222 cik_fini_pg(rdev);
7223 cik_fini_cg(rdev);
6199 cik_irq_suspend(rdev);
6200 radeon_wb_disable(rdev);
6201 cik_pcie_gart_disable(rdev);
@@ -6316,7 +7341,7 @@ int cik_init(struct radeon_device *rdev)
6316 cik_cp_fini(rdev);
6317 cik_sdma_fini(rdev);
6318 cik_irq_fini(rdev);
6319 si_rlc_fini(rdev);
7344 sumo_rlc_fini(rdev);
6320 cik_mec_fini(rdev);
6321 radeon_wb_fini(rdev);
6322 radeon_ib_pool_fini(rdev);
@@ -6351,13 +7376,16 @@ void cik_fini(struct radeon_device *rdev)
6351{
6352 cik_cp_fini(rdev);
6353 cik_sdma_fini(rdev);
7379 cik_fini_pg(rdev);
7380 cik_fini_cg(rdev);
6354 cik_irq_fini(rdev);
6355 si_rlc_fini(rdev);
7382 sumo_rlc_fini(rdev);
6356 cik_mec_fini(rdev);
6357 radeon_wb_fini(rdev);
6358 radeon_vm_manager_fini(rdev);
6359 radeon_ib_pool_fini(rdev);
6360 radeon_irq_kms_fini(rdev);
7388 uvd_v1_0_fini(rdev);
6361 radeon_uvd_fini(rdev);
6362 cik_pcie_gart_fini(rdev);
6363 r600_vram_scratch_fini(rdev);
@@ -6386,8 +7414,8 @@ static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
6386 struct radeon_crtc *radeon_crtc,
6387 struct drm_display_mode *mode)
6388{
6389 u32 tmp;
7417 u32 tmp, buffer_alloc, i;
6390
7418 u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
6391 /*
6392 * Line Buffer Setup
6393 * There are 6 line buffers, one for each display controller.
@@ -6397,22 +7425,37 @@ static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
6397 * them using the stereo blender.
6398 */
6399 if (radeon_crtc->base.enabled && mode) {
6400 if (mode->crtc_hdisplay < 1920)
7428 if (mode->crtc_hdisplay < 1920) {
6401 tmp = 1;
6402 else if (mode->crtc_hdisplay < 2560)
7430 buffer_alloc = 2;
7431 } else if (mode->crtc_hdisplay < 2560) {
6403 tmp = 2;
6404 else if (mode->crtc_hdisplay < 4096)
7433 buffer_alloc = 2;
7434 } else if (mode->crtc_hdisplay < 4096) {
6405 tmp = 0;
6406 else {
7436 buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
7437 } else {
6407 DRM_DEBUG_KMS("Mode too big for LB!\n");
6408 tmp = 0;
7440 buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
6409 }
6410 } else
7442 } else {
6411 tmp = 1;
7444 buffer_alloc = 0;
7445 }
6412
6413 WREG32(LB_MEMORY_CTRL + radeon_crtc->crtc_offset,
6414 LB_MEMORY_CONFIG(tmp) | LB_MEMORY_SIZE(0x6B0));
6415
7450 WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
7451 DMIF_BUFFERS_ALLOCATED(buffer_alloc));
7452 for (i = 0; i < rdev->usec_timeout; i++) {
7453 if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
7454 DMIF_BUFFERS_ALLOCATED_COMPLETED)
7455 break;
7456 udelay(1);
7457 }
7458
6416 if (radeon_crtc->base.enabled && mode) {
6417 switch (tmp) {
6418 case 0:
@@ -6814,7 +7857,7 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
6814 u32 lb_size, u32 num_heads)
6815{
6816 struct drm_display_mode *mode = &radeon_crtc->base.mode;
6817 struct dce8_wm_params wm;
7860 struct dce8_wm_params wm_low, wm_high;
6818 u32 pixel_period;
6819 u32 line_time = 0;
6820 u32 latency_watermark_a = 0, latency_watermark_b = 0;
@@ -6824,35 +7867,82 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
6824 pixel_period = 1000000 / (u32)mode->clock; 7867 pixel_period = 1000000 / (u32)mode->clock;
6825 line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); 7868 line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
6826 7869
6827 wm.yclk = rdev->pm.current_mclk * 10; 7870 /* watermark for high clocks */
6828 wm.sclk = rdev->pm.current_sclk * 10; 7871 if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
6829 wm.disp_clk = mode->clock; 7872 rdev->pm.dpm_enabled) {
6830 wm.src_width = mode->crtc_hdisplay; 7873 wm_high.yclk =
6831 wm.active_time = mode->crtc_hdisplay * pixel_period; 7874 radeon_dpm_get_mclk(rdev, false) * 10;
6832 wm.blank_time = line_time - wm.active_time; 7875 wm_high.sclk =
6833 wm.interlaced = false; 7876 radeon_dpm_get_sclk(rdev, false) * 10;
7877 } else {
7878 wm_high.yclk = rdev->pm.current_mclk * 10;
7879 wm_high.sclk = rdev->pm.current_sclk * 10;
7880 }
7881
7882 wm_high.disp_clk = mode->clock;
7883 wm_high.src_width = mode->crtc_hdisplay;
7884 wm_high.active_time = mode->crtc_hdisplay * pixel_period;
7885 wm_high.blank_time = line_time - wm_high.active_time;
7886 wm_high.interlaced = false;
6834 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 7887 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
6835 wm.interlaced = true; 7888 wm_high.interlaced = true;
6836 wm.vsc = radeon_crtc->vsc; 7889 wm_high.vsc = radeon_crtc->vsc;
6837 wm.vtaps = 1; 7890 wm_high.vtaps = 1;
6838 if (radeon_crtc->rmx_type != RMX_OFF) 7891 if (radeon_crtc->rmx_type != RMX_OFF)
6839 wm.vtaps = 2; 7892 wm_high.vtaps = 2;
6840 wm.bytes_per_pixel = 4; /* XXX: get this from fb config */ 7893 wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
6841 wm.lb_size = lb_size; 7894 wm_high.lb_size = lb_size;
6842 wm.dram_channels = cik_get_number_of_dram_channels(rdev); 7895 wm_high.dram_channels = cik_get_number_of_dram_channels(rdev);
6843 wm.num_heads = num_heads; 7896 wm_high.num_heads = num_heads;
6844 7897
6845 /* set for high clocks */ 7898 /* set for high clocks */
6846 latency_watermark_a = min(dce8_latency_watermark(&wm), (u32)65535); 7899 latency_watermark_a = min(dce8_latency_watermark(&wm_high), (u32)65535);
7900
7901 /* possibly force display priority to high */
7902 /* should really do this at mode validation time... */
7903 if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
7904 !dce8_average_bandwidth_vs_available_bandwidth(&wm_high) ||
7905 !dce8_check_latency_hiding(&wm_high) ||
7906 (rdev->disp_priority == 2)) {
7907 DRM_DEBUG_KMS("force priority to high\n");
7908 }
7909
7910 /* watermark for low clocks */
7911 if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
7912 rdev->pm.dpm_enabled) {
7913 wm_low.yclk =
7914 radeon_dpm_get_mclk(rdev, true) * 10;
7915 wm_low.sclk =
7916 radeon_dpm_get_sclk(rdev, true) * 10;
7917 } else {
7918 wm_low.yclk = rdev->pm.current_mclk * 10;
7919 wm_low.sclk = rdev->pm.current_sclk * 10;
7920 }
7921
7922 wm_low.disp_clk = mode->clock;
7923 wm_low.src_width = mode->crtc_hdisplay;
7924 wm_low.active_time = mode->crtc_hdisplay * pixel_period;
7925 wm_low.blank_time = line_time - wm_low.active_time;
7926 wm_low.interlaced = false;
7927 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
7928 wm_low.interlaced = true;
7929 wm_low.vsc = radeon_crtc->vsc;
7930 wm_low.vtaps = 1;
7931 if (radeon_crtc->rmx_type != RMX_OFF)
7932 wm_low.vtaps = 2;
7933 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
7934 wm_low.lb_size = lb_size;
7935 wm_low.dram_channels = cik_get_number_of_dram_channels(rdev);
7936 wm_low.num_heads = num_heads;
7937
6847 /* set for low clocks */ 7938 /* set for low clocks */
6848 /* wm.yclk = low clk; wm.sclk = low clk */ 7939 latency_watermark_b = min(dce8_latency_watermark(&wm_low), (u32)65535);
6849 latency_watermark_b = min(dce8_latency_watermark(&wm), (u32)65535);
6850 7940
6851 /* possibly force display priority to high */ 7941 /* possibly force display priority to high */
6852 /* should really do this at mode validation time... */ 7942 /* should really do this at mode validation time... */
6853 if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm) || 7943 if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
6854 !dce8_average_bandwidth_vs_available_bandwidth(&wm) || 7944 !dce8_average_bandwidth_vs_available_bandwidth(&wm_low) ||
6855 !dce8_check_latency_hiding(&wm) || 7945 !dce8_check_latency_hiding(&wm_low) ||
6856 (rdev->disp_priority == 2)) { 7946 (rdev->disp_priority == 2)) {
6857 DRM_DEBUG_KMS("force priority to high\n"); 7947 DRM_DEBUG_KMS("force priority to high\n");
6858 } 7948 }
@@ -6877,6 +7967,11 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
6877 LATENCY_HIGH_WATERMARK(line_time))); 7967 LATENCY_HIGH_WATERMARK(line_time)));
6878 /* restore original selection */ 7968 /* restore original selection */
6879 WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, wm_mask); 7969 WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, wm_mask);
7970
7971 /* save values for DPM */
7972 radeon_crtc->line_time = line_time;
7973 radeon_crtc->wm_high = latency_watermark_a;
7974 radeon_crtc->wm_low = latency_watermark_b;
6880} 7975}
6881 7976
6882/** 7977/**
@@ -6966,39 +8061,307 @@ int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
6966 return r; 8061 return r;
6967} 8062}
6968 8063
6969int cik_uvd_resume(struct radeon_device *rdev) 8064static void cik_pcie_gen3_enable(struct radeon_device *rdev)
6970{ 8065{
6971 uint64_t addr; 8066 struct pci_dev *root = rdev->pdev->bus->self;
6972 uint32_t size; 8067 int bridge_pos, gpu_pos;
6973 int r; 8068 u32 speed_cntl, mask, current_data_rate;
8069 int ret, i;
8070 u16 tmp16;
6974 8071
6975 r = radeon_uvd_resume(rdev); 8072 if (radeon_pcie_gen2 == 0)
6976 if (r) 8073 return;
6977 return r; 8074
8075 if (rdev->flags & RADEON_IS_IGP)
8076 return;
6978 8077
6979 /* programm the VCPU memory controller bits 0-27 */ 8078 if (!(rdev->flags & RADEON_IS_PCIE))
6980 addr = rdev->uvd.gpu_addr >> 3; 8079 return;
6981 size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3; 8080
6982 WREG32(UVD_VCPU_CACHE_OFFSET0, addr); 8081 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
6983 WREG32(UVD_VCPU_CACHE_SIZE0, size); 8082 if (ret != 0)
8083 return;
6984 8084
6985 addr += size; 8085 if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
6986 size = RADEON_UVD_STACK_SIZE >> 3; 8086 return;
6987 WREG32(UVD_VCPU_CACHE_OFFSET1, addr); 8087
6988 WREG32(UVD_VCPU_CACHE_SIZE1, size); 8088 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
8089 current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
8090 LC_CURRENT_DATA_RATE_SHIFT;
8091 if (mask & DRM_PCIE_SPEED_80) {
8092 if (current_data_rate == 2) {
8093 DRM_INFO("PCIE gen 3 link speeds already enabled\n");
8094 return;
8095 }
8096 DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
8097 } else if (mask & DRM_PCIE_SPEED_50) {
8098 if (current_data_rate == 1) {
8099 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
8100 return;
8101 }
8102 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
8103 }
6989 8104
6990 addr += size; 8105 bridge_pos = pci_pcie_cap(root);
6991 size = RADEON_UVD_HEAP_SIZE >> 3; 8106 if (!bridge_pos)
6992 WREG32(UVD_VCPU_CACHE_OFFSET2, addr); 8107 return;
6993 WREG32(UVD_VCPU_CACHE_SIZE2, size);
6994 8108
6995 /* bits 28-31 */ 8109 gpu_pos = pci_pcie_cap(rdev->pdev);
6996 addr = (rdev->uvd.gpu_addr >> 28) & 0xF; 8110 if (!gpu_pos)
6997 WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0)); 8111 return;
6998 8112
6999 /* bits 32-39 */ 8113 if (mask & DRM_PCIE_SPEED_80) {
7000 addr = (rdev->uvd.gpu_addr >> 32) & 0xFF; 8114 /* re-try equalization if gen3 is not already enabled */
7001 WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); 8115 if (current_data_rate != 2) {
8116 u16 bridge_cfg, gpu_cfg;
8117 u16 bridge_cfg2, gpu_cfg2;
8118 u32 max_lw, current_lw, tmp;
8119
8120 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
8121 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
8122
8123 tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
8124 pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
8125
8126 tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
8127 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
8128
8129 tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
8130 max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
8131 current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
8132
8133 if (current_lw < max_lw) {
8134 tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
8135 if (tmp & LC_RENEGOTIATION_SUPPORT) {
8136 tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
8137 tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
8138 tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
8139 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
8140 }
8141 }
7002 8142
7003 return 0; 8143 for (i = 0; i < 10; i++) {
8144 /* check status */
8145 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
8146 if (tmp16 & PCI_EXP_DEVSTA_TRPND)
8147 break;
8148
8149 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
8150 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
8151
8152 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
8153 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
8154
8155 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
8156 tmp |= LC_SET_QUIESCE;
8157 WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
8158
8159 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
8160 tmp |= LC_REDO_EQ;
8161 WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
8162
8163 mdelay(100);
8164
8165 /* linkctl */
8166 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
8167 tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
8168 tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
8169 pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
8170
8171 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
8172 tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
8173 tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
8174 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
8175
8176 /* linkctl2 */
8177 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
8178 tmp16 &= ~((1 << 4) | (7 << 9));
8179 tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
8180 pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
8181
8182 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
8183 tmp16 &= ~((1 << 4) | (7 << 9));
8184 tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
8185 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
8186
8187 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
8188 tmp &= ~LC_SET_QUIESCE;
8189 WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
8190 }
8191 }
8192 }
8193
8194 /* set the link speed */
8195 speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
8196 speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
8197 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
8198
8199 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
8200 tmp16 &= ~0xf;
8201 if (mask & DRM_PCIE_SPEED_80)
8202 tmp16 |= 3; /* gen3 */
8203 else if (mask & DRM_PCIE_SPEED_50)
8204 tmp16 |= 2; /* gen2 */
8205 else
8206 tmp16 |= 1; /* gen1 */
8207 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
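	/* (target link speed field in LNKCTL2: 1/2/3 select 2.5/5.0/8.0 GT/s) */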
8208
8209 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
8210 speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
8211 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
8212
8213 for (i = 0; i < rdev->usec_timeout; i++) {
8214 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
8215 if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
8216 break;
8217 udelay(1);
8218 }
8219}
8220
8221static void cik_program_aspm(struct radeon_device *rdev)
8222{
8223 u32 data, orig;
8224 bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
8225 bool disable_clkreq = false;
8226
8227 if (radeon_aspm == 0)
8228 return;
8229
8230 /* XXX double check IGPs */
8231 if (rdev->flags & RADEON_IS_IGP)
8232 return;
8233
8234 if (!(rdev->flags & RADEON_IS_PCIE))
8235 return;
8236
8237 orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
8238 data &= ~LC_XMIT_N_FTS_MASK;
8239 data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
8240 if (orig != data)
8241 WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
8242
8243 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
8244 data |= LC_GO_TO_RECOVERY;
8245 if (orig != data)
8246 WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
8247
8248 orig = data = RREG32_PCIE_PORT(PCIE_P_CNTL);
8249 data |= P_IGNORE_EDB_ERR;
8250 if (orig != data)
8251 WREG32_PCIE_PORT(PCIE_P_CNTL, data);
8252
8253 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
8254 data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
8255 data |= LC_PMI_TO_L1_DIS;
8256 if (!disable_l0s)
8257 data |= LC_L0S_INACTIVITY(7);
8258
8259 if (!disable_l1) {
8260 data |= LC_L1_INACTIVITY(7);
8261 data &= ~LC_PMI_TO_L1_DIS;
8262 if (orig != data)
8263 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
8264
8265 if (!disable_plloff_in_l1) {
8266 bool clk_req_support;
8267
8268 orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0);
8269 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
8270 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
8271 if (orig != data)
8272 WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0, data);
8273
8274 orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1);
8275 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
8276 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
8277 if (orig != data)
8278 WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1, data);
8279
8280 orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0);
8281 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
8282 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
8283 if (orig != data)
8284 WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0, data);
8285
8286 orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1);
8287 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
8288 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
8289 if (orig != data)
8290 WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1, data);
8291
8292 orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
8293 data &= ~LC_DYN_LANES_PWR_STATE_MASK;
8294 data |= LC_DYN_LANES_PWR_STATE(3);
8295 if (orig != data)
8296 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
8297
8298 if (!disable_clkreq) {
8299 struct pci_dev *root = rdev->pdev->bus->self;
8300 u32 lnkcap;
8301
8302 clk_req_support = false;
8303 pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
8304 if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
8305 clk_req_support = true;
8306 } else {
8307 clk_req_support = false;
8308 }
8309
8310 if (clk_req_support) {
8311 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
8312 data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
8313 if (orig != data)
8314 WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
8315
8316 orig = data = RREG32_SMC(THM_CLK_CNTL);
8317 data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
8318 data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
8319 if (orig != data)
8320 WREG32_SMC(THM_CLK_CNTL, data);
8321
8322 orig = data = RREG32_SMC(MISC_CLK_CTRL);
8323 data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
8324 data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
8325 if (orig != data)
8326 WREG32_SMC(MISC_CLK_CTRL, data);
8327
8328 orig = data = RREG32_SMC(CG_CLKPIN_CNTL);
8329 data &= ~BCLK_AS_XCLK;
8330 if (orig != data)
8331 WREG32_SMC(CG_CLKPIN_CNTL, data);
8332
8333 orig = data = RREG32_SMC(CG_CLKPIN_CNTL_2);
8334 data &= ~FORCE_BIF_REFCLK_EN;
8335 if (orig != data)
8336 WREG32_SMC(CG_CLKPIN_CNTL_2, data);
8337
8338 orig = data = RREG32_SMC(MPLL_BYPASSCLK_SEL);
8339 data &= ~MPLL_CLKOUT_SEL_MASK;
8340 data |= MPLL_CLKOUT_SEL(4);
8341 if (orig != data)
8342 WREG32_SMC(MPLL_BYPASSCLK_SEL, data);
8343 }
8344 }
8345 } else {
8346 if (orig != data)
8347 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
8348 }
8349
8350 orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
8351 data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
8352 if (orig != data)
8353 WREG32_PCIE_PORT(PCIE_CNTL2, data);
8354
8355 if (!disable_l0s) {
8356 data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
 8357	if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
8358 data = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
8359 if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
8360 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
8361 data &= ~LC_L0S_INACTIVITY_MASK;
8362 if (orig != data)
8363 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
8364 }
8365 }
8366 }
7004} 8367}
diff --git a/drivers/gpu/drm/radeon/cik_reg.h b/drivers/gpu/drm/radeon/cik_reg.h
index d71e46d571f5..ca1bb6133580 100644
--- a/drivers/gpu/drm/radeon/cik_reg.h
+++ b/drivers/gpu/drm/radeon/cik_reg.h
@@ -24,6 +24,9 @@
24#ifndef __CIK_REG_H__ 24#ifndef __CIK_REG_H__
25#define __CIK_REG_H__ 25#define __CIK_REG_H__
26 26
27#define CIK_DIDT_IND_INDEX 0xca00
28#define CIK_DIDT_IND_DATA 0xca04
29
27#define CIK_DC_GPIO_HPD_MASK 0x65b0 30#define CIK_DC_GPIO_HPD_MASK 0x65b0
28#define CIK_DC_GPIO_HPD_A 0x65b4 31#define CIK_DC_GPIO_HPD_A 0x65b4
29#define CIK_DC_GPIO_HPD_EN 0x65b8 32#define CIK_DC_GPIO_HPD_EN 0x65b8
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
new file mode 100644
index 000000000000..b6286068e111
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -0,0 +1,785 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <linux/firmware.h>
25#include <drm/drmP.h>
26#include "radeon.h"
27#include "radeon_asic.h"
28#include "cikd.h"
29
30/* sdma */
31#define CIK_SDMA_UCODE_SIZE 1050
32#define CIK_SDMA_UCODE_VERSION 64
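/* (ucode image length in DWs; cik_sdma_load_microcode() below writes the
 *  image followed by one extra version DW) */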
33
34u32 cik_gpu_check_soft_reset(struct radeon_device *rdev);
35
36/*
37 * sDMA - System DMA
38 * Starting with CIK, the GPU has new asynchronous
39 * DMA engines. These engines are used for compute
40 * and gfx. There are two DMA engines (SDMA0, SDMA1)
41 * and each one supports 1 ring buffer used for gfx
42 * and 2 queues used for compute.
43 *
44 * The programming model is very similar to the CP
 45 * (ring buffer, IBs, etc.), but sDMA has its own
46 * packet format that is different from the PM4 format
47 * used by the CP. sDMA supports copying data, writing
48 * embedded data, solid fills, and a number of other
49 * things. It also has support for tiling/detiling of
50 * buffers.
51 */
52
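/* Editor's note: a minimal sketch of the sDMA packet header layout the
 * helpers below rely on, assumed to match the SDMA_PACKET() macro in
 * cikd.h (opcode in bits 7:0, sub-opcode in bits 15:8, packet-specific
 * extra bits in 31:16):
 *
 *   #define SDMA_PACKET(op, sub_op, e)  ((((e) & 0xFFFF) << 16) | \
 *                                        (((sub_op) & 0xFF) << 8) | \
 *                                        (((op) & 0xFF) << 0))
 */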
53/**
54 * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
55 *
56 * @rdev: radeon_device pointer
57 * @ib: IB object to schedule
58 *
59 * Schedule an IB in the DMA ring (CIK).
60 */
61void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
62 struct radeon_ib *ib)
63{
64 struct radeon_ring *ring = &rdev->ring[ib->ring];
65 u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;
66
67 if (rdev->wb.enabled) {
68 u32 next_rptr = ring->wptr + 5;
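		/* editor's note: the write-back packet below is 5 DWs; the
		 * padding aligns next_rptr so it points just past the NOPs
		 * and the 4 DW IB packet emitted after this block, keeping
		 * the IB packet on the 8 DW boundary sDMA requires */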
69 while ((next_rptr & 7) != 4)
70 next_rptr++;
71 next_rptr += 4;
72 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
73 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
74 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
75 radeon_ring_write(ring, 1); /* number of DWs to follow */
76 radeon_ring_write(ring, next_rptr);
77 }
78
 79	/* IB packet must end on an 8 DW boundary */
80 while ((ring->wptr & 7) != 4)
81 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
82 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
83 radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
84 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
85 radeon_ring_write(ring, ib->length_dw);
86
87}
88
89/**
90 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
91 *
92 * @rdev: radeon_device pointer
93 * @fence: radeon fence object
94 *
95 * Add a DMA fence packet to the ring to write
 96 * the fence seq number and a DMA trap packet to generate
97 * an interrupt if needed (CIK).
98 */
99void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
100 struct radeon_fence *fence)
101{
102 struct radeon_ring *ring = &rdev->ring[fence->ring];
103 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
104 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
105 SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
106 u32 ref_and_mask;
107
108 if (fence->ring == R600_RING_TYPE_DMA_INDEX)
109 ref_and_mask = SDMA0;
110 else
111 ref_and_mask = SDMA1;
112
113 /* write the fence */
114 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
115 radeon_ring_write(ring, addr & 0xffffffff);
116 radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
117 radeon_ring_write(ring, fence->seq);
118 /* generate an interrupt */
119 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
120 /* flush HDP */
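	/* (the POLL_REG_MEM packet below presumably writes ref_and_mask to
	 *  GPU_HDP_FLUSH_REQ and polls GPU_HDP_FLUSH_DONE until this
	 *  engine's bit matches) */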
121 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
122 radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
123 radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
124 radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
125 radeon_ring_write(ring, ref_and_mask); /* MASK */
126 radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
127}
128
129/**
130 * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
131 *
132 * @rdev: radeon_device pointer
133 * @ring: radeon_ring structure holding ring information
134 * @semaphore: radeon semaphore object
135 * @emit_wait: wait or signal semaphore
136 *
 137 * Add a DMA semaphore packet to the ring to wait on or signal
138 * other rings (CIK).
139 */
140void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
141 struct radeon_ring *ring,
142 struct radeon_semaphore *semaphore,
143 bool emit_wait)
144{
145 u64 addr = semaphore->gpu_addr;
146 u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
147
148 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
149 radeon_ring_write(ring, addr & 0xfffffff8);
150 radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
151}
152
153/**
154 * cik_sdma_gfx_stop - stop the gfx async dma engines
155 *
156 * @rdev: radeon_device pointer
157 *
158 * Stop the gfx async dma ring buffers (CIK).
159 */
160static void cik_sdma_gfx_stop(struct radeon_device *rdev)
161{
162 u32 rb_cntl, reg_offset;
163 int i;
164
165 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
166
167 for (i = 0; i < 2; i++) {
168 if (i == 0)
169 reg_offset = SDMA0_REGISTER_OFFSET;
170 else
171 reg_offset = SDMA1_REGISTER_OFFSET;
172 rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
173 rb_cntl &= ~SDMA_RB_ENABLE;
174 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
175 WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
176 }
177}
178
179/**
180 * cik_sdma_rlc_stop - stop the compute async dma engines
181 *
182 * @rdev: radeon_device pointer
183 *
184 * Stop the compute async dma queues (CIK).
185 */
186static void cik_sdma_rlc_stop(struct radeon_device *rdev)
187{
188 /* XXX todo */
189}
190
191/**
 192 * cik_sdma_enable - halt or unhalt the async dma engines
193 *
194 * @rdev: radeon_device pointer
195 * @enable: enable/disable the DMA MEs.
196 *
197 * Halt or unhalt the async dma engines (CIK).
198 */
199void cik_sdma_enable(struct radeon_device *rdev, bool enable)
200{
201 u32 me_cntl, reg_offset;
202 int i;
203
204 for (i = 0; i < 2; i++) {
205 if (i == 0)
206 reg_offset = SDMA0_REGISTER_OFFSET;
207 else
208 reg_offset = SDMA1_REGISTER_OFFSET;
209 me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
210 if (enable)
211 me_cntl &= ~SDMA_HALT;
212 else
213 me_cntl |= SDMA_HALT;
214 WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
215 }
216}
217
218/**
219 * cik_sdma_gfx_resume - setup and start the async dma engines
220 *
221 * @rdev: radeon_device pointer
222 *
223 * Set up the gfx DMA ring buffers and enable them (CIK).
224 * Returns 0 for success, error for failure.
225 */
226static int cik_sdma_gfx_resume(struct radeon_device *rdev)
227{
228 struct radeon_ring *ring;
229 u32 rb_cntl, ib_cntl;
230 u32 rb_bufsz;
231 u32 reg_offset, wb_offset;
232 int i, r;
233
234 for (i = 0; i < 2; i++) {
235 if (i == 0) {
236 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
237 reg_offset = SDMA0_REGISTER_OFFSET;
238 wb_offset = R600_WB_DMA_RPTR_OFFSET;
239 } else {
240 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
241 reg_offset = SDMA1_REGISTER_OFFSET;
242 wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
243 }
244
245 WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
246 WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
247
248 /* Set ring buffer size in dwords */
249 rb_bufsz = order_base_2(ring->ring_size / 4);
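		/* (order_base_2() yields log2 of the ring size in DWs; the
		 * RB size field sits above the enable bit, hence << 1 below) */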
250 rb_cntl = rb_bufsz << 1;
251#ifdef __BIG_ENDIAN
252 rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
253#endif
254 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
255
256 /* Initialize the ring buffer's read and write pointers */
257 WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
258 WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);
259
260 /* set the wb address whether it's enabled or not */
261 WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
262 upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
263 WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
264 ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
265
266 if (rdev->wb.enabled)
267 rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;
268
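		/* ring base address is 256-byte aligned: bits 39:8 go in
		 * RB_BASE and the remaining high bits in RB_BASE_HI,
		 * hence the >> 8 and >> 40 below */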
269 WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
270 WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);
271
272 ring->wptr = 0;
273 WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
274
275 ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;
276
277 /* enable DMA RB */
278 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);
279
280 ib_cntl = SDMA_IB_ENABLE;
281#ifdef __BIG_ENDIAN
282 ib_cntl |= SDMA_IB_SWAP_ENABLE;
283#endif
284 /* enable DMA IBs */
285 WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);
286
287 ring->ready = true;
288
289 r = radeon_ring_test(rdev, ring->idx, ring);
290 if (r) {
291 ring->ready = false;
292 return r;
293 }
294 }
295
296 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
297
298 return 0;
299}
300
301/**
302 * cik_sdma_rlc_resume - setup and start the async dma engines
303 *
304 * @rdev: radeon_device pointer
305 *
306 * Set up the compute DMA queues and enable them (CIK).
307 * Returns 0 for success, error for failure.
308 */
309static int cik_sdma_rlc_resume(struct radeon_device *rdev)
310{
311 /* XXX todo */
312 return 0;
313}
314
315/**
316 * cik_sdma_load_microcode - load the sDMA ME ucode
317 *
318 * @rdev: radeon_device pointer
319 *
320 * Loads the sDMA0/1 ucode.
321 * Returns 0 for success, -EINVAL if the ucode is not available.
322 */
323static int cik_sdma_load_microcode(struct radeon_device *rdev)
324{
325 const __be32 *fw_data;
326 int i;
327
328 if (!rdev->sdma_fw)
329 return -EINVAL;
330
331 /* stop the gfx rings and rlc compute queues */
332 cik_sdma_gfx_stop(rdev);
333 cik_sdma_rlc_stop(rdev);
334
335 /* halt the MEs */
336 cik_sdma_enable(rdev, false);
337
338 /* sdma0 */
339 fw_data = (const __be32 *)rdev->sdma_fw->data;
340 WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
341 for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
342 WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
343 WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
344
345 /* sdma1 */
346 fw_data = (const __be32 *)rdev->sdma_fw->data;
347 WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
348 for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
349 WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
350 WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
351
352 WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
353 WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
354 return 0;
355}
356
357/**
358 * cik_sdma_resume - setup and start the async dma engines
359 *
360 * @rdev: radeon_device pointer
361 *
362 * Set up the DMA engines and enable them (CIK).
363 * Returns 0 for success, error for failure.
364 */
365int cik_sdma_resume(struct radeon_device *rdev)
366{
367 int r;
368
369 /* Reset dma */
370 WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
371 RREG32(SRBM_SOFT_RESET);
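	/* posting read; hold the engines in reset for ~50us, then release */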
372 udelay(50);
373 WREG32(SRBM_SOFT_RESET, 0);
374 RREG32(SRBM_SOFT_RESET);
375
376 r = cik_sdma_load_microcode(rdev);
377 if (r)
378 return r;
379
380 /* unhalt the MEs */
381 cik_sdma_enable(rdev, true);
382
383 /* start the gfx rings and rlc compute queues */
384 r = cik_sdma_gfx_resume(rdev);
385 if (r)
386 return r;
387 r = cik_sdma_rlc_resume(rdev);
388 if (r)
389 return r;
390
391 return 0;
392}
393
394/**
395 * cik_sdma_fini - tear down the async dma engines
396 *
397 * @rdev: radeon_device pointer
398 *
399 * Stop the async dma engines and free the rings (CIK).
400 */
401void cik_sdma_fini(struct radeon_device *rdev)
402{
403 /* stop the gfx rings and rlc compute queues */
404 cik_sdma_gfx_stop(rdev);
405 cik_sdma_rlc_stop(rdev);
406 /* halt the MEs */
407 cik_sdma_enable(rdev, false);
408 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
409 radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
410 /* XXX - compute dma queue tear down */
411}
412
413/**
414 * cik_copy_dma - copy pages using the DMA engine
415 *
416 * @rdev: radeon_device pointer
417 * @src_offset: src GPU address
418 * @dst_offset: dst GPU address
419 * @num_gpu_pages: number of GPU pages to xfer
420 * @fence: radeon fence object
421 *
 422 * Copy GPU pages using the DMA engine (CIK).
423 * Used by the radeon ttm implementation to move pages if
424 * registered as the asic copy callback.
425 */
426int cik_copy_dma(struct radeon_device *rdev,
427 uint64_t src_offset, uint64_t dst_offset,
428 unsigned num_gpu_pages,
429 struct radeon_fence **fence)
430{
431 struct radeon_semaphore *sem = NULL;
432 int ring_index = rdev->asic->copy.dma_ring_index;
433 struct radeon_ring *ring = &rdev->ring[ring_index];
434 u32 size_in_bytes, cur_size_in_bytes;
435 int i, num_loops;
436 int r = 0;
437
438 r = radeon_semaphore_create(rdev, &sem);
439 if (r) {
440 DRM_ERROR("radeon: moving bo (%d).\n", r);
441 return r;
442 }
443
444 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
445 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
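	/* each linear COPY packet moves at most 0x1fffff bytes, so reserve
	 * 7 DWs per packet plus headroom for the sync and fence packets */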
446 r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
447 if (r) {
448 DRM_ERROR("radeon: moving bo (%d).\n", r);
449 radeon_semaphore_free(rdev, &sem, NULL);
450 return r;
451 }
452
453 if (radeon_fence_need_sync(*fence, ring->idx)) {
454 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
455 ring->idx);
456 radeon_fence_note_sync(*fence, ring->idx);
457 } else {
458 radeon_semaphore_free(rdev, &sem, NULL);
459 }
460
461 for (i = 0; i < num_loops; i++) {
462 cur_size_in_bytes = size_in_bytes;
463 if (cur_size_in_bytes > 0x1fffff)
464 cur_size_in_bytes = 0x1fffff;
465 size_in_bytes -= cur_size_in_bytes;
466 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
467 radeon_ring_write(ring, cur_size_in_bytes);
468 radeon_ring_write(ring, 0); /* src/dst endian swap */
469 radeon_ring_write(ring, src_offset & 0xffffffff);
470 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
471 radeon_ring_write(ring, dst_offset & 0xfffffffc);
472 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
473 src_offset += cur_size_in_bytes;
474 dst_offset += cur_size_in_bytes;
475 }
476
477 r = radeon_fence_emit(rdev, fence, ring->idx);
478 if (r) {
479 radeon_ring_unlock_undo(rdev, ring);
480 return r;
481 }
482
483 radeon_ring_unlock_commit(rdev, ring);
484 radeon_semaphore_free(rdev, &sem, *fence);
485
486 return r;
487}
488
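/* Editor's note: a hedged usage sketch (not part of this patch) of how a
 * caller might drive cik_copy_dma(); the fence handling follows the
 * pattern used by other radeon copy callbacks:
 *
 *   struct radeon_fence *fence = NULL;
 *   int r = cik_copy_dma(rdev, src_gpu, dst_gpu, npages, &fence);
 *   if (!r)
 *           r = radeon_fence_wait(fence, false); // block until copy lands
 *   radeon_fence_unref(&fence);
 */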
489/**
490 * cik_sdma_ring_test - simple async dma engine test
491 *
492 * @rdev: radeon_device pointer
493 * @ring: radeon_ring structure holding ring information
494 *
 495 * Test the DMA engine by using it to write a
 496 * value to memory (CIK).
497 * Returns 0 for success, error for failure.
498 */
499int cik_sdma_ring_test(struct radeon_device *rdev,
500 struct radeon_ring *ring)
501{
502 unsigned i;
503 int r;
504 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
505 u32 tmp;
506
507 if (!ptr) {
508 DRM_ERROR("invalid vram scratch pointer\n");
509 return -EINVAL;
510 }
511
512 tmp = 0xCAFEDEAD;
513 writel(tmp, ptr);
514
515 r = radeon_ring_lock(rdev, ring, 4);
516 if (r) {
517 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
518 return r;
519 }
520 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
521 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
522 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
523 radeon_ring_write(ring, 1); /* number of DWs to follow */
524 radeon_ring_write(ring, 0xDEADBEEF);
525 radeon_ring_unlock_commit(rdev, ring);
526
527 for (i = 0; i < rdev->usec_timeout; i++) {
528 tmp = readl(ptr);
529 if (tmp == 0xDEADBEEF)
530 break;
531 DRM_UDELAY(1);
532 }
533
534 if (i < rdev->usec_timeout) {
535 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
536 } else {
537 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
538 ring->idx, tmp);
539 r = -EINVAL;
540 }
541 return r;
542}
543
544/**
545 * cik_sdma_ib_test - test an IB on the DMA engine
546 *
547 * @rdev: radeon_device pointer
548 * @ring: radeon_ring structure holding ring information
549 *
550 * Test a simple IB in the DMA ring (CIK).
551 * Returns 0 on success, error on failure.
552 */
553int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
554{
555 struct radeon_ib ib;
556 unsigned i;
557 int r;
558 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
559 u32 tmp = 0;
560
561 if (!ptr) {
562 DRM_ERROR("invalid vram scratch pointer\n");
563 return -EINVAL;
564 }
565
566 tmp = 0xCAFEDEAD;
567 writel(tmp, ptr);
568
569 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
570 if (r) {
571 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
572 return r;
573 }
574
575 ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
576 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
577 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
578 ib.ptr[3] = 1;
579 ib.ptr[4] = 0xDEADBEEF;
580 ib.length_dw = 5;
581
582 r = radeon_ib_schedule(rdev, &ib, NULL);
583 if (r) {
584 radeon_ib_free(rdev, &ib);
585 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
586 return r;
587 }
588 r = radeon_fence_wait(ib.fence, false);
589 if (r) {
590 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
591 return r;
592 }
593 for (i = 0; i < rdev->usec_timeout; i++) {
594 tmp = readl(ptr);
595 if (tmp == 0xDEADBEEF)
596 break;
597 DRM_UDELAY(1);
598 }
599 if (i < rdev->usec_timeout) {
600 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
601 } else {
602 DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
603 r = -EINVAL;
604 }
605 radeon_ib_free(rdev, &ib);
606 return r;
607}
608
609/**
610 * cik_sdma_is_lockup - Check if the DMA engine is locked up
611 *
612 * @rdev: radeon_device pointer
613 * @ring: radeon_ring structure holding ring information
614 *
615 * Check if the async DMA engine is locked up (CIK).
616 * Returns true if the engine appears to be locked up, false if not.
617 */
618bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
619{
620 u32 reset_mask = cik_gpu_check_soft_reset(rdev);
621 u32 mask;
622
623 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
624 mask = RADEON_RESET_DMA;
625 else
626 mask = RADEON_RESET_DMA1;
627
628 if (!(reset_mask & mask)) {
629 radeon_ring_lockup_update(ring);
630 return false;
631 }
632 /* force ring activities */
633 radeon_ring_force_activity(rdev, ring);
634 return radeon_ring_test_lockup(rdev, ring);
635}
636
637/**
638 * cik_sdma_vm_set_page - update the page tables using sDMA
639 *
640 * @rdev: radeon_device pointer
641 * @ib: indirect buffer to fill with commands
642 * @pe: addr of the page entry
643 * @addr: dst addr to write into pe
644 * @count: number of page entries to update
645 * @incr: increase next addr by incr bytes
646 * @flags: access flags
647 *
648 * Update the page tables using sDMA (CIK).
649 */
650void cik_sdma_vm_set_page(struct radeon_device *rdev,
651 struct radeon_ib *ib,
652 uint64_t pe,
653 uint64_t addr, unsigned count,
654 uint32_t incr, uint32_t flags)
655{
656 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
657 uint64_t value;
658 unsigned ndw;
659
660 if (flags & RADEON_VM_PAGE_SYSTEM) {
661 while (count) {
662 ndw = count * 2;
663 if (ndw > 0xFFFFE)
664 ndw = 0xFFFFE;
665
666 /* for non-physically contiguous pages (system) */
667 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
668 ib->ptr[ib->length_dw++] = pe;
669 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
670 ib->ptr[ib->length_dw++] = ndw;
671 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
672 if (flags & RADEON_VM_PAGE_SYSTEM) {
673 value = radeon_vm_map_gart(rdev, addr);
674 value &= 0xFFFFFFFFFFFFF000ULL;
675 } else if (flags & RADEON_VM_PAGE_VALID) {
676 value = addr;
677 } else {
678 value = 0;
679 }
680 addr += incr;
681 value |= r600_flags;
682 ib->ptr[ib->length_dw++] = value;
683 ib->ptr[ib->length_dw++] = upper_32_bits(value);
684 }
685 }
686 } else {
687 while (count) {
688 ndw = count;
689 if (ndw > 0x7FFFF)
690 ndw = 0x7FFFF;
691
692 if (flags & RADEON_VM_PAGE_VALID)
693 value = addr;
694 else
695 value = 0;
696 /* for physically contiguous pages (vram) */
697 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
698 ib->ptr[ib->length_dw++] = pe; /* dst addr */
699 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
700 ib->ptr[ib->length_dw++] = r600_flags; /* mask */
701 ib->ptr[ib->length_dw++] = 0;
702 ib->ptr[ib->length_dw++] = value; /* value */
703 ib->ptr[ib->length_dw++] = upper_32_bits(value);
704 ib->ptr[ib->length_dw++] = incr; /* increment size */
705 ib->ptr[ib->length_dw++] = 0;
706 ib->ptr[ib->length_dw++] = ndw; /* number of entries */
707 pe += ndw * 8;
708 addr += ndw * incr;
709 count -= ndw;
710 }
711 }
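	/* pad the IB with sDMA NOPs to a multiple of 8 DWs, matching the
	 * alignment requirement noted in cik_sdma_ring_ib_execute() */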
712 while (ib->length_dw & 0x7)
713 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
714}
715
716/**
717 * cik_dma_vm_flush - cik vm flush using sDMA
718 *
 719 * @rdev: radeon_device pointer
 * @ridx: ring index to emit the flush on
 * @vm: radeon_vm pointer
 720 *
721 * Update the page table base and flush the VM TLB
722 * using sDMA (CIK).
723 */
724void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
725{
726 struct radeon_ring *ring = &rdev->ring[ridx];
727 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
728 SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
729 u32 ref_and_mask;
730
731 if (vm == NULL)
732 return;
733
734 if (ridx == R600_RING_TYPE_DMA_INDEX)
735 ref_and_mask = SDMA0;
736 else
737 ref_and_mask = SDMA1;
738
739 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
740 if (vm->id < 8) {
741 radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
742 } else {
743 radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
744 }
745 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
746
747 /* update SH_MEM_* regs */
748 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
749 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
750 radeon_ring_write(ring, VMID(vm->id));
751
752 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
753 radeon_ring_write(ring, SH_MEM_BASES >> 2);
754 radeon_ring_write(ring, 0);
755
756 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
757 radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
758 radeon_ring_write(ring, 0);
759
760 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
761 radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
762 radeon_ring_write(ring, 1);
763
764 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
765 radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
766 radeon_ring_write(ring, 0);
767
768 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
769 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
770 radeon_ring_write(ring, VMID(0));
771
772 /* flush HDP */
773 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
774 radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
775 radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
776 radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
777 radeon_ring_write(ring, ref_and_mask); /* MASK */
778 radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
779
780 /* flush TLB */
781 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
782 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
783 radeon_ring_write(ring, 1 << vm->id);
784}
785
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 7e9275eaef80..203d2a09a1f5 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -28,21 +28,375 @@
28 28
29#define CIK_RB_BITMAP_WIDTH_PER_SH 2 29#define CIK_RB_BITMAP_WIDTH_PER_SH 2
30 30
31/* DIDT IND registers */
32#define DIDT_SQ_CTRL0 0x0
33# define DIDT_CTRL_EN (1 << 0)
34#define DIDT_DB_CTRL0 0x20
35#define DIDT_TD_CTRL0 0x40
36#define DIDT_TCP_CTRL0 0x60
37
31/* SMC IND registers */ 38/* SMC IND registers */
39#define DPM_TABLE_475 0x3F768
40# define SamuBootLevel(x) ((x) << 0)
41# define SamuBootLevel_MASK 0x000000ff
42# define SamuBootLevel_SHIFT 0
43# define AcpBootLevel(x) ((x) << 8)
44# define AcpBootLevel_MASK 0x0000ff00
45# define AcpBootLevel_SHIFT 8
46# define VceBootLevel(x) ((x) << 16)
47# define VceBootLevel_MASK 0x00ff0000
48# define VceBootLevel_SHIFT 16
49# define UvdBootLevel(x) ((x) << 24)
50# define UvdBootLevel_MASK 0xff000000
51# define UvdBootLevel_SHIFT 24
52
53#define FIRMWARE_FLAGS 0x3F800
54# define INTERRUPTS_ENABLED (1 << 0)
55
56#define NB_DPM_CONFIG_1 0x3F9E8
57# define Dpm0PgNbPsLo(x) ((x) << 0)
58# define Dpm0PgNbPsLo_MASK 0x000000ff
59# define Dpm0PgNbPsLo_SHIFT 0
60# define Dpm0PgNbPsHi(x) ((x) << 8)
61# define Dpm0PgNbPsHi_MASK 0x0000ff00
62# define Dpm0PgNbPsHi_SHIFT 8
63# define DpmXNbPsLo(x) ((x) << 16)
64# define DpmXNbPsLo_MASK 0x00ff0000
65# define DpmXNbPsLo_SHIFT 16
66# define DpmXNbPsHi(x) ((x) << 24)
67# define DpmXNbPsHi_MASK 0xff000000
68# define DpmXNbPsHi_SHIFT 24
69
70#define SMC_SYSCON_RESET_CNTL 0x80000000
71# define RST_REG (1 << 0)
72#define SMC_SYSCON_CLOCK_CNTL_0 0x80000004
73# define CK_DISABLE (1 << 0)
74# define CKEN (1 << 24)
75
76#define SMC_SYSCON_MISC_CNTL 0x80000010
77
78#define SMC_SYSCON_MSG_ARG_0 0x80000068
79
80#define SMC_PC_C 0x80000370
81
82#define SMC_SCRATCH9 0x80000424
83
84#define RCU_UC_EVENTS 0xC0000004
85# define BOOT_SEQ_DONE (1 << 7)
86
32#define GENERAL_PWRMGT 0xC0200000 87#define GENERAL_PWRMGT 0xC0200000
88# define GLOBAL_PWRMGT_EN (1 << 0)
89# define STATIC_PM_EN (1 << 1)
90# define THERMAL_PROTECTION_DIS (1 << 2)
91# define THERMAL_PROTECTION_TYPE (1 << 3)
92# define SW_SMIO_INDEX(x) ((x) << 6)
93# define SW_SMIO_INDEX_MASK (1 << 6)
94# define SW_SMIO_INDEX_SHIFT 6
95# define VOLT_PWRMGT_EN (1 << 10)
33# define GPU_COUNTER_CLK (1 << 15) 96# define GPU_COUNTER_CLK (1 << 15)
34 97# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
98
99#define CNB_PWRMGT_CNTL 0xC0200004
100# define GNB_SLOW_MODE(x) ((x) << 0)
101# define GNB_SLOW_MODE_MASK (3 << 0)
102# define GNB_SLOW_MODE_SHIFT 0
103# define GNB_SLOW (1 << 2)
104# define FORCE_NB_PS1 (1 << 3)
105# define DPM_ENABLED (1 << 4)
106
107#define SCLK_PWRMGT_CNTL 0xC0200008
108# define SCLK_PWRMGT_OFF (1 << 0)
109# define RESET_BUSY_CNT (1 << 4)
110# define RESET_SCLK_CNT (1 << 5)
111# define DYNAMIC_PM_EN (1 << 21)
112
113#define TARGET_AND_CURRENT_PROFILE_INDEX 0xC0200014
114# define CURRENT_STATE_MASK (0xf << 4)
115# define CURRENT_STATE_SHIFT 4
116# define CURR_MCLK_INDEX_MASK (0xf << 8)
117# define CURR_MCLK_INDEX_SHIFT 8
118# define CURR_SCLK_INDEX_MASK (0x1f << 16)
119# define CURR_SCLK_INDEX_SHIFT 16
120
121#define CG_SSP 0xC0200044
122# define SST(x) ((x) << 0)
123# define SST_MASK (0xffff << 0)
124# define SSTU(x) ((x) << 16)
125# define SSTU_MASK (0xf << 16)
126
127#define CG_DISPLAY_GAP_CNTL 0xC0200060
128# define DISP_GAP(x) ((x) << 0)
129# define DISP_GAP_MASK (3 << 0)
130# define VBI_TIMER_COUNT(x) ((x) << 4)
131# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
132# define VBI_TIMER_UNIT(x) ((x) << 20)
133# define VBI_TIMER_UNIT_MASK (7 << 20)
134# define DISP_GAP_MCHG(x) ((x) << 24)
135# define DISP_GAP_MCHG_MASK (3 << 24)
136
137#define SMU_VOLTAGE_STATUS 0xC0200094
138# define SMU_VOLTAGE_CURRENT_LEVEL_MASK (0xff << 1)
139# define SMU_VOLTAGE_CURRENT_LEVEL_SHIFT 1
140
141#define TARGET_AND_CURRENT_PROFILE_INDEX_1 0xC02000F0
142# define CURR_PCIE_INDEX_MASK (0xf << 24)
143# define CURR_PCIE_INDEX_SHIFT 24
144
145#define CG_ULV_PARAMETER 0xC0200158
146
147#define CG_FTV_0 0xC02001A8
148#define CG_FTV_1 0xC02001AC
149#define CG_FTV_2 0xC02001B0
150#define CG_FTV_3 0xC02001B4
151#define CG_FTV_4 0xC02001B8
152#define CG_FTV_5 0xC02001BC
153#define CG_FTV_6 0xC02001C0
154#define CG_FTV_7 0xC02001C4
155
156#define CG_DISPLAY_GAP_CNTL2 0xC0200230
157
158#define LCAC_SX0_OVR_SEL 0xC0400D04
159#define LCAC_SX0_OVR_VAL 0xC0400D08
160
161#define LCAC_MC0_CNTL 0xC0400D30
162#define LCAC_MC0_OVR_SEL 0xC0400D34
163#define LCAC_MC0_OVR_VAL 0xC0400D38
164#define LCAC_MC1_CNTL 0xC0400D3C
165#define LCAC_MC1_OVR_SEL 0xC0400D40
166#define LCAC_MC1_OVR_VAL 0xC0400D44
167
168#define LCAC_MC2_OVR_SEL 0xC0400D4C
169#define LCAC_MC2_OVR_VAL 0xC0400D50
170
171#define LCAC_MC3_OVR_SEL 0xC0400D58
172#define LCAC_MC3_OVR_VAL 0xC0400D5C
173
174#define LCAC_CPL_CNTL 0xC0400D80
175#define LCAC_CPL_OVR_SEL 0xC0400D84
176#define LCAC_CPL_OVR_VAL 0xC0400D88
177
178/* dGPU */
179#define CG_THERMAL_CTRL 0xC0300004
180#define DPM_EVENT_SRC(x) ((x) << 0)
181#define DPM_EVENT_SRC_MASK (7 << 0)
182#define DIG_THERM_DPM(x) ((x) << 14)
183#define DIG_THERM_DPM_MASK 0x003FC000
184#define DIG_THERM_DPM_SHIFT 14
185
186#define CG_THERMAL_INT 0xC030000C
187#define CI_DIG_THERM_INTH(x) ((x) << 8)
188#define CI_DIG_THERM_INTH_MASK 0x0000FF00
189#define CI_DIG_THERM_INTH_SHIFT 8
190#define CI_DIG_THERM_INTL(x) ((x) << 16)
191#define CI_DIG_THERM_INTL_MASK 0x00FF0000
192#define CI_DIG_THERM_INTL_SHIFT 16
193#define THERM_INT_MASK_HIGH (1 << 24)
194#define THERM_INT_MASK_LOW (1 << 25)
195
196#define CG_MULT_THERMAL_STATUS 0xC0300014
197#define ASIC_MAX_TEMP(x) ((x) << 0)
198#define ASIC_MAX_TEMP_MASK 0x000001ff
199#define ASIC_MAX_TEMP_SHIFT 0
200#define CTF_TEMP(x) ((x) << 9)
201#define CTF_TEMP_MASK 0x0003fe00
202#define CTF_TEMP_SHIFT 9
203
204#define CG_SPLL_FUNC_CNTL 0xC0500140
205#define SPLL_RESET (1 << 0)
206#define SPLL_PWRON (1 << 1)
207#define SPLL_BYPASS_EN (1 << 3)
208#define SPLL_REF_DIV(x) ((x) << 5)
209#define SPLL_REF_DIV_MASK (0x3f << 5)
210#define SPLL_PDIV_A(x) ((x) << 20)
211#define SPLL_PDIV_A_MASK (0x7f << 20)
212#define SPLL_PDIV_A_SHIFT 20
213#define CG_SPLL_FUNC_CNTL_2 0xC0500144
214#define SCLK_MUX_SEL(x) ((x) << 0)
215#define SCLK_MUX_SEL_MASK (0x1ff << 0)
216#define CG_SPLL_FUNC_CNTL_3 0xC0500148
217#define SPLL_FB_DIV(x) ((x) << 0)
218#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
219#define SPLL_FB_DIV_SHIFT 0
220#define SPLL_DITHEN (1 << 28)
221#define CG_SPLL_FUNC_CNTL_4 0xC050014C
222
223#define CG_SPLL_SPREAD_SPECTRUM 0xC0500164
224#define SSEN (1 << 0)
225#define CLK_S(x) ((x) << 4)
226#define CLK_S_MASK (0xfff << 4)
227#define CLK_S_SHIFT 4
228#define CG_SPLL_SPREAD_SPECTRUM_2 0xC0500168
229#define CLK_V(x) ((x) << 0)
230#define CLK_V_MASK (0x3ffffff << 0)
231#define CLK_V_SHIFT 0
232
233#define MPLL_BYPASSCLK_SEL 0xC050019C
234# define MPLL_CLKOUT_SEL(x) ((x) << 8)
235# define MPLL_CLKOUT_SEL_MASK 0xFF00
35#define CG_CLKPIN_CNTL 0xC05001A0 236#define CG_CLKPIN_CNTL 0xC05001A0
36# define XTALIN_DIVIDE (1 << 1) 237# define XTALIN_DIVIDE (1 << 1)
37 238# define BCLK_AS_XCLK (1 << 2)
239#define CG_CLKPIN_CNTL_2 0xC05001A4
240# define FORCE_BIF_REFCLK_EN (1 << 3)
241# define MUX_TCLK_TO_XCLK (1 << 8)
242#define THM_CLK_CNTL 0xC05001A8
243# define CMON_CLK_SEL(x) ((x) << 0)
244# define CMON_CLK_SEL_MASK 0xFF
245# define TMON_CLK_SEL(x) ((x) << 8)
246# define TMON_CLK_SEL_MASK 0xFF00
247#define MISC_CLK_CTRL 0xC05001AC
248# define DEEP_SLEEP_CLK_SEL(x) ((x) << 0)
249# define DEEP_SLEEP_CLK_SEL_MASK 0xFF
250# define ZCLK_SEL(x) ((x) << 8)
251# define ZCLK_SEL_MASK 0xFF00
252
253/* KV/KB */
254#define CG_THERMAL_INT_CTRL 0xC2100028
255#define DIG_THERM_INTH(x) ((x) << 0)
256#define DIG_THERM_INTH_MASK 0x000000FF
257#define DIG_THERM_INTH_SHIFT 0
258#define DIG_THERM_INTL(x) ((x) << 8)
259#define DIG_THERM_INTL_MASK 0x0000FF00
260#define DIG_THERM_INTL_SHIFT 8
261#define THERM_INTH_MASK (1 << 24)
262#define THERM_INTL_MASK (1 << 25)
263
264/* PCIE registers idx/data 0x38/0x3c */
265#define PB0_PIF_PWRDOWN_0 0x1100012 /* PCIE */
266# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
267# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
268# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
269# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
270# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
271# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
272# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
273# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
274# define PLL_RAMP_UP_TIME_0_SHIFT 24
275#define PB0_PIF_PWRDOWN_1 0x1100013 /* PCIE */
276# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
277# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
278# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
279# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
280# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
281# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
282# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
283# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
284# define PLL_RAMP_UP_TIME_1_SHIFT 24
285
286#define PCIE_CNTL2 0x1001001c /* PCIE */
287# define SLV_MEM_LS_EN (1 << 16)
288# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
289# define MST_MEM_LS_EN (1 << 18)
290# define REPLAY_MEM_LS_EN (1 << 19)
291
292#define PCIE_LC_STATUS1 0x1400028 /* PCIE */
293# define LC_REVERSE_RCVR (1 << 0)
294# define LC_REVERSE_XMIT (1 << 1)
295# define LC_OPERATING_LINK_WIDTH_MASK (0x7 << 2)
296# define LC_OPERATING_LINK_WIDTH_SHIFT 2
297# define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5)
298# define LC_DETECTED_LINK_WIDTH_SHIFT 5
299
300#define PCIE_P_CNTL 0x1400040 /* PCIE */
301# define P_IGNORE_EDB_ERR (1 << 6)
302
303#define PB1_PIF_PWRDOWN_0 0x2100012 /* PCIE */
304#define PB1_PIF_PWRDOWN_1 0x2100013 /* PCIE */
305
306#define PCIE_LC_CNTL 0x100100A0 /* PCIE */
307# define LC_L0S_INACTIVITY(x) ((x) << 8)
308# define LC_L0S_INACTIVITY_MASK (0xf << 8)
309# define LC_L0S_INACTIVITY_SHIFT 8
310# define LC_L1_INACTIVITY(x) ((x) << 12)
311# define LC_L1_INACTIVITY_MASK (0xf << 12)
312# define LC_L1_INACTIVITY_SHIFT 12
313# define LC_PMI_TO_L1_DIS (1 << 16)
314# define LC_ASPM_TO_L1_DIS (1 << 24)
315
316#define PCIE_LC_LINK_WIDTH_CNTL 0x100100A2 /* PCIE */
317# define LC_LINK_WIDTH_SHIFT 0
318# define LC_LINK_WIDTH_MASK 0x7
319# define LC_LINK_WIDTH_X0 0
320# define LC_LINK_WIDTH_X1 1
321# define LC_LINK_WIDTH_X2 2
322# define LC_LINK_WIDTH_X4 3
323# define LC_LINK_WIDTH_X8 4
324# define LC_LINK_WIDTH_X16 6
325# define LC_LINK_WIDTH_RD_SHIFT 4
326# define LC_LINK_WIDTH_RD_MASK 0x70
327# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
328# define LC_RECONFIG_NOW (1 << 8)
329# define LC_RENEGOTIATION_SUPPORT (1 << 9)
330# define LC_RENEGOTIATE_EN (1 << 10)
331# define LC_SHORT_RECONFIG_EN (1 << 11)
332# define LC_UPCONFIGURE_SUPPORT (1 << 12)
333# define LC_UPCONFIGURE_DIS (1 << 13)
334# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
335# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
336# define LC_DYN_LANES_PWR_STATE_SHIFT 21
337#define PCIE_LC_N_FTS_CNTL 0x100100a3 /* PCIE */
338# define LC_XMIT_N_FTS(x) ((x) << 0)
339# define LC_XMIT_N_FTS_MASK (0xff << 0)
340# define LC_XMIT_N_FTS_SHIFT 0
341# define LC_XMIT_N_FTS_OVERRIDE_EN (1 << 8)
342# define LC_N_FTS_MASK (0xff << 24)
343#define PCIE_LC_SPEED_CNTL 0x100100A4 /* PCIE */
344# define LC_GEN2_EN_STRAP (1 << 0)
345# define LC_GEN3_EN_STRAP (1 << 1)
346# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 2)
347# define LC_TARGET_LINK_SPEED_OVERRIDE_MASK (0x3 << 3)
348# define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT 3
349# define LC_FORCE_EN_SW_SPEED_CHANGE (1 << 5)
350# define LC_FORCE_DIS_SW_SPEED_CHANGE (1 << 6)
351# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 7)
352# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 8)
353# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 9)
354# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 10)
355# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 10
356# define LC_CURRENT_DATA_RATE_MASK (0x3 << 13) /* 0/1/2 = gen1/2/3 */
357# define LC_CURRENT_DATA_RATE_SHIFT 13
358# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 16)
359# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 18)
360# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 19)
361# define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20)
362# define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21)
363
364#define PCIE_LC_CNTL2 0x100100B1 /* PCIE */
365# define LC_ALLOW_PDWN_IN_L1 (1 << 17)
366# define LC_ALLOW_PDWN_IN_L23 (1 << 18)
367
368#define PCIE_LC_CNTL3 0x100100B5 /* PCIE */
369# define LC_GO_TO_RECOVERY (1 << 30)
370#define PCIE_LC_CNTL4 0x100100B6 /* PCIE */
371# define LC_REDO_EQ (1 << 5)
372# define LC_SET_QUIESCE (1 << 13)
373
374/* direct registers */
38#define PCIE_INDEX 0x38 375#define PCIE_INDEX 0x38
39#define PCIE_DATA 0x3C 376#define PCIE_DATA 0x3C
40 377
378#define SMC_IND_INDEX_0 0x200
379#define SMC_IND_DATA_0 0x204
380
381#define SMC_IND_ACCESS_CNTL 0x240
382#define AUTO_INCREMENT_IND_0 (1 << 0)
383
384#define SMC_MESSAGE_0 0x250
385#define SMC_MSG_MASK 0xffff
386#define SMC_RESP_0 0x254
387#define SMC_RESP_MASK 0xffff
388
389#define SMC_MSG_ARG_0 0x290
390
41#define VGA_HDP_CONTROL 0x328 391#define VGA_HDP_CONTROL 0x328
42#define VGA_MEMORY_DISABLE (1 << 4) 392#define VGA_MEMORY_DISABLE (1 << 4)
43 393
44#define DMIF_ADDR_CALC 0xC00 394#define DMIF_ADDR_CALC 0xC00
45 395
396#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
397# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
398# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
399
46#define SRBM_GFX_CNTL 0xE44 400#define SRBM_GFX_CNTL 0xE44
47#define PIPEID(x) ((x) << 0) 401#define PIPEID(x) ((x) << 0)
48#define MEID(x) ((x) << 2) 402#define MEID(x) ((x) << 2)
@@ -172,6 +526,10 @@
172#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C 526#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
173#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x1580 527#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x1580
174 528
529#define VM_L2_CG 0x15c0
530#define MC_CG_ENABLE (1 << 18)
531#define MC_LS_ENABLE (1 << 19)
532
175#define MC_SHARED_CHMAP 0x2004 533#define MC_SHARED_CHMAP 0x2004
176#define NOOFCHAN_SHIFT 12 534#define NOOFCHAN_SHIFT 12
177#define NOOFCHAN_MASK 0x0000f000 535#define NOOFCHAN_MASK 0x0000f000
@@ -201,6 +559,17 @@
201 559
202#define MC_SHARED_BLACKOUT_CNTL 0x20ac 560#define MC_SHARED_BLACKOUT_CNTL 0x20ac
203 561
562#define MC_HUB_MISC_HUB_CG 0x20b8
563#define MC_HUB_MISC_VM_CG 0x20bc
564
565#define MC_HUB_MISC_SIP_CG 0x20c0
566
567#define MC_XPB_CLK_GAT 0x2478
568
569#define MC_CITF_MISC_RD_CG 0x2648
570#define MC_CITF_MISC_WR_CG 0x264c
571#define MC_CITF_MISC_VM_CG 0x2650
572
204#define MC_ARB_RAMCFG 0x2760 573#define MC_ARB_RAMCFG 0x2760
205#define NOOFBANK_SHIFT 0 574#define NOOFBANK_SHIFT 0
206#define NOOFBANK_MASK 0x00000003 575#define NOOFBANK_MASK 0x00000003
@@ -215,9 +584,37 @@
215#define NOOFGROUPS_SHIFT 12 584#define NOOFGROUPS_SHIFT 12
216#define NOOFGROUPS_MASK 0x00001000 585#define NOOFGROUPS_MASK 0x00001000
217 586
587#define MC_ARB_DRAM_TIMING 0x2774
588#define MC_ARB_DRAM_TIMING2 0x2778
589
590#define MC_ARB_BURST_TIME 0x2808
591#define STATE0(x) ((x) << 0)
592#define STATE0_MASK (0x1f << 0)
593#define STATE0_SHIFT 0
594#define STATE1(x) ((x) << 5)
595#define STATE1_MASK (0x1f << 5)
596#define STATE1_SHIFT 5
597#define STATE2(x) ((x) << 10)
598#define STATE2_MASK (0x1f << 10)
599#define STATE2_SHIFT 10
600#define STATE3(x) ((x) << 15)
601#define STATE3_MASK (0x1f << 15)
602#define STATE3_SHIFT 15
603
604#define MC_SEQ_RAS_TIMING 0x28a0
605#define MC_SEQ_CAS_TIMING 0x28a4
606#define MC_SEQ_MISC_TIMING 0x28a8
607#define MC_SEQ_MISC_TIMING2 0x28ac
608#define MC_SEQ_PMG_TIMING 0x28b0
609#define MC_SEQ_RD_CTL_D0 0x28b4
610#define MC_SEQ_RD_CTL_D1 0x28b8
611#define MC_SEQ_WR_CTL_D0 0x28bc
612#define MC_SEQ_WR_CTL_D1 0x28c0
613
218#define MC_SEQ_SUP_CNTL 0x28c8 614#define MC_SEQ_SUP_CNTL 0x28c8
219#define RUN_MASK (1 << 0) 615#define RUN_MASK (1 << 0)
220#define MC_SEQ_SUP_PGM 0x28cc 616#define MC_SEQ_SUP_PGM 0x28cc
617#define MC_PMG_AUTO_CMD 0x28d0
221 618
222#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8 619#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8
223#define TRAIN_DONE_D0 (1 << 30) 620#define TRAIN_DONE_D0 (1 << 30)
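
The STATEn macros for MC_ARB_BURST_TIME above come as setter/MASK/SHIFT triples, consumed with the driver's usual read-modify-write idiom; a hypothetical update of one 5-bit field (the 0x10 value is a placeholder):

	u32 tmp = RREG32(MC_ARB_BURST_TIME);

	tmp &= ~STATE0_MASK;		/* clear the old field */
	tmp |= STATE0(0x10);		/* insert the new value */
	WREG32(MC_ARB_BURST_TIME, tmp);
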
@@ -226,10 +623,92 @@
226#define MC_IO_PAD_CNTL_D0 0x29d0 623#define MC_IO_PAD_CNTL_D0 0x29d0
227#define MEM_FALL_OUT_CMD (1 << 8) 624#define MEM_FALL_OUT_CMD (1 << 8)
228 625
626#define MC_SEQ_MISC0 0x2a00
627#define MC_SEQ_MISC0_VEN_ID_SHIFT 8
628#define MC_SEQ_MISC0_VEN_ID_MASK 0x00000f00
629#define MC_SEQ_MISC0_VEN_ID_VALUE 3
630#define MC_SEQ_MISC0_REV_ID_SHIFT 12
631#define MC_SEQ_MISC0_REV_ID_MASK 0x0000f000
632#define MC_SEQ_MISC0_REV_ID_VALUE 1
633#define MC_SEQ_MISC0_GDDR5_SHIFT 28
634#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
635#define MC_SEQ_MISC0_GDDR5_VALUE 5
636#define MC_SEQ_MISC1 0x2a04
637#define MC_SEQ_RESERVE_M 0x2a08
638#define MC_PMG_CMD_EMRS 0x2a0c
639
229#define MC_SEQ_IO_DEBUG_INDEX 0x2a44 640#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
230#define MC_SEQ_IO_DEBUG_DATA 0x2a48 641#define MC_SEQ_IO_DEBUG_DATA 0x2a48
231 642
643#define MC_SEQ_MISC5 0x2a54
644#define MC_SEQ_MISC6 0x2a58
645
646#define MC_SEQ_MISC7 0x2a64
647
648#define MC_SEQ_RAS_TIMING_LP 0x2a6c
649#define MC_SEQ_CAS_TIMING_LP 0x2a70
650#define MC_SEQ_MISC_TIMING_LP 0x2a74
651#define MC_SEQ_MISC_TIMING2_LP 0x2a78
652#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
653#define MC_SEQ_WR_CTL_D1_LP 0x2a80
654#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
655#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
656
657#define MC_PMG_CMD_MRS 0x2aac
658
659#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
660#define MC_SEQ_RD_CTL_D1_LP 0x2b20
661
662#define MC_PMG_CMD_MRS1 0x2b44
663#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
664#define MC_SEQ_PMG_TIMING_LP 0x2b4c
665
666#define MC_SEQ_WR_CTL_2 0x2b54
667#define MC_SEQ_WR_CTL_2_LP 0x2b58
668#define MC_PMG_CMD_MRS2 0x2b5c
669#define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60
670
671#define MCLK_PWRMGT_CNTL 0x2ba0
672# define DLL_SPEED(x) ((x) << 0)
673# define DLL_SPEED_MASK (0x1f << 0)
674# define DLL_READY (1 << 6)
675# define MC_INT_CNTL (1 << 7)
676# define MRDCK0_PDNB (1 << 8)
677# define MRDCK1_PDNB (1 << 9)
678# define MRDCK0_RESET (1 << 16)
679# define MRDCK1_RESET (1 << 17)
680# define DLL_READY_READ (1 << 24)
681#define DLL_CNTL 0x2ba4
682# define MRDCK0_BYPASS (1 << 24)
683# define MRDCK1_BYPASS (1 << 25)
684
685#define MPLL_FUNC_CNTL 0x2bb4
686#define BWCTRL(x) ((x) << 20)
687#define BWCTRL_MASK (0xff << 20)
688#define MPLL_FUNC_CNTL_1 0x2bb8
689#define VCO_MODE(x) ((x) << 0)
690#define VCO_MODE_MASK (3 << 0)
691#define CLKFRAC(x) ((x) << 4)
692#define CLKFRAC_MASK (0xfff << 4)
693#define CLKF(x) ((x) << 16)
694#define CLKF_MASK (0xfff << 16)
695#define MPLL_FUNC_CNTL_2 0x2bbc
696#define MPLL_AD_FUNC_CNTL 0x2bc0
697#define YCLK_POST_DIV(x) ((x) << 0)
698#define YCLK_POST_DIV_MASK (7 << 0)
699#define MPLL_DQ_FUNC_CNTL 0x2bc4
700#define YCLK_SEL(x) ((x) << 4)
701#define YCLK_SEL_MASK (1 << 4)
702
703#define MPLL_SS1 0x2bcc
704#define CLKV(x) ((x) << 0)
705#define CLKV_MASK (0x3ffffff << 0)
706#define MPLL_SS2 0x2bd0
707#define CLKS(x) ((x) << 0)
708#define CLKS_MASK (0xfff << 0)
709
232#define HDP_HOST_PATH_CNTL 0x2C00 710#define HDP_HOST_PATH_CNTL 0x2C00
711#define CLOCK_GATING_DIS (1 << 23)
233#define HDP_NONSURFACE_BASE 0x2C04 712#define HDP_NONSURFACE_BASE 0x2C04
234#define HDP_NONSURFACE_INFO 0x2C08 713#define HDP_NONSURFACE_INFO 0x2C08
235#define HDP_NONSURFACE_SIZE 0x2C0C 714#define HDP_NONSURFACE_SIZE 0x2C0C
@@ -237,6 +716,26 @@
237#define HDP_ADDR_CONFIG 0x2F48 716#define HDP_ADDR_CONFIG 0x2F48
238#define HDP_MISC_CNTL 0x2F4C 717#define HDP_MISC_CNTL 0x2F4C
239#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0) 718#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
719#define HDP_MEM_POWER_LS 0x2F50
720#define HDP_LS_ENABLE (1 << 0)
721
722#define ATC_MISC_CG 0x3350
723
724#define MC_SEQ_CNTL_3 0x3600
725# define CAC_EN (1 << 31)
726#define MC_SEQ_G5PDX_CTRL 0x3604
727#define MC_SEQ_G5PDX_CTRL_LP 0x3608
728#define MC_SEQ_G5PDX_CMD0 0x360c
729#define MC_SEQ_G5PDX_CMD0_LP 0x3610
730#define MC_SEQ_G5PDX_CMD1 0x3614
731#define MC_SEQ_G5PDX_CMD1_LP 0x3618
732
733#define MC_SEQ_PMG_DVS_CTL 0x3628
734#define MC_SEQ_PMG_DVS_CTL_LP 0x362c
735#define MC_SEQ_PMG_DVS_CMD 0x3630
736#define MC_SEQ_PMG_DVS_CMD_LP 0x3634
737#define MC_SEQ_DLL_STBY 0x3638
738#define MC_SEQ_DLL_STBY_LP 0x363c
240 739
241#define IH_RB_CNTL 0x3e00 740#define IH_RB_CNTL 0x3e00
242# define IH_RB_ENABLE (1 << 0) 741# define IH_RB_ENABLE (1 << 0)
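
MC_SEQ_MISC0, added earlier in this hunk, encodes the memory type; DPM code can compare its GDDR5 field against MC_SEQ_MISC0_GDDR5_VALUE to detect GDDR5 parts, roughly as follows (a sketch, not the literal driver code):

	u32 tmp = RREG32(MC_SEQ_MISC0);
	bool gddr5 = ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >>
		      MC_SEQ_MISC0_GDDR5_SHIFT) == MC_SEQ_MISC0_GDDR5_VALUE;
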
@@ -265,6 +764,9 @@
265# define MC_WR_CLEAN_CNT(x) ((x) << 20) 764# define MC_WR_CLEAN_CNT(x) ((x) << 20)
266# define MC_VMID(x) ((x) << 25) 765# define MC_VMID(x) ((x) << 25)
267 766
767#define BIF_LNCNT_RESET 0x5220
768# define RESET_LNCNT_EN (1 << 0)
769
268#define CONFIG_MEMSIZE 0x5428 770#define CONFIG_MEMSIZE 0x5428
269 771
270#define INTERRUPT_CNTL 0x5468 772#define INTERRUPT_CNTL 0x5468
@@ -401,6 +903,9 @@
401# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) 903# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
402# define DC_HPDx_EN (1 << 28) 904# define DC_HPDx_EN (1 << 28)
403 905
906#define DPG_PIPE_STUTTER_CONTROL 0x6cd4
907# define STUTTER_ENABLE (1 << 0)
908
404#define GRBM_CNTL 0x8000 909#define GRBM_CNTL 0x8000
405#define GRBM_READ_TIMEOUT(x) ((x) << 0) 910#define GRBM_READ_TIMEOUT(x) ((x) << 0)
406 911
@@ -504,6 +1009,9 @@
504 1009
505#define CP_RB0_RPTR 0x8700 1010#define CP_RB0_RPTR 0x8700
506#define CP_RB_WPTR_DELAY 0x8704 1011#define CP_RB_WPTR_DELAY 0x8704
1012#define CP_RB_WPTR_POLL_CNTL 0x8708
1013#define IDLE_POLL_COUNT(x) ((x) << 16)
1014#define IDLE_POLL_COUNT_MASK (0xffff << 16)
507 1015
508#define CP_MEQ_THRESHOLDS 0x8764 1016#define CP_MEQ_THRESHOLDS 0x8764
509#define MEQ1_START(x) ((x) << 0) 1017#define MEQ1_START(x) ((x) << 0)
@@ -730,6 +1238,9 @@
730# define CP_RINGID1_INT_STAT (1 << 30) 1238# define CP_RINGID1_INT_STAT (1 << 30)
731# define CP_RINGID0_INT_STAT (1 << 31) 1239# define CP_RINGID0_INT_STAT (1 << 31)
732 1240
1241#define CP_MEM_SLP_CNTL 0xC1E4
1242# define CP_MEM_LS_EN (1 << 0)
1243
733#define CP_CPF_DEBUG 0xC200 1244#define CP_CPF_DEBUG 0xC200
734 1245
735#define CP_PQ_WPTR_POLL_CNTL 0xC20C 1246#define CP_PQ_WPTR_POLL_CNTL 0xC20C
@@ -775,14 +1286,20 @@
775 1286
776#define RLC_MC_CNTL 0xC30C 1287#define RLC_MC_CNTL 0xC30C
777 1288
1289#define RLC_MEM_SLP_CNTL 0xC318
1290# define RLC_MEM_LS_EN (1 << 0)
1291
778#define RLC_LB_CNTR_MAX 0xC348 1292#define RLC_LB_CNTR_MAX 0xC348
779 1293
780#define RLC_LB_CNTL 0xC364 1294#define RLC_LB_CNTL 0xC364
1295# define LOAD_BALANCE_ENABLE (1 << 0)
781 1296
782#define RLC_LB_CNTR_INIT 0xC36C 1297#define RLC_LB_CNTR_INIT 0xC36C
783 1298
784#define RLC_SAVE_AND_RESTORE_BASE 0xC374 1299#define RLC_SAVE_AND_RESTORE_BASE 0xC374
785#define RLC_DRIVER_DMA_STATUS 0xC378 1300#define RLC_DRIVER_DMA_STATUS 0xC378 /* dGPU */
1301#define RLC_CP_TABLE_RESTORE 0xC378 /* APU */
1302#define RLC_PG_DELAY_2 0xC37C
786 1303
787#define RLC_GPM_UCODE_ADDR 0xC388 1304#define RLC_GPM_UCODE_ADDR 0xC388
788#define RLC_GPM_UCODE_DATA 0xC38C 1305#define RLC_GPM_UCODE_DATA 0xC38C
@@ -791,12 +1308,52 @@
791#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC398 1308#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC398
792#define RLC_UCODE_CNTL 0xC39C 1309#define RLC_UCODE_CNTL 0xC39C
793 1310
1311#define RLC_GPM_STAT 0xC400
1312# define RLC_GPM_BUSY (1 << 0)
1313# define GFX_POWER_STATUS (1 << 1)
1314# define GFX_CLOCK_STATUS (1 << 2)
1315
1316#define RLC_PG_CNTL 0xC40C
1317# define GFX_PG_ENABLE (1 << 0)
1318# define GFX_PG_SRC (1 << 1)
1319# define DYN_PER_CU_PG_ENABLE (1 << 2)
1320# define STATIC_PER_CU_PG_ENABLE (1 << 3)
1321# define DISABLE_GDS_PG (1 << 13)
1322# define DISABLE_CP_PG (1 << 15)
1323# define SMU_CLK_SLOWDOWN_ON_PU_ENABLE (1 << 17)
1324# define SMU_CLK_SLOWDOWN_ON_PD_ENABLE (1 << 18)
1325
1326#define RLC_CGTT_MGCG_OVERRIDE 0xC420
794#define RLC_CGCG_CGLS_CTRL 0xC424 1327#define RLC_CGCG_CGLS_CTRL 0xC424
1328# define CGCG_EN (1 << 0)
1329# define CGLS_EN (1 << 1)
1330
1331#define RLC_PG_DELAY 0xC434
795 1332
796#define RLC_LB_INIT_CU_MASK 0xC43C 1333#define RLC_LB_INIT_CU_MASK 0xC43C
797 1334
798#define RLC_LB_PARAMS 0xC444 1335#define RLC_LB_PARAMS 0xC444
799 1336
1337#define RLC_PG_AO_CU_MASK 0xC44C
1338
1339#define RLC_MAX_PG_CU 0xC450
1340# define MAX_PU_CU(x) ((x) << 0)
1341# define MAX_PU_CU_MASK (0xff << 0)
1342#define RLC_AUTO_PG_CTRL 0xC454
1343# define AUTO_PG_EN (1 << 0)
1344# define GRBM_REG_SGIT(x) ((x) << 3)
1345# define GRBM_REG_SGIT_MASK (0xffff << 3)
1346
1347#define RLC_SERDES_WR_CU_MASTER_MASK 0xC474
1348#define RLC_SERDES_WR_NONCU_MASTER_MASK 0xC478
1349#define RLC_SERDES_WR_CTRL 0xC47C
1350#define BPM_ADDR(x) ((x) << 0)
1351#define BPM_ADDR_MASK (0xff << 0)
1352#define CGLS_ENABLE (1 << 16)
1353#define CGCG_OVERRIDE_0 (1 << 20)
1354#define MGCG_OVERRIDE_0 (1 << 22)
1355#define MGCG_OVERRIDE_1 (1 << 23)
1356
800#define RLC_SERDES_CU_MASTER_BUSY 0xC484 1357#define RLC_SERDES_CU_MASTER_BUSY 0xC484
801#define RLC_SERDES_NONCU_MASTER_BUSY 0xC488 1358#define RLC_SERDES_NONCU_MASTER_BUSY 0xC488
802# define SE_MASTER_BUSY_MASK 0x0000ffff 1359# define SE_MASTER_BUSY_MASK 0x0000ffff
@@ -807,6 +1364,13 @@
807#define RLC_GPM_SCRATCH_ADDR 0xC4B0 1364#define RLC_GPM_SCRATCH_ADDR 0xC4B0
808#define RLC_GPM_SCRATCH_DATA 0xC4B4 1365#define RLC_GPM_SCRATCH_DATA 0xC4B4
809 1366
1367#define RLC_GPR_REG2 0xC4E8
1368#define REQ 0x00000001
1369#define MESSAGE(x) ((x) << 1)
1370#define MESSAGE_MASK 0x0000001e
1371#define MSG_ENTER_RLC_SAFE_MODE 1
1372#define MSG_EXIT_RLC_SAFE_MODE 0
1373
810#define CP_HPD_EOP_BASE_ADDR 0xC904 1374#define CP_HPD_EOP_BASE_ADDR 0xC904
811#define CP_HPD_EOP_BASE_ADDR_HI 0xC908 1375#define CP_HPD_EOP_BASE_ADDR_HI 0xC908
812#define CP_HPD_EOP_VMID 0xC90C 1376#define CP_HPD_EOP_VMID 0xC90C
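
RLC_GPR_REG2 above is a small request interface: set REQ together with a message code, then wait for the RLC to clear REQ as its acknowledgement. A sketch of entering safe mode under those assumptions (function name illustrative):

static void example_enter_rlc_safe_mode(struct radeon_device *rdev)
{
	int i;

	WREG32(RLC_GPR_REG2, REQ | MESSAGE(MSG_ENTER_RLC_SAFE_MODE));

	/* the RLC clears REQ once the request has been honoured */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (!(RREG32(RLC_GPR_REG2) & REQ))
			break;
		udelay(1);
	}
}
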
@@ -851,6 +1415,8 @@
851#define MQD_VMID(x) ((x) << 0) 1415#define MQD_VMID(x) ((x) << 0)
852#define MQD_VMID_MASK (0xf << 0) 1416#define MQD_VMID_MASK (0xf << 0)
853 1417
1418#define DB_RENDER_CONTROL 0x28000
1419
854#define PA_SC_RASTER_CONFIG 0x28350 1420#define PA_SC_RASTER_CONFIG 0x28350
855# define RASTER_CONFIG_RB_MAP_0 0 1421# define RASTER_CONFIG_RB_MAP_0 0
856# define RASTER_CONFIG_RB_MAP_1 1 1422# define RASTER_CONFIG_RB_MAP_1 1
@@ -944,6 +1510,16 @@
944 1510
945#define CP_PERFMON_CNTL 0x36020 1511#define CP_PERFMON_CNTL 0x36020
946 1512
1513#define CGTS_SM_CTRL_REG 0x3c000
1514#define SM_MODE(x) ((x) << 17)
1515#define SM_MODE_MASK (0x7 << 17)
1516#define SM_MODE_ENABLE (1 << 20)
1517#define CGTS_OVERRIDE (1 << 21)
1518#define CGTS_LS_OVERRIDE (1 << 22)
1519#define ON_MONITOR_ADD_EN (1 << 23)
1520#define ON_MONITOR_ADD(x) ((x) << 24)
1521#define ON_MONITOR_ADD_MASK (0xff << 24)
1522
947#define CGTS_TCC_DISABLE 0x3c00c 1523#define CGTS_TCC_DISABLE 0x3c00c
948#define CGTS_USER_TCC_DISABLE 0x3c010 1524#define CGTS_USER_TCC_DISABLE 0x3c010
949#define TCC_DISABLE_MASK 0xFFFF0000 1525#define TCC_DISABLE_MASK 0xFFFF0000
@@ -1176,6 +1752,8 @@
1176 1752
1177#define SDMA0_UCODE_ADDR 0xD000 1753#define SDMA0_UCODE_ADDR 0xD000
1178#define SDMA0_UCODE_DATA 0xD004 1754#define SDMA0_UCODE_DATA 0xD004
1755#define SDMA0_POWER_CNTL 0xD008
1756#define SDMA0_CLK_CTRL 0xD00C
1179 1757
1180#define SDMA0_CNTL 0xD010 1758#define SDMA0_CNTL 0xD010
1181# define TRAP_ENABLE (1 << 0) 1759# define TRAP_ENABLE (1 << 0)
@@ -1300,6 +1878,13 @@
1300#define UVD_RBC_RB_RPTR 0xf690 1878#define UVD_RBC_RB_RPTR 0xf690
1301#define UVD_RBC_RB_WPTR 0xf694 1879#define UVD_RBC_RB_WPTR 0xf694
1302 1880
1881#define UVD_CGC_CTRL 0xF4B0
1882# define DCM (1 << 0)
1883# define CG_DT(x) ((x) << 2)
1884# define CG_DT_MASK (0xf << 2)
1885# define CLK_OD(x) ((x) << 6)
1886# define CLK_OD_MASK (0x1f << 6)
1887
1303/* UVD clocks */ 1888/* UVD clocks */
1304 1889
1305#define CG_DCLK_CNTL 0xC050009C 1890#define CG_DCLK_CNTL 0xC050009C
@@ -1310,4 +1895,7 @@
1310#define CG_VCLK_CNTL 0xC05000A4 1895#define CG_VCLK_CNTL 0xC05000A4
1311#define CG_VCLK_STATUS 0xC05000A8 1896#define CG_VCLK_STATUS 0xC05000A8
1312 1897
1898/* UVD CTX indirect */
1899#define UVD_CGC_MEM_CTRL 0xC0
1900
1313#endif 1901#endif
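
Registers tagged /* PCIE */ in this header sit behind the PCIE_INDEX/PCIE_DATA pair from the direct-register block rather than in plain MMIO space; a read goes through an index write followed by a data read, along these lines (locking elided, helper name illustrative):

static u32 example_pcie_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_INDEX, reg);
	(void)RREG32(PCIE_INDEX);	/* post the index write */
	r = RREG32(PCIE_DATA);
	return r;
}
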
diff --git a/drivers/gpu/drm/radeon/clearstate_cayman.h b/drivers/gpu/drm/radeon/clearstate_cayman.h
index c00339440c5e..aa908c55a513 100644
--- a/drivers/gpu/drm/radeon/clearstate_cayman.h
+++ b/drivers/gpu/drm/radeon/clearstate_cayman.h
@@ -1073,7 +1073,7 @@ static const struct cs_extent_def SECT_CTRLCONST_defs[] =
1073 {SECT_CTRLCONST_def_1, 0x0000f3fc, 2 }, 1073 {SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
1074 { 0, 0, 0 } 1074 { 0, 0, 0 }
1075}; 1075};
1076struct cs_section_def cayman_cs_data[] = { 1076static const struct cs_section_def cayman_cs_data[] = {
1077 { SECT_CONTEXT_defs, SECT_CONTEXT }, 1077 { SECT_CONTEXT_defs, SECT_CONTEXT },
1078 { SECT_CLEAR_defs, SECT_CLEAR }, 1078 { SECT_CLEAR_defs, SECT_CLEAR },
1079 { SECT_CTRLCONST_defs, SECT_CTRLCONST }, 1079 { SECT_CTRLCONST_defs, SECT_CTRLCONST },
diff --git a/drivers/gpu/drm/radeon/clearstate_ci.h b/drivers/gpu/drm/radeon/clearstate_ci.h
new file mode 100644
index 000000000000..c3982f9475fb
--- /dev/null
+++ b/drivers/gpu/drm/radeon/clearstate_ci.h
@@ -0,0 +1,944 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24static const unsigned int ci_SECT_CONTEXT_def_1[] =
25{
26 0x00000000, // DB_RENDER_CONTROL
27 0x00000000, // DB_COUNT_CONTROL
28 0x00000000, // DB_DEPTH_VIEW
29 0x00000000, // DB_RENDER_OVERRIDE
30 0x00000000, // DB_RENDER_OVERRIDE2
31 0x00000000, // DB_HTILE_DATA_BASE
32 0, // HOLE
33 0, // HOLE
34 0x00000000, // DB_DEPTH_BOUNDS_MIN
35 0x00000000, // DB_DEPTH_BOUNDS_MAX
36 0x00000000, // DB_STENCIL_CLEAR
37 0x00000000, // DB_DEPTH_CLEAR
38 0x00000000, // PA_SC_SCREEN_SCISSOR_TL
39 0x40004000, // PA_SC_SCREEN_SCISSOR_BR
40 0, // HOLE
41 0x00000000, // DB_DEPTH_INFO
42 0x00000000, // DB_Z_INFO
43 0x00000000, // DB_STENCIL_INFO
44 0x00000000, // DB_Z_READ_BASE
45 0x00000000, // DB_STENCIL_READ_BASE
46 0x00000000, // DB_Z_WRITE_BASE
47 0x00000000, // DB_STENCIL_WRITE_BASE
48 0x00000000, // DB_DEPTH_SIZE
49 0x00000000, // DB_DEPTH_SLICE
50 0, // HOLE
51 0, // HOLE
52 0, // HOLE
53 0, // HOLE
54 0, // HOLE
55 0, // HOLE
56 0, // HOLE
57 0, // HOLE
58 0x00000000, // TA_BC_BASE_ADDR
59 0x00000000, // TA_BC_BASE_ADDR_HI
60 0, // HOLE
61 0, // HOLE
62 0, // HOLE
63 0, // HOLE
64 0, // HOLE
65 0, // HOLE
66 0, // HOLE
67 0, // HOLE
68 0, // HOLE
69 0, // HOLE
70 0, // HOLE
71 0, // HOLE
72 0, // HOLE
73 0, // HOLE
74 0, // HOLE
75 0, // HOLE
76 0, // HOLE
77 0, // HOLE
78 0, // HOLE
79 0, // HOLE
80 0, // HOLE
81 0, // HOLE
82 0, // HOLE
83 0, // HOLE
84 0, // HOLE
85 0, // HOLE
86 0, // HOLE
87 0, // HOLE
88 0, // HOLE
89 0, // HOLE
90 0, // HOLE
91 0, // HOLE
92 0, // HOLE
93 0, // HOLE
94 0, // HOLE
95 0, // HOLE
96 0, // HOLE
97 0, // HOLE
98 0, // HOLE
99 0, // HOLE
100 0, // HOLE
101 0, // HOLE
102 0, // HOLE
103 0, // HOLE
104 0, // HOLE
105 0, // HOLE
106 0, // HOLE
107 0, // HOLE
108 0, // HOLE
109 0, // HOLE
110 0, // HOLE
111 0, // HOLE
112 0, // HOLE
113 0, // HOLE
114 0, // HOLE
115 0, // HOLE
116 0, // HOLE
117 0, // HOLE
118 0, // HOLE
119 0, // HOLE
120 0, // HOLE
121 0, // HOLE
122 0, // HOLE
123 0, // HOLE
124 0, // HOLE
125 0, // HOLE
126 0, // HOLE
127 0, // HOLE
128 0, // HOLE
129 0, // HOLE
130 0, // HOLE
131 0, // HOLE
132 0, // HOLE
133 0, // HOLE
134 0, // HOLE
135 0, // HOLE
136 0, // HOLE
137 0, // HOLE
138 0, // HOLE
139 0, // HOLE
140 0, // HOLE
141 0, // HOLE
142 0, // HOLE
143 0, // HOLE
144 0, // HOLE
145 0, // HOLE
146 0, // HOLE
147 0, // HOLE
148 0x00000000, // COHER_DEST_BASE_HI_0
149 0x00000000, // COHER_DEST_BASE_HI_1
150 0x00000000, // COHER_DEST_BASE_HI_2
151 0x00000000, // COHER_DEST_BASE_HI_3
152 0x00000000, // COHER_DEST_BASE_2
153 0x00000000, // COHER_DEST_BASE_3
154 0x00000000, // PA_SC_WINDOW_OFFSET
155 0x80000000, // PA_SC_WINDOW_SCISSOR_TL
156 0x40004000, // PA_SC_WINDOW_SCISSOR_BR
157 0x0000ffff, // PA_SC_CLIPRECT_RULE
158 0x00000000, // PA_SC_CLIPRECT_0_TL
159 0x40004000, // PA_SC_CLIPRECT_0_BR
160 0x00000000, // PA_SC_CLIPRECT_1_TL
161 0x40004000, // PA_SC_CLIPRECT_1_BR
162 0x00000000, // PA_SC_CLIPRECT_2_TL
163 0x40004000, // PA_SC_CLIPRECT_2_BR
164 0x00000000, // PA_SC_CLIPRECT_3_TL
165 0x40004000, // PA_SC_CLIPRECT_3_BR
166 0xaa99aaaa, // PA_SC_EDGERULE
167 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
168 0xffffffff, // CB_TARGET_MASK
169 0xffffffff, // CB_SHADER_MASK
170 0x80000000, // PA_SC_GENERIC_SCISSOR_TL
171 0x40004000, // PA_SC_GENERIC_SCISSOR_BR
172 0x00000000, // COHER_DEST_BASE_0
173 0x00000000, // COHER_DEST_BASE_1
174 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
175 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
176 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
177 0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
178 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
179 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
180 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
181 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
182 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
183 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
184 0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
185 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
186 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
187 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
188 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
189 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
190 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
191 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
192 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
193 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
194 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
195 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
196 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
197 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
198 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
199 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
200 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
201 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
202 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
203 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
204 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
205 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
206 0x00000000, // PA_SC_VPORT_ZMIN_0
207 0x3f800000, // PA_SC_VPORT_ZMAX_0
208 0x00000000, // PA_SC_VPORT_ZMIN_1
209 0x3f800000, // PA_SC_VPORT_ZMAX_1
210 0x00000000, // PA_SC_VPORT_ZMIN_2
211 0x3f800000, // PA_SC_VPORT_ZMAX_2
212 0x00000000, // PA_SC_VPORT_ZMIN_3
213 0x3f800000, // PA_SC_VPORT_ZMAX_3
214 0x00000000, // PA_SC_VPORT_ZMIN_4
215 0x3f800000, // PA_SC_VPORT_ZMAX_4
216 0x00000000, // PA_SC_VPORT_ZMIN_5
217 0x3f800000, // PA_SC_VPORT_ZMAX_5
218 0x00000000, // PA_SC_VPORT_ZMIN_6
219 0x3f800000, // PA_SC_VPORT_ZMAX_6
220 0x00000000, // PA_SC_VPORT_ZMIN_7
221 0x3f800000, // PA_SC_VPORT_ZMAX_7
222 0x00000000, // PA_SC_VPORT_ZMIN_8
223 0x3f800000, // PA_SC_VPORT_ZMAX_8
224 0x00000000, // PA_SC_VPORT_ZMIN_9
225 0x3f800000, // PA_SC_VPORT_ZMAX_9
226 0x00000000, // PA_SC_VPORT_ZMIN_10
227 0x3f800000, // PA_SC_VPORT_ZMAX_10
228 0x00000000, // PA_SC_VPORT_ZMIN_11
229 0x3f800000, // PA_SC_VPORT_ZMAX_11
230 0x00000000, // PA_SC_VPORT_ZMIN_12
231 0x3f800000, // PA_SC_VPORT_ZMAX_12
232 0x00000000, // PA_SC_VPORT_ZMIN_13
233 0x3f800000, // PA_SC_VPORT_ZMAX_13
234 0x00000000, // PA_SC_VPORT_ZMIN_14
235 0x3f800000, // PA_SC_VPORT_ZMAX_14
236 0x00000000, // PA_SC_VPORT_ZMIN_15
237 0x3f800000, // PA_SC_VPORT_ZMAX_15
238};
239static const unsigned int ci_SECT_CONTEXT_def_2[] =
240{
241 0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL
242 0, // HOLE
243 0x00000000, // CP_PERFMON_CNTX_CNTL
244 0x00000000, // CP_RINGID
245 0x00000000, // CP_VMID
246 0, // HOLE
247 0, // HOLE
248 0, // HOLE
249 0, // HOLE
250 0, // HOLE
251 0, // HOLE
252 0, // HOLE
253 0, // HOLE
254 0, // HOLE
255 0, // HOLE
256 0, // HOLE
257 0, // HOLE
258 0, // HOLE
259 0, // HOLE
260 0, // HOLE
261 0, // HOLE
262 0, // HOLE
263 0, // HOLE
264 0, // HOLE
265 0, // HOLE
266 0, // HOLE
267 0, // HOLE
268 0, // HOLE
269 0, // HOLE
270 0, // HOLE
271 0, // HOLE
272 0, // HOLE
273 0, // HOLE
274 0, // HOLE
275 0, // HOLE
276 0, // HOLE
277 0, // HOLE
278 0, // HOLE
279 0, // HOLE
280 0, // HOLE
281 0, // HOLE
282 0, // HOLE
283 0xffffffff, // VGT_MAX_VTX_INDX
284 0x00000000, // VGT_MIN_VTX_INDX
285 0x00000000, // VGT_INDX_OFFSET
286 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
287 0, // HOLE
288 0x00000000, // CB_BLEND_RED
289 0x00000000, // CB_BLEND_GREEN
290 0x00000000, // CB_BLEND_BLUE
291 0x00000000, // CB_BLEND_ALPHA
292 0, // HOLE
293 0, // HOLE
294 0x00000000, // DB_STENCIL_CONTROL
295 0x00000000, // DB_STENCILREFMASK
296 0x00000000, // DB_STENCILREFMASK_BF
297 0, // HOLE
298 0x00000000, // PA_CL_VPORT_XSCALE
299 0x00000000, // PA_CL_VPORT_XOFFSET
300 0x00000000, // PA_CL_VPORT_YSCALE
301 0x00000000, // PA_CL_VPORT_YOFFSET
302 0x00000000, // PA_CL_VPORT_ZSCALE
303 0x00000000, // PA_CL_VPORT_ZOFFSET
304 0x00000000, // PA_CL_VPORT_XSCALE_1
305 0x00000000, // PA_CL_VPORT_XOFFSET_1
306 0x00000000, // PA_CL_VPORT_YSCALE_1
307 0x00000000, // PA_CL_VPORT_YOFFSET_1
308 0x00000000, // PA_CL_VPORT_ZSCALE_1
309 0x00000000, // PA_CL_VPORT_ZOFFSET_1
310 0x00000000, // PA_CL_VPORT_XSCALE_2
311 0x00000000, // PA_CL_VPORT_XOFFSET_2
312 0x00000000, // PA_CL_VPORT_YSCALE_2
313 0x00000000, // PA_CL_VPORT_YOFFSET_2
314 0x00000000, // PA_CL_VPORT_ZSCALE_2
315 0x00000000, // PA_CL_VPORT_ZOFFSET_2
316 0x00000000, // PA_CL_VPORT_XSCALE_3
317 0x00000000, // PA_CL_VPORT_XOFFSET_3
318 0x00000000, // PA_CL_VPORT_YSCALE_3
319 0x00000000, // PA_CL_VPORT_YOFFSET_3
320 0x00000000, // PA_CL_VPORT_ZSCALE_3
321 0x00000000, // PA_CL_VPORT_ZOFFSET_3
322 0x00000000, // PA_CL_VPORT_XSCALE_4
323 0x00000000, // PA_CL_VPORT_XOFFSET_4
324 0x00000000, // PA_CL_VPORT_YSCALE_4
325 0x00000000, // PA_CL_VPORT_YOFFSET_4
326 0x00000000, // PA_CL_VPORT_ZSCALE_4
327 0x00000000, // PA_CL_VPORT_ZOFFSET_4
328 0x00000000, // PA_CL_VPORT_XSCALE_5
329 0x00000000, // PA_CL_VPORT_XOFFSET_5
330 0x00000000, // PA_CL_VPORT_YSCALE_5
331 0x00000000, // PA_CL_VPORT_YOFFSET_5
332 0x00000000, // PA_CL_VPORT_ZSCALE_5
333 0x00000000, // PA_CL_VPORT_ZOFFSET_5
334 0x00000000, // PA_CL_VPORT_XSCALE_6
335 0x00000000, // PA_CL_VPORT_XOFFSET_6
336 0x00000000, // PA_CL_VPORT_YSCALE_6
337 0x00000000, // PA_CL_VPORT_YOFFSET_6
338 0x00000000, // PA_CL_VPORT_ZSCALE_6
339 0x00000000, // PA_CL_VPORT_ZOFFSET_6
340 0x00000000, // PA_CL_VPORT_XSCALE_7
341 0x00000000, // PA_CL_VPORT_XOFFSET_7
342 0x00000000, // PA_CL_VPORT_YSCALE_7
343 0x00000000, // PA_CL_VPORT_YOFFSET_7
344 0x00000000, // PA_CL_VPORT_ZSCALE_7
345 0x00000000, // PA_CL_VPORT_ZOFFSET_7
346 0x00000000, // PA_CL_VPORT_XSCALE_8
347 0x00000000, // PA_CL_VPORT_XOFFSET_8
348 0x00000000, // PA_CL_VPORT_YSCALE_8
349 0x00000000, // PA_CL_VPORT_YOFFSET_8
350 0x00000000, // PA_CL_VPORT_ZSCALE_8
351 0x00000000, // PA_CL_VPORT_ZOFFSET_8
352 0x00000000, // PA_CL_VPORT_XSCALE_9
353 0x00000000, // PA_CL_VPORT_XOFFSET_9
354 0x00000000, // PA_CL_VPORT_YSCALE_9
355 0x00000000, // PA_CL_VPORT_YOFFSET_9
356 0x00000000, // PA_CL_VPORT_ZSCALE_9
357 0x00000000, // PA_CL_VPORT_ZOFFSET_9
358 0x00000000, // PA_CL_VPORT_XSCALE_10
359 0x00000000, // PA_CL_VPORT_XOFFSET_10
360 0x00000000, // PA_CL_VPORT_YSCALE_10
361 0x00000000, // PA_CL_VPORT_YOFFSET_10
362 0x00000000, // PA_CL_VPORT_ZSCALE_10
363 0x00000000, // PA_CL_VPORT_ZOFFSET_10
364 0x00000000, // PA_CL_VPORT_XSCALE_11
365 0x00000000, // PA_CL_VPORT_XOFFSET_11
366 0x00000000, // PA_CL_VPORT_YSCALE_11
367 0x00000000, // PA_CL_VPORT_YOFFSET_11
368 0x00000000, // PA_CL_VPORT_ZSCALE_11
369 0x00000000, // PA_CL_VPORT_ZOFFSET_11
370 0x00000000, // PA_CL_VPORT_XSCALE_12
371 0x00000000, // PA_CL_VPORT_XOFFSET_12
372 0x00000000, // PA_CL_VPORT_YSCALE_12
373 0x00000000, // PA_CL_VPORT_YOFFSET_12
374 0x00000000, // PA_CL_VPORT_ZSCALE_12
375 0x00000000, // PA_CL_VPORT_ZOFFSET_12
376 0x00000000, // PA_CL_VPORT_XSCALE_13
377 0x00000000, // PA_CL_VPORT_XOFFSET_13
378 0x00000000, // PA_CL_VPORT_YSCALE_13
379 0x00000000, // PA_CL_VPORT_YOFFSET_13
380 0x00000000, // PA_CL_VPORT_ZSCALE_13
381 0x00000000, // PA_CL_VPORT_ZOFFSET_13
382 0x00000000, // PA_CL_VPORT_XSCALE_14
383 0x00000000, // PA_CL_VPORT_XOFFSET_14
384 0x00000000, // PA_CL_VPORT_YSCALE_14
385 0x00000000, // PA_CL_VPORT_YOFFSET_14
386 0x00000000, // PA_CL_VPORT_ZSCALE_14
387 0x00000000, // PA_CL_VPORT_ZOFFSET_14
388 0x00000000, // PA_CL_VPORT_XSCALE_15
389 0x00000000, // PA_CL_VPORT_XOFFSET_15
390 0x00000000, // PA_CL_VPORT_YSCALE_15
391 0x00000000, // PA_CL_VPORT_YOFFSET_15
392 0x00000000, // PA_CL_VPORT_ZSCALE_15
393 0x00000000, // PA_CL_VPORT_ZOFFSET_15
394 0x00000000, // PA_CL_UCP_0_X
395 0x00000000, // PA_CL_UCP_0_Y
396 0x00000000, // PA_CL_UCP_0_Z
397 0x00000000, // PA_CL_UCP_0_W
398 0x00000000, // PA_CL_UCP_1_X
399 0x00000000, // PA_CL_UCP_1_Y
400 0x00000000, // PA_CL_UCP_1_Z
401 0x00000000, // PA_CL_UCP_1_W
402 0x00000000, // PA_CL_UCP_2_X
403 0x00000000, // PA_CL_UCP_2_Y
404 0x00000000, // PA_CL_UCP_2_Z
405 0x00000000, // PA_CL_UCP_2_W
406 0x00000000, // PA_CL_UCP_3_X
407 0x00000000, // PA_CL_UCP_3_Y
408 0x00000000, // PA_CL_UCP_3_Z
409 0x00000000, // PA_CL_UCP_3_W
410 0x00000000, // PA_CL_UCP_4_X
411 0x00000000, // PA_CL_UCP_4_Y
412 0x00000000, // PA_CL_UCP_4_Z
413 0x00000000, // PA_CL_UCP_4_W
414 0x00000000, // PA_CL_UCP_5_X
415 0x00000000, // PA_CL_UCP_5_Y
416 0x00000000, // PA_CL_UCP_5_Z
417 0x00000000, // PA_CL_UCP_5_W
418 0, // HOLE
419 0, // HOLE
420 0, // HOLE
421 0, // HOLE
422 0, // HOLE
423 0, // HOLE
424 0, // HOLE
425 0, // HOLE
426 0, // HOLE
427 0, // HOLE
428 0x00000000, // SPI_PS_INPUT_CNTL_0
429 0x00000000, // SPI_PS_INPUT_CNTL_1
430 0x00000000, // SPI_PS_INPUT_CNTL_2
431 0x00000000, // SPI_PS_INPUT_CNTL_3
432 0x00000000, // SPI_PS_INPUT_CNTL_4
433 0x00000000, // SPI_PS_INPUT_CNTL_5
434 0x00000000, // SPI_PS_INPUT_CNTL_6
435 0x00000000, // SPI_PS_INPUT_CNTL_7
436 0x00000000, // SPI_PS_INPUT_CNTL_8
437 0x00000000, // SPI_PS_INPUT_CNTL_9
438 0x00000000, // SPI_PS_INPUT_CNTL_10
439 0x00000000, // SPI_PS_INPUT_CNTL_11
440 0x00000000, // SPI_PS_INPUT_CNTL_12
441 0x00000000, // SPI_PS_INPUT_CNTL_13
442 0x00000000, // SPI_PS_INPUT_CNTL_14
443 0x00000000, // SPI_PS_INPUT_CNTL_15
444 0x00000000, // SPI_PS_INPUT_CNTL_16
445 0x00000000, // SPI_PS_INPUT_CNTL_17
446 0x00000000, // SPI_PS_INPUT_CNTL_18
447 0x00000000, // SPI_PS_INPUT_CNTL_19
448 0x00000000, // SPI_PS_INPUT_CNTL_20
449 0x00000000, // SPI_PS_INPUT_CNTL_21
450 0x00000000, // SPI_PS_INPUT_CNTL_22
451 0x00000000, // SPI_PS_INPUT_CNTL_23
452 0x00000000, // SPI_PS_INPUT_CNTL_24
453 0x00000000, // SPI_PS_INPUT_CNTL_25
454 0x00000000, // SPI_PS_INPUT_CNTL_26
455 0x00000000, // SPI_PS_INPUT_CNTL_27
456 0x00000000, // SPI_PS_INPUT_CNTL_28
457 0x00000000, // SPI_PS_INPUT_CNTL_29
458 0x00000000, // SPI_PS_INPUT_CNTL_30
459 0x00000000, // SPI_PS_INPUT_CNTL_31
460 0x00000000, // SPI_VS_OUT_CONFIG
461 0, // HOLE
462 0x00000000, // SPI_PS_INPUT_ENA
463 0x00000000, // SPI_PS_INPUT_ADDR
464 0x00000000, // SPI_INTERP_CONTROL_0
465 0x00000002, // SPI_PS_IN_CONTROL
466 0, // HOLE
467 0x00000000, // SPI_BARYC_CNTL
468 0, // HOLE
469 0x00000000, // SPI_TMPRING_SIZE
470 0, // HOLE
471 0, // HOLE
472 0, // HOLE
473 0, // HOLE
474 0, // HOLE
475 0, // HOLE
476 0, // HOLE
477 0, // HOLE
478 0x00000000, // SPI_SHADER_POS_FORMAT
479 0x00000000, // SPI_SHADER_Z_FORMAT
480 0x00000000, // SPI_SHADER_COL_FORMAT
481 0, // HOLE
482 0, // HOLE
483 0, // HOLE
484 0, // HOLE
485 0, // HOLE
486 0, // HOLE
487 0, // HOLE
488 0, // HOLE
489 0, // HOLE
490 0, // HOLE
491 0, // HOLE
492 0, // HOLE
493 0, // HOLE
494 0, // HOLE
495 0, // HOLE
496 0, // HOLE
497 0, // HOLE
498 0, // HOLE
499 0, // HOLE
500 0, // HOLE
501 0, // HOLE
502 0, // HOLE
503 0, // HOLE
504 0, // HOLE
505 0, // HOLE
506 0, // HOLE
507 0x00000000, // CB_BLEND0_CONTROL
508 0x00000000, // CB_BLEND1_CONTROL
509 0x00000000, // CB_BLEND2_CONTROL
510 0x00000000, // CB_BLEND3_CONTROL
511 0x00000000, // CB_BLEND4_CONTROL
512 0x00000000, // CB_BLEND5_CONTROL
513 0x00000000, // CB_BLEND6_CONTROL
514 0x00000000, // CB_BLEND7_CONTROL
515};
516static const unsigned int ci_SECT_CONTEXT_def_3[] =
517{
518 0x00000000, // PA_CL_POINT_X_RAD
519 0x00000000, // PA_CL_POINT_Y_RAD
520 0x00000000, // PA_CL_POINT_SIZE
521 0x00000000, // PA_CL_POINT_CULL_RAD
522 0x00000000, // VGT_DMA_BASE_HI
523 0x00000000, // VGT_DMA_BASE
524};
525static const unsigned int ci_SECT_CONTEXT_def_4[] =
526{
527 0x00000000, // DB_DEPTH_CONTROL
528 0x00000000, // DB_EQAA
529 0x00000000, // CB_COLOR_CONTROL
530 0x00000000, // DB_SHADER_CONTROL
531 0x00090000, // PA_CL_CLIP_CNTL
532 0x00000004, // PA_SU_SC_MODE_CNTL
533 0x00000000, // PA_CL_VTE_CNTL
534 0x00000000, // PA_CL_VS_OUT_CNTL
535 0x00000000, // PA_CL_NANINF_CNTL
536 0x00000000, // PA_SU_LINE_STIPPLE_CNTL
537 0x00000000, // PA_SU_LINE_STIPPLE_SCALE
538 0x00000000, // PA_SU_PRIM_FILTER_CNTL
539 0, // HOLE
540 0, // HOLE
541 0, // HOLE
542 0, // HOLE
543 0, // HOLE
544 0, // HOLE
545 0, // HOLE
546 0, // HOLE
547 0, // HOLE
548 0, // HOLE
549 0, // HOLE
550 0, // HOLE
551 0, // HOLE
552 0, // HOLE
553 0, // HOLE
554 0, // HOLE
555 0, // HOLE
556 0, // HOLE
557 0, // HOLE
558 0, // HOLE
559 0, // HOLE
560 0, // HOLE
561 0, // HOLE
562 0, // HOLE
563 0, // HOLE
564 0, // HOLE
565 0, // HOLE
566 0, // HOLE
567 0, // HOLE
568 0, // HOLE
569 0, // HOLE
570 0, // HOLE
571 0, // HOLE
572 0, // HOLE
573 0, // HOLE
574 0, // HOLE
575 0, // HOLE
576 0, // HOLE
577 0, // HOLE
578 0, // HOLE
579 0, // HOLE
580 0, // HOLE
581 0, // HOLE
582 0, // HOLE
583 0, // HOLE
584 0, // HOLE
585 0, // HOLE
586 0, // HOLE
587 0, // HOLE
588 0, // HOLE
589 0, // HOLE
590 0, // HOLE
591 0, // HOLE
592 0, // HOLE
593 0, // HOLE
594 0, // HOLE
595 0, // HOLE
596 0, // HOLE
597 0, // HOLE
598 0, // HOLE
599 0, // HOLE
600 0, // HOLE
601 0, // HOLE
602 0, // HOLE
603 0, // HOLE
604 0, // HOLE
605 0, // HOLE
606 0, // HOLE
607 0, // HOLE
608 0, // HOLE
609 0, // HOLE
610 0, // HOLE
611 0, // HOLE
612 0, // HOLE
613 0, // HOLE
614 0, // HOLE
615 0, // HOLE
616 0, // HOLE
617 0, // HOLE
618 0, // HOLE
619 0, // HOLE
620 0, // HOLE
621 0, // HOLE
622 0, // HOLE
623 0, // HOLE
624 0, // HOLE
625 0, // HOLE
626 0, // HOLE
627 0, // HOLE
628 0, // HOLE
629 0, // HOLE
630 0, // HOLE
631 0, // HOLE
632 0, // HOLE
633 0, // HOLE
634 0, // HOLE
635 0, // HOLE
636 0, // HOLE
637 0, // HOLE
638 0, // HOLE
639 0, // HOLE
640 0, // HOLE
641 0, // HOLE
642 0, // HOLE
643 0, // HOLE
644 0, // HOLE
645 0, // HOLE
646 0, // HOLE
647 0, // HOLE
648 0, // HOLE
649 0, // HOLE
650 0, // HOLE
651 0, // HOLE
652 0, // HOLE
653 0, // HOLE
654 0, // HOLE
655 0x00000000, // PA_SU_POINT_SIZE
656 0x00000000, // PA_SU_POINT_MINMAX
657 0x00000000, // PA_SU_LINE_CNTL
658 0x00000000, // PA_SC_LINE_STIPPLE
659 0x00000000, // VGT_OUTPUT_PATH_CNTL
660 0x00000000, // VGT_HOS_CNTL
661 0x00000000, // VGT_HOS_MAX_TESS_LEVEL
662 0x00000000, // VGT_HOS_MIN_TESS_LEVEL
663 0x00000000, // VGT_HOS_REUSE_DEPTH
664 0x00000000, // VGT_GROUP_PRIM_TYPE
665 0x00000000, // VGT_GROUP_FIRST_DECR
666 0x00000000, // VGT_GROUP_DECR
667 0x00000000, // VGT_GROUP_VECT_0_CNTL
668 0x00000000, // VGT_GROUP_VECT_1_CNTL
669 0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
670 0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
671 0x00000000, // VGT_GS_MODE
672 0x00000000, // VGT_GS_ONCHIP_CNTL
673 0x00000000, // PA_SC_MODE_CNTL_0
674 0x00000000, // PA_SC_MODE_CNTL_1
675 0x00000000, // VGT_ENHANCE
676 0x00000100, // VGT_GS_PER_ES
677 0x00000080, // VGT_ES_PER_GS
678 0x00000002, // VGT_GS_PER_VS
679 0x00000000, // VGT_GSVS_RING_OFFSET_1
680 0x00000000, // VGT_GSVS_RING_OFFSET_2
681 0x00000000, // VGT_GSVS_RING_OFFSET_3
682 0x00000000, // VGT_GS_OUT_PRIM_TYPE
683 0x00000000, // IA_ENHANCE
684};
685static const unsigned int ci_SECT_CONTEXT_def_5[] =
686{
687 0x00000000, // WD_ENHANCE
688 0x00000000, // VGT_PRIMITIVEID_EN
689};
690static const unsigned int ci_SECT_CONTEXT_def_6[] =
691{
692 0x00000000, // VGT_PRIMITIVEID_RESET
693};
694static const unsigned int ci_SECT_CONTEXT_def_7[] =
695{
696 0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
697 0, // HOLE
698 0, // HOLE
699 0x00000000, // VGT_INSTANCE_STEP_RATE_0
700 0x00000000, // VGT_INSTANCE_STEP_RATE_1
701 0x000000ff, // IA_MULTI_VGT_PARAM
702 0x00000000, // VGT_ESGS_RING_ITEMSIZE
703 0x00000000, // VGT_GSVS_RING_ITEMSIZE
704 0x00000000, // VGT_REUSE_OFF
705 0x00000000, // VGT_VTX_CNT_EN
706 0x00000000, // DB_HTILE_SURFACE
707 0x00000000, // DB_SRESULTS_COMPARE_STATE0
708 0x00000000, // DB_SRESULTS_COMPARE_STATE1
709 0x00000000, // DB_PRELOAD_CONTROL
710 0, // HOLE
711 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
712 0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
713 0, // HOLE
714 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
715 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
716 0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
717 0, // HOLE
718 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
719 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
720 0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
721 0, // HOLE
722 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
723 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
724 0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
725 0, // HOLE
726 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
727 0, // HOLE
728 0, // HOLE
729 0, // HOLE
730 0, // HOLE
731 0, // HOLE
732 0, // HOLE
733 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
734 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
735 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
736 0, // HOLE
737 0x00000000, // VGT_GS_MAX_VERT_OUT
738 0, // HOLE
739 0, // HOLE
740 0, // HOLE
741 0, // HOLE
742 0, // HOLE
743 0, // HOLE
744 0x00000000, // VGT_SHADER_STAGES_EN
745 0x00000000, // VGT_LS_HS_CONFIG
746 0x00000000, // VGT_GS_VERT_ITEMSIZE
747 0x00000000, // VGT_GS_VERT_ITEMSIZE_1
748 0x00000000, // VGT_GS_VERT_ITEMSIZE_2
749 0x00000000, // VGT_GS_VERT_ITEMSIZE_3
750 0x00000000, // VGT_TF_PARAM
751 0x00000000, // DB_ALPHA_TO_MASK
752 0, // HOLE
753 0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
754 0x00000000, // PA_SU_POLY_OFFSET_CLAMP
755 0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
756 0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
757 0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
758 0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
759 0x00000000, // VGT_GS_INSTANCE_CNT
760 0x00000000, // VGT_STRMOUT_CONFIG
761 0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
762 0, // HOLE
763 0, // HOLE
764 0, // HOLE
765 0, // HOLE
766 0, // HOLE
767 0, // HOLE
768 0, // HOLE
769 0, // HOLE
770 0, // HOLE
771 0, // HOLE
772 0, // HOLE
773 0, // HOLE
774 0, // HOLE
775 0, // HOLE
776 0x00000000, // PA_SC_CENTROID_PRIORITY_0
777 0x00000000, // PA_SC_CENTROID_PRIORITY_1
778 0x00001000, // PA_SC_LINE_CNTL
779 0x00000000, // PA_SC_AA_CONFIG
780 0x00000005, // PA_SU_VTX_CNTL
781 0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
782 0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
783 0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
784 0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
785 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
786 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
787 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
788 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
789 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
790 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
791 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
792 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
793 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
794 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
795 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
796 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
797 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
798 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
799 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
800 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
801 0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
802 0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
803 0, // HOLE
804 0, // HOLE
805 0, // HOLE
806 0, // HOLE
807 0, // HOLE
808 0, // HOLE
809 0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
810 0x00000010, // VGT_OUT_DEALLOC_CNTL
811 0x00000000, // CB_COLOR0_BASE
812 0x00000000, // CB_COLOR0_PITCH
813 0x00000000, // CB_COLOR0_SLICE
814 0x00000000, // CB_COLOR0_VIEW
815 0x00000000, // CB_COLOR0_INFO
816 0x00000000, // CB_COLOR0_ATTRIB
817 0, // HOLE
818 0x00000000, // CB_COLOR0_CMASK
819 0x00000000, // CB_COLOR0_CMASK_SLICE
820 0x00000000, // CB_COLOR0_FMASK
821 0x00000000, // CB_COLOR0_FMASK_SLICE
822 0x00000000, // CB_COLOR0_CLEAR_WORD0
823 0x00000000, // CB_COLOR0_CLEAR_WORD1
824 0, // HOLE
825 0, // HOLE
826 0x00000000, // CB_COLOR1_BASE
827 0x00000000, // CB_COLOR1_PITCH
828 0x00000000, // CB_COLOR1_SLICE
829 0x00000000, // CB_COLOR1_VIEW
830 0x00000000, // CB_COLOR1_INFO
831 0x00000000, // CB_COLOR1_ATTRIB
832 0, // HOLE
833 0x00000000, // CB_COLOR1_CMASK
834 0x00000000, // CB_COLOR1_CMASK_SLICE
835 0x00000000, // CB_COLOR1_FMASK
836 0x00000000, // CB_COLOR1_FMASK_SLICE
837 0x00000000, // CB_COLOR1_CLEAR_WORD0
838 0x00000000, // CB_COLOR1_CLEAR_WORD1
839 0, // HOLE
840 0, // HOLE
841 0x00000000, // CB_COLOR2_BASE
842 0x00000000, // CB_COLOR2_PITCH
843 0x00000000, // CB_COLOR2_SLICE
844 0x00000000, // CB_COLOR2_VIEW
845 0x00000000, // CB_COLOR2_INFO
846 0x00000000, // CB_COLOR2_ATTRIB
847 0, // HOLE
848 0x00000000, // CB_COLOR2_CMASK
849 0x00000000, // CB_COLOR2_CMASK_SLICE
850 0x00000000, // CB_COLOR2_FMASK
851 0x00000000, // CB_COLOR2_FMASK_SLICE
852 0x00000000, // CB_COLOR2_CLEAR_WORD0
853 0x00000000, // CB_COLOR2_CLEAR_WORD1
854 0, // HOLE
855 0, // HOLE
856 0x00000000, // CB_COLOR3_BASE
857 0x00000000, // CB_COLOR3_PITCH
858 0x00000000, // CB_COLOR3_SLICE
859 0x00000000, // CB_COLOR3_VIEW
860 0x00000000, // CB_COLOR3_INFO
861 0x00000000, // CB_COLOR3_ATTRIB
862 0, // HOLE
863 0x00000000, // CB_COLOR3_CMASK
864 0x00000000, // CB_COLOR3_CMASK_SLICE
865 0x00000000, // CB_COLOR3_FMASK
866 0x00000000, // CB_COLOR3_FMASK_SLICE
867 0x00000000, // CB_COLOR3_CLEAR_WORD0
868 0x00000000, // CB_COLOR3_CLEAR_WORD1
869 0, // HOLE
870 0, // HOLE
871 0x00000000, // CB_COLOR4_BASE
872 0x00000000, // CB_COLOR4_PITCH
873 0x00000000, // CB_COLOR4_SLICE
874 0x00000000, // CB_COLOR4_VIEW
875 0x00000000, // CB_COLOR4_INFO
876 0x00000000, // CB_COLOR4_ATTRIB
877 0, // HOLE
878 0x00000000, // CB_COLOR4_CMASK
879 0x00000000, // CB_COLOR4_CMASK_SLICE
880 0x00000000, // CB_COLOR4_FMASK
881 0x00000000, // CB_COLOR4_FMASK_SLICE
882 0x00000000, // CB_COLOR4_CLEAR_WORD0
883 0x00000000, // CB_COLOR4_CLEAR_WORD1
884 0, // HOLE
885 0, // HOLE
886 0x00000000, // CB_COLOR5_BASE
887 0x00000000, // CB_COLOR5_PITCH
888 0x00000000, // CB_COLOR5_SLICE
889 0x00000000, // CB_COLOR5_VIEW
890 0x00000000, // CB_COLOR5_INFO
891 0x00000000, // CB_COLOR5_ATTRIB
892 0, // HOLE
893 0x00000000, // CB_COLOR5_CMASK
894 0x00000000, // CB_COLOR5_CMASK_SLICE
895 0x00000000, // CB_COLOR5_FMASK
896 0x00000000, // CB_COLOR5_FMASK_SLICE
897 0x00000000, // CB_COLOR5_CLEAR_WORD0
898 0x00000000, // CB_COLOR5_CLEAR_WORD1
899 0, // HOLE
900 0, // HOLE
901 0x00000000, // CB_COLOR6_BASE
902 0x00000000, // CB_COLOR6_PITCH
903 0x00000000, // CB_COLOR6_SLICE
904 0x00000000, // CB_COLOR6_VIEW
905 0x00000000, // CB_COLOR6_INFO
906 0x00000000, // CB_COLOR6_ATTRIB
907 0, // HOLE
908 0x00000000, // CB_COLOR6_CMASK
909 0x00000000, // CB_COLOR6_CMASK_SLICE
910 0x00000000, // CB_COLOR6_FMASK
911 0x00000000, // CB_COLOR6_FMASK_SLICE
912 0x00000000, // CB_COLOR6_CLEAR_WORD0
913 0x00000000, // CB_COLOR6_CLEAR_WORD1
914 0, // HOLE
915 0, // HOLE
916 0x00000000, // CB_COLOR7_BASE
917 0x00000000, // CB_COLOR7_PITCH
918 0x00000000, // CB_COLOR7_SLICE
919 0x00000000, // CB_COLOR7_VIEW
920 0x00000000, // CB_COLOR7_INFO
921 0x00000000, // CB_COLOR7_ATTRIB
922 0, // HOLE
923 0x00000000, // CB_COLOR7_CMASK
924 0x00000000, // CB_COLOR7_CMASK_SLICE
925 0x00000000, // CB_COLOR7_FMASK
926 0x00000000, // CB_COLOR7_FMASK_SLICE
927 0x00000000, // CB_COLOR7_CLEAR_WORD0
928 0x00000000, // CB_COLOR7_CLEAR_WORD1
929};
930static const struct cs_extent_def ci_SECT_CONTEXT_defs[] =
931{
932 {ci_SECT_CONTEXT_def_1, 0x0000a000, 212 },
933 {ci_SECT_CONTEXT_def_2, 0x0000a0d6, 274 },
934 {ci_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
935 {ci_SECT_CONTEXT_def_4, 0x0000a200, 157 },
936 {ci_SECT_CONTEXT_def_5, 0x0000a2a0, 2 },
937 {ci_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
938 {ci_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
939 { 0, 0, 0 }
940};
941static const struct cs_section_def ci_cs_data[] = {
942 { ci_SECT_CONTEXT_defs, SECT_CONTEXT },
943 { 0, SECT_NONE }
944};
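
Each cs_extent_def row above pairs a defaults array with the register offset it starts at and its dword count, and ci_cs_data groups the extents by section. The clear-state buffer size falls out of a walk over those tables, as in this sketch (assuming the cs_extent_def/cs_section_def field names used elsewhere in the driver):

/* Two header dwords (packet + start offset) plus the defaults of
 * every extent in every section. */
static u32 example_csb_size(const struct cs_section_def *cs_data)
{
	const struct cs_section_def *sect;
	const struct cs_extent_def *ext;
	u32 count = 0;

	for (sect = cs_data; sect->section != NULL; ++sect)
		for (ext = sect->section; ext->extent != NULL; ++ext)
			count += 2 + ext->reg_count;

	return count;
}
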
diff --git a/drivers/gpu/drm/radeon/clearstate_evergreen.h b/drivers/gpu/drm/radeon/clearstate_evergreen.h
index 4791d856b7fd..63a1ffbb3ced 100644
--- a/drivers/gpu/drm/radeon/clearstate_evergreen.h
+++ b/drivers/gpu/drm/radeon/clearstate_evergreen.h
@@ -1072,7 +1072,7 @@ static const struct cs_extent_def SECT_CTRLCONST_defs[] =
1072 {SECT_CTRLCONST_def_1, 0x0000f3fc, 2 }, 1072 {SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
1073 { 0, 0, 0 } 1073 { 0, 0, 0 }
1074}; 1074};
1075struct cs_section_def evergreen_cs_data[] = { 1075static const struct cs_section_def evergreen_cs_data[] = {
1076 { SECT_CONTEXT_defs, SECT_CONTEXT }, 1076 { SECT_CONTEXT_defs, SECT_CONTEXT },
1077 { SECT_CLEAR_defs, SECT_CLEAR }, 1077 { SECT_CLEAR_defs, SECT_CLEAR },
1078 { SECT_CTRLCONST_defs, SECT_CTRLCONST }, 1078 { SECT_CTRLCONST_defs, SECT_CTRLCONST },
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 9bcdd174780f..95a66db08d9b 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -2038,9 +2038,6 @@ int cypress_dpm_init(struct radeon_device *rdev)
2038{ 2038{
2039 struct rv7xx_power_info *pi; 2039 struct rv7xx_power_info *pi;
2040 struct evergreen_power_info *eg_pi; 2040 struct evergreen_power_info *eg_pi;
2041 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
2042 uint16_t data_offset, size;
2043 uint8_t frev, crev;
2044 struct atom_clock_dividers dividers; 2041 struct atom_clock_dividers dividers;
2045 int ret; 2042 int ret;
2046 2043
@@ -2092,16 +2089,7 @@ int cypress_dpm_init(struct radeon_device *rdev)
2092 eg_pi->vddci_control = 2089 eg_pi->vddci_control =
2093 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); 2090 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
2094 2091
2095 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, 2092 rv770_get_engine_memory_ss(rdev);
2096 &frev, &crev, &data_offset)) {
2097 pi->sclk_ss = true;
2098 pi->mclk_ss = true;
2099 pi->dynamic_ss = true;
2100 } else {
2101 pi->sclk_ss = false;
2102 pi->mclk_ss = false;
2103 pi->dynamic_ss = true;
2104 }
2105 2093
2106 pi->asi = RV770_ASI_DFLT; 2094 pi->asi = RV770_ASI_DFLT;
2107 pi->pasi = CYPRESS_HASI_DFLT; 2095 pi->pasi = CYPRESS_HASI_DFLT;
@@ -2122,8 +2110,7 @@ int cypress_dpm_init(struct radeon_device *rdev)
2122 2110
2123 pi->dynamic_pcie_gen2 = true; 2111 pi->dynamic_pcie_gen2 = true;
2124 2112
2125 if (pi->gfx_clock_gating && 2113 if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
2126 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
2127 pi->thermal_protection = true; 2114 pi->thermal_protection = true;
2128 else 2115 else
2129 pi->thermal_protection = false; 2116 pi->thermal_protection = false;
@@ -2179,7 +2166,8 @@ bool cypress_dpm_vblank_too_short(struct radeon_device *rdev)
2179{ 2166{
2180 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 2167 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2181 u32 vblank_time = r600_dpm_get_vblank_time(rdev); 2168 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
2182 u32 switch_limit = pi->mem_gddr5 ? 450 : 300; 2169 /* we never hit the non-gddr5 limit so disable it */
2170 u32 switch_limit = pi->mem_gddr5 ? 450 : 0;
2183 2171
2184 if (vblank_time < switch_limit) 2172 if (vblank_time < switch_limit)
2185 return true; 2173 return true;
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
new file mode 100644
index 000000000000..8953255e894b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -0,0 +1,278 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/hdmi.h>
24#include <drm/drmP.h>
25#include "radeon.h"
26#include "sid.h"
27
28static u32 dce6_endpoint_rreg(struct radeon_device *rdev,
29 u32 block_offset, u32 reg)
30{
31 u32 r;
32
33 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
34 r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset);
35 return r;
36}
37
38static void dce6_endpoint_wreg(struct radeon_device *rdev,
39 u32 block_offset, u32 reg, u32 v)
40{
41 if (ASIC_IS_DCE8(rdev))
42 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
43 else
44 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset,
45 AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg));
46 WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v);
47}
48
49#define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg))
50#define WREG32_ENDPOINT(block, reg, v) dce6_endpoint_wreg(rdev, (block), (reg), (v))
51
52
53static void dce6_afmt_get_connected_pins(struct radeon_device *rdev)
54{
55 int i;
56 u32 offset, tmp;
57
58 for (i = 0; i < rdev->audio.num_pins; i++) {
59 offset = rdev->audio.pin[i].offset;
60 tmp = RREG32_ENDPOINT(offset,
61 AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
62 if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
63 rdev->audio.pin[i].connected = false;
64 else
65 rdev->audio.pin[i].connected = true;
66 }
67}
68
69struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev)
70{
71 int i;
72
73 dce6_afmt_get_connected_pins(rdev);
74
75 for (i = 0; i < rdev->audio.num_pins; i++) {
76 if (rdev->audio.pin[i].connected)
77 return &rdev->audio.pin[i];
78 }
79 DRM_ERROR("No connected audio pins found!\n");
80 return NULL;
81}
82
83void dce6_afmt_select_pin(struct drm_encoder *encoder)
84{
85 struct radeon_device *rdev = encoder->dev->dev_private;
86 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
87 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
88 u32 offset = dig->afmt->offset;
89 u32 id;
90
91 if (!dig->afmt->pin)
92 return;
93 id = dig->afmt->pin->id;
94 WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id));
95}
96
97void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
98{
99 struct radeon_device *rdev = encoder->dev->dev_private;
100 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
101 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
102 struct drm_connector *connector;
103 struct radeon_connector *radeon_connector = NULL;
104 u32 offset, tmp;
105 u8 *sadb;
106 int sad_count;
107
108 if (!dig->afmt->pin)
109 return;
110
111 offset = dig->afmt->pin->offset;
112
113 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
114 if (connector->encoder == encoder)
115 radeon_connector = to_radeon_connector(connector);
116 }
117
118 if (!radeon_connector) {
119 DRM_ERROR("Couldn't find encoder's connector\n");
120 return;
121 }
122
123 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
124 if (sad_count < 0) {
125 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
126 return;
127 }
128
129 /* program the speaker allocation */
130 tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
131 tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
132 /* set HDMI mode */
133 tmp |= HDMI_CONNECTION;
134 if (sad_count)
135 tmp |= SPEAKER_ALLOCATION(sadb[0]);
136 else
137 tmp |= SPEAKER_ALLOCATION(5); /* stereo */
138 WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
139
140 kfree(sadb);
141}
142
143void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
144{
145 struct radeon_device *rdev = encoder->dev->dev_private;
146 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
147 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
148 u32 offset;
149 struct drm_connector *connector;
150 struct radeon_connector *radeon_connector = NULL;
151 struct cea_sad *sads;
152 int i, sad_count;
153
154 static const u16 eld_reg_to_type[][2] = {
155 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
156 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
157 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
158 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
159 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
160 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
161 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
162 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
163 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
164 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
165 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
166 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
167 };
168
169 if (!dig->afmt->pin)
170 return;
171
172 offset = dig->afmt->pin->offset;
173
174 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
175 if (connector->encoder == encoder)
176 radeon_connector = to_radeon_connector(connector);
177 }
178
179 if (!radeon_connector) {
180 DRM_ERROR("Couldn't find encoder's connector\n");
181 return;
182 }
183
184 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
185 if (sad_count < 0) {
186 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
187 return;
188 }
189 BUG_ON(!sads);
190
191 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
192 u32 value = 0;
193 int j;
194
195 for (j = 0; j < sad_count; j++) {
196 struct cea_sad *sad = &sads[j];
197
198 if (sad->format == eld_reg_to_type[i][1]) {
199 value = MAX_CHANNELS(sad->channels) |
200 DESCRIPTOR_BYTE_2(sad->byte2) |
201 SUPPORTED_FREQUENCIES(sad->freq);
202 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
203 value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
204 break;
205 }
206 }
207 WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
208 }
209
210 kfree(sads);
211}
212
213static int dce6_audio_chipset_supported(struct radeon_device *rdev)
214{
215 return !ASIC_IS_NODCE(rdev);
216}
217
218static void dce6_audio_enable(struct radeon_device *rdev,
219 struct r600_audio_pin *pin,
220 bool enable)
221{
222 WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
223 enable ? AUDIO_ENABLED : 0);
224 DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
225}
226
227static const u32 pin_offsets[7] =
228{
229 (0x5e00 - 0x5e00),
230 (0x5e18 - 0x5e00),
231 (0x5e30 - 0x5e00),
232 (0x5e48 - 0x5e00),
233 (0x5e60 - 0x5e00),
234 (0x5e78 - 0x5e00),
235 (0x5e90 - 0x5e00),
236};
237
238int dce6_audio_init(struct radeon_device *rdev)
239{
240 int i;
241
242 if (!radeon_audio || !dce6_audio_chipset_supported(rdev))
243 return 0;
244
245 rdev->audio.enabled = true;
246
247 if (ASIC_IS_DCE8(rdev))
248 rdev->audio.num_pins = 7;
249 else
250 rdev->audio.num_pins = 6;
251
252 for (i = 0; i < rdev->audio.num_pins; i++) {
253 rdev->audio.pin[i].channels = -1;
254 rdev->audio.pin[i].rate = -1;
255 rdev->audio.pin[i].bits_per_sample = -1;
256 rdev->audio.pin[i].status_bits = 0;
257 rdev->audio.pin[i].category_code = 0;
258 rdev->audio.pin[i].connected = false;
259 rdev->audio.pin[i].offset = pin_offsets[i];
260 rdev->audio.pin[i].id = i;
261 dce6_audio_enable(rdev, &rdev->audio.pin[i], true);
262 }
263
264 return 0;
265}
266
267void dce6_audio_fini(struct radeon_device *rdev)
268{
269 int i;
270
271 if (!rdev->audio.enabled)
272 return;
273
274 for (i = 0; i < rdev->audio.num_pins; i++)
275 dce6_audio_enable(rdev, &rdev->audio.pin[i], false);
276
277 rdev->audio.enabled = false;
278}
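
The endpoint registers above are indexed per audio pin: pin_offsets holds each pin's byte offset (0x18 apart) from the first block, and the RREG32_ENDPOINT/WREG32_ENDPOINT wrappers take that offset as their block argument. For example, inside any function with rdev in scope:

	/* illustrative: read the channel/speaker register of audio pin 2 */
	u32 tmp = RREG32_ENDPOINT(rdev->audio.pin[2].offset,
				  AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
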
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 038dcac7670c..555164e270a7 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -47,7 +47,7 @@ static const u32 crtc_offsets[6] =
47 47
48#include "clearstate_evergreen.h" 48#include "clearstate_evergreen.h"
49 49
50static u32 sumo_rlc_save_restore_register_list[] = 50static const u32 sumo_rlc_save_restore_register_list[] =
51{ 51{
52 0x98fc, 52 0x98fc,
53 0x9830, 53 0x9830,
@@ -131,7 +131,6 @@ static u32 sumo_rlc_save_restore_register_list[] =
131 0x9150, 131 0x9150,
132 0x802c, 132 0x802c,
133}; 133};
134static u32 sumo_rlc_save_restore_register_list_size = ARRAY_SIZE(sumo_rlc_save_restore_register_list);
135 134
136static void evergreen_gpu_init(struct radeon_device *rdev); 135static void evergreen_gpu_init(struct radeon_device *rdev);
137void evergreen_fini(struct radeon_device *rdev); 136void evergreen_fini(struct radeon_device *rdev);
@@ -141,6 +140,12 @@ extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
141 int ring, u32 cp_int_cntl); 140 int ring, u32 cp_int_cntl);
142extern void cayman_vm_decode_fault(struct radeon_device *rdev, 141extern void cayman_vm_decode_fault(struct radeon_device *rdev,
143 u32 status, u32 addr); 142 u32 status, u32 addr);
143void cik_init_cp_pg_table(struct radeon_device *rdev);
144
145extern u32 si_get_csb_size(struct radeon_device *rdev);
146extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
147extern u32 cik_get_csb_size(struct radeon_device *rdev);
148extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
144 149
145static const u32 evergreen_golden_registers[] = 150static const u32 evergreen_golden_registers[] =
146{ 151{
@@ -1807,7 +1812,8 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
                                         struct drm_display_mode *mode,
                                         struct drm_display_mode *other_mode)
 {
-    u32 tmp;
+    u32 tmp, buffer_alloc, i;
+    u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
     /*
      * Line Buffer Setup
      * There are 3 line buffers, each one shared by 2 display controllers.
@@ -1830,18 +1836,34 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
      * non-linked crtcs for maximum line buffer allocation.
      */
     if (radeon_crtc->base.enabled && mode) {
-        if (other_mode)
+        if (other_mode) {
             tmp = 0; /* 1/2 */
-        else
+            buffer_alloc = 1;
+        } else {
             tmp = 2; /* whole */
-    } else
+            buffer_alloc = 2;
+        }
+    } else {
         tmp = 0;
+        buffer_alloc = 0;
+    }
 
     /* second controller of the pair uses second half of the lb */
     if (radeon_crtc->crtc_id % 2)
         tmp += 4;
     WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
 
+    if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+        WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+               DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+        for (i = 0; i < rdev->usec_timeout; i++) {
+            if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+                DMIF_BUFFERS_ALLOCATED_COMPLETED)
+                break;
+            udelay(1);
+        }
+    }
+
     if (radeon_crtc->base.enabled && mode) {
         switch (tmp) {
         case 0:
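
Editor's note: the DMIF programming added above follows a common MMIO handshake: write the requested buffer allocation, then poll the same register until the hardware raises the COMPLETED bit, giving up after rdev->usec_timeout iterations of a one-microsecond delay. A self-contained sketch of the idiom, with hypothetical mmio_read/mmio_write/delay_us stand-ins for the driver's RREG32/WREG32/udelay:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the driver's register accessors and delay. */
extern uint32_t mmio_read(uint32_t reg);
extern void mmio_write(uint32_t reg, uint32_t val);
extern void delay_us(unsigned int us);

/* Write 'request' to 'reg', then poll until 'done_mask' is set or
 * 'timeout_us' microseconds have elapsed, mirroring the loop added in
 * evergreen_line_buffer_adjust(). */
static bool mmio_write_and_wait(uint32_t reg, uint32_t request,
                                uint32_t done_mask, unsigned int timeout_us)
{
    mmio_write(reg, request);
    for (unsigned int i = 0; i < timeout_us; i++) {
        if (mmio_read(reg) & done_mask)
            return true;
        delay_us(1);
    }
    return false; /* hardware never acknowledged the request */
}

Bounding the poll by usec_timeout rather than spinning forever keeps a wedged display block from hanging the whole modeset path.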
@@ -2881,8 +2903,8 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
     RREG32(GRBM_SOFT_RESET);
 
     /* Set ring buffer size */
-    rb_bufsz = drm_order(ring->ring_size / 8);
-    tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+    rb_bufsz = order_base_2(ring->ring_size / 8);
+    tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
     tmp |= BUF_SWAP_32BIT;
 #endif
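
Editor's note: drm_order() and order_base_2() compute the same quantity here, the base-2 logarithm rounded up, which is why the substitution is mechanical. For example, a 1 MiB ring gives ring_size / 8 = 131072 = 2^17, so rb_bufsz is 17, and a 4096-byte GPU page gives order_base_2(4096 / 8) = 9. A small plain-C sketch of the rounding behaviour, independent of the kernel helpers:

#include <assert.h>

/* Smallest n with (1u << n) >= x, i.e. log2 rounded up -- what the
 * kernel's order_base_2() returns for the sizes used in this hunk. */
static unsigned int order_base_2_sketch(unsigned int x)
{
    unsigned int n = 0;

    while ((1u << n) < x)
        n++;
    return n;
}

int main(void)
{
    assert(order_base_2_sketch((1024 * 1024) / 8) == 17);
    assert(order_base_2_sketch(4096 / 8) == 9);
    assert(order_base_2_sketch(3) == 2); /* non-powers of two round up */
    return 0;
}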
@@ -3613,7 +3635,7 @@ bool evergreen_is_display_hung(struct radeon_device *rdev)
     return true;
 }
 
-static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
+u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
 {
     u32 reset_mask = 0;
     u32 tmp;
@@ -3839,28 +3861,6 @@ bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
     return radeon_ring_test_lockup(rdev, ring);
 }
 
-/**
- * evergreen_dma_is_lockup - Check if the DMA engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up.
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
-    u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
-
-    if (!(reset_mask & RADEON_RESET_DMA)) {
-        radeon_ring_lockup_update(ring);
-        return false;
-    }
-    /* force ring activities */
-    radeon_ring_force_activity(rdev, ring);
-    return radeon_ring_test_lockup(rdev, ring);
-}
-
 /*
  * RLC
  */
@@ -3894,147 +3894,231 @@ void sumo_rlc_fini(struct radeon_device *rdev)
         radeon_bo_unref(&rdev->rlc.clear_state_obj);
         rdev->rlc.clear_state_obj = NULL;
     }
+
+    /* clear state block */
+    if (rdev->rlc.cp_table_obj) {
+        r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
+        if (unlikely(r != 0))
+            dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+        radeon_bo_unpin(rdev->rlc.cp_table_obj);
+        radeon_bo_unreserve(rdev->rlc.cp_table_obj);
+
+        radeon_bo_unref(&rdev->rlc.cp_table_obj);
+        rdev->rlc.cp_table_obj = NULL;
+    }
 }
 
+#define CP_ME_TABLE_SIZE    96
+
 int sumo_rlc_init(struct radeon_device *rdev)
 {
-    u32 *src_ptr;
+    const u32 *src_ptr;
     volatile u32 *dst_ptr;
     u32 dws, data, i, j, k, reg_num;
-    u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index;
+    u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index = 0;
     u64 reg_list_mc_addr;
-    struct cs_section_def *cs_data;
+    const struct cs_section_def *cs_data;
     int r;
 
     src_ptr = rdev->rlc.reg_list;
     dws = rdev->rlc.reg_list_size;
+    if (rdev->family >= CHIP_BONAIRE) {
+        dws += (5 * 16) + 48 + 48 + 64;
+    }
     cs_data = rdev->rlc.cs_data;
 
-    /* save restore block */
-    if (rdev->rlc.save_restore_obj == NULL) {
-        r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
-                             RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj);
-        if (r) {
-            dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
-            return r;
+    if (src_ptr) {
+        /* save restore block */
+        if (rdev->rlc.save_restore_obj == NULL) {
+            r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
+                                 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj);
+            if (r) {
+                dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
+                return r;
+            }
         }
-    }
-
-    r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
-    if (unlikely(r != 0)) {
-        sumo_rlc_fini(rdev);
-        return r;
-    }
-    r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
-                      &rdev->rlc.save_restore_gpu_addr);
-    if (r) {
-        radeon_bo_unreserve(rdev->rlc.save_restore_obj);
-        dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
-        sumo_rlc_fini(rdev);
-        return r;
-    }
-    r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
-    if (r) {
-        dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
-        sumo_rlc_fini(rdev);
-        return r;
-    }
-    /* write the sr buffer */
-    dst_ptr = rdev->rlc.sr_ptr;
-    /* format:
-     * dw0: (reg2 << 16) | reg1
-     * dw1: reg1 save space
-     * dw2: reg2 save space
-     */
-    for (i = 0; i < dws; i++) {
-        data = src_ptr[i] >> 2;
-        i++;
-        if (i < dws)
-            data |= (src_ptr[i] >> 2) << 16;
-        j = (((i - 1) * 3) / 2);
-        dst_ptr[j] = data;
-    }
-    j = ((i * 3) / 2);
-    dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
 
-    radeon_bo_kunmap(rdev->rlc.save_restore_obj);
-    radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+        r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
+        if (unlikely(r != 0)) {
+            sumo_rlc_fini(rdev);
+            return r;
+        }
+        r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
+                          &rdev->rlc.save_restore_gpu_addr);
+        if (r) {
+            radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+            dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
+            sumo_rlc_fini(rdev);
+            return r;
+        }
+
+        r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
+        if (r) {
+            dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
+            sumo_rlc_fini(rdev);
+            return r;
+        }
+        /* write the sr buffer */
+        dst_ptr = rdev->rlc.sr_ptr;
+        if (rdev->family >= CHIP_TAHITI) {
+            /* SI */
+            for (i = 0; i < rdev->rlc.reg_list_size; i++)
+                dst_ptr[i] = src_ptr[i];
+        } else {
+            /* ON/LN/TN */
+            /* format:
+             * dw0: (reg2 << 16) | reg1
+             * dw1: reg1 save space
+             * dw2: reg2 save space
+             */
+            for (i = 0; i < dws; i++) {
+                data = src_ptr[i] >> 2;
+                i++;
+                if (i < dws)
+                    data |= (src_ptr[i] >> 2) << 16;
+                j = (((i - 1) * 3) / 2);
+                dst_ptr[j] = data;
+            }
+            j = ((i * 3) / 2);
+            dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
+        }
+        radeon_bo_kunmap(rdev->rlc.save_restore_obj);
+        radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+    }
 
-    /* clear state block */
-    reg_list_num = 0;
-    dws = 0;
-    for (i = 0; cs_data[i].section != NULL; i++) {
-        for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
-            reg_list_num++;
-            dws += cs_data[i].section[j].reg_count;
+    if (cs_data) {
+        /* clear state block */
+        if (rdev->family >= CHIP_BONAIRE) {
+            rdev->rlc.clear_state_size = dws = cik_get_csb_size(rdev);
+        } else if (rdev->family >= CHIP_TAHITI) {
+            rdev->rlc.clear_state_size = si_get_csb_size(rdev);
+            dws = rdev->rlc.clear_state_size + (256 / 4);
+        } else {
+            reg_list_num = 0;
+            dws = 0;
+            for (i = 0; cs_data[i].section != NULL; i++) {
+                for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
+                    reg_list_num++;
+                    dws += cs_data[i].section[j].reg_count;
+                }
+            }
+            reg_list_blk_index = (3 * reg_list_num + 2);
+            dws += reg_list_blk_index;
+            rdev->rlc.clear_state_size = dws;
         }
-    }
-    reg_list_blk_index = (3 * reg_list_num + 2);
-    dws += reg_list_blk_index;
 
-    if (rdev->rlc.clear_state_obj == NULL) {
-        r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
-                             RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
-        if (r) {
-            dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
+        if (rdev->rlc.clear_state_obj == NULL) {
+            r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
+                                 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
+            if (r) {
+                dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
+                sumo_rlc_fini(rdev);
+                return r;
+            }
+        }
+        r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
+        if (unlikely(r != 0)) {
             sumo_rlc_fini(rdev);
             return r;
         }
-    }
-    r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
-    if (unlikely(r != 0)) {
-        sumo_rlc_fini(rdev);
-        return r;
-    }
-    r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
-                      &rdev->rlc.clear_state_gpu_addr);
-    if (r) {
-
-        radeon_bo_unreserve(rdev->rlc.clear_state_obj);
-        dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
-        sumo_rlc_fini(rdev);
-        return r;
-    }
-    r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
-    if (r) {
-        dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
-        sumo_rlc_fini(rdev);
-        return r;
-    }
-    /* set up the cs buffer */
-    dst_ptr = rdev->rlc.cs_ptr;
-    reg_list_hdr_blk_index = 0;
-    reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
-    data = upper_32_bits(reg_list_mc_addr);
-    dst_ptr[reg_list_hdr_blk_index] = data;
-    reg_list_hdr_blk_index++;
-    for (i = 0; cs_data[i].section != NULL; i++) {
-        for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
-            reg_num = cs_data[i].section[j].reg_count;
-            data = reg_list_mc_addr & 0xffffffff;
-            dst_ptr[reg_list_hdr_blk_index] = data;
-            reg_list_hdr_blk_index++;
-
-            data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
-            dst_ptr[reg_list_hdr_blk_index] = data;
-            reg_list_hdr_blk_index++;
-
-            data = 0x08000000 | (reg_num * 4);
-            dst_ptr[reg_list_hdr_blk_index] = data;
-            reg_list_hdr_blk_index++;
-
-            for (k = 0; k < reg_num; k++) {
-                data = cs_data[i].section[j].extent[k];
-                dst_ptr[reg_list_blk_index + k] = data;
+        r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
+                          &rdev->rlc.clear_state_gpu_addr);
+        if (r) {
+            radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+            dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
+            sumo_rlc_fini(rdev);
+            return r;
+        }
+
+        r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
+        if (r) {
+            dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
+            sumo_rlc_fini(rdev);
+            return r;
+        }
+        /* set up the cs buffer */
+        dst_ptr = rdev->rlc.cs_ptr;
+        if (rdev->family >= CHIP_BONAIRE) {
+            cik_get_csb_buffer(rdev, dst_ptr);
+        } else if (rdev->family >= CHIP_TAHITI) {
+            reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
+            dst_ptr[0] = upper_32_bits(reg_list_mc_addr);
+            dst_ptr[1] = lower_32_bits(reg_list_mc_addr);
+            dst_ptr[2] = rdev->rlc.clear_state_size;
+            si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
+        } else {
+            reg_list_hdr_blk_index = 0;
+            reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
+            data = upper_32_bits(reg_list_mc_addr);
+            dst_ptr[reg_list_hdr_blk_index] = data;
+            reg_list_hdr_blk_index++;
+            for (i = 0; cs_data[i].section != NULL; i++) {
+                for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
+                    reg_num = cs_data[i].section[j].reg_count;
+                    data = reg_list_mc_addr & 0xffffffff;
+                    dst_ptr[reg_list_hdr_blk_index] = data;
+                    reg_list_hdr_blk_index++;
+
+                    data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
+                    dst_ptr[reg_list_hdr_blk_index] = data;
+                    reg_list_hdr_blk_index++;
+
+                    data = 0x08000000 | (reg_num * 4);
+                    dst_ptr[reg_list_hdr_blk_index] = data;
+                    reg_list_hdr_blk_index++;
+
+                    for (k = 0; k < reg_num; k++) {
+                        data = cs_data[i].section[j].extent[k];
+                        dst_ptr[reg_list_blk_index + k] = data;
+                    }
+                    reg_list_mc_addr += reg_num * 4;
+                    reg_list_blk_index += reg_num;
+                }
             }
-            reg_list_mc_addr += reg_num * 4;
-            reg_list_blk_index += reg_num;
+            dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
         }
+        radeon_bo_kunmap(rdev->rlc.clear_state_obj);
+        radeon_bo_unreserve(rdev->rlc.clear_state_obj);
     }
-    dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
 
-    radeon_bo_kunmap(rdev->rlc.clear_state_obj);
-    radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+    if (rdev->rlc.cp_table_size) {
+        if (rdev->rlc.cp_table_obj == NULL) {
+            r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, PAGE_SIZE, true,
+                                 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.cp_table_obj);
+            if (r) {
+                dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
+                sumo_rlc_fini(rdev);
+                return r;
+            }
+        }
+
+        r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
+        if (unlikely(r != 0)) {
+            dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+            sumo_rlc_fini(rdev);
+            return r;
+        }
+        r = radeon_bo_pin(rdev->rlc.cp_table_obj, RADEON_GEM_DOMAIN_VRAM,
+                          &rdev->rlc.cp_table_gpu_addr);
+        if (r) {
+            radeon_bo_unreserve(rdev->rlc.cp_table_obj);
+            dev_warn(rdev->dev, "(%d) pin RLC cp_table bo failed\n", r);
+            sumo_rlc_fini(rdev);
+            return r;
+        }
+        r = radeon_bo_kmap(rdev->rlc.cp_table_obj, (void **)&rdev->rlc.cp_table_ptr);
+        if (r) {
+            dev_warn(rdev->dev, "(%d) map RLC cp table bo failed\n", r);
+            sumo_rlc_fini(rdev);
+            return r;
+        }
+
+        cik_init_cp_pg_table(rdev);
+
+        radeon_bo_kunmap(rdev->rlc.cp_table_obj);
+        radeon_bo_unreserve(rdev->rlc.cp_table_obj);
+
+    }
 
     return 0;
 }
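
Editor's note: the pre-SI save/restore packing kept by the else branch above is easiest to see with concrete numbers. Register offsets are stored as dword indices (offset >> 2), two per header word, and every pair of source entries expands to three destination dwords, which is where the j = (((i - 1) * 3) / 2) index comes from. A hedged standalone sketch of the same packing, with two values borrowed from sumo_rlc_save_restore_register_list and a placeholder end marker:

#include <stdint.h>
#include <stdio.h>

#define END_MARKER 0x00000000 /* placeholder for RLC_SAVE_RESTORE_LIST_END_MARKER */

int main(void)
{
    const uint32_t src[] = { 0x98fc, 0x9830 }; /* register offsets in bytes */
    uint32_t dst[8] = { 0 };
    uint32_t i, j, data;
    const uint32_t dws = 2;

    for (i = 0; i < dws; i++) {
        data = src[i] >> 2;                  /* reg1 as a dword index */
        i++;
        if (i < dws)
            data |= (src[i] >> 2) << 16;     /* reg2 in the high half */
        j = (((i - 1) * 3) / 2);             /* 2 source entries -> 3 dst dwords */
        dst[j] = data;
    }
    j = ((i * 3) / 2);
    dst[j] = END_MARKER;

    /* dst[0] = ((0x9830 >> 2) << 16) | (0x98fc >> 2) = 0x260c263f;
     * dst[1] and dst[2] are left as save space for the two registers,
     * and the end marker lands at dst[3]. */
    printf("dw0 = 0x%08x\n", dst[0]);
    return 0;
}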
@@ -4959,143 +5043,6 @@ restart_ih:
     return IRQ_HANDLED;
 }
 
-/**
- * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
- *
- * @rdev: radeon_device pointer
- * @fence: radeon fence object
- *
- * Add a DMA fence packet to the ring to write
- * the fence seq number and DMA trap packet to generate
- * an interrupt if needed (evergreen-SI).
- */
-void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
-                                   struct radeon_fence *fence)
-{
-    struct radeon_ring *ring = &rdev->ring[fence->ring];
-    u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
-    /* write the fence */
-    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
-    radeon_ring_write(ring, addr & 0xfffffffc);
-    radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
-    radeon_ring_write(ring, fence->seq);
-    /* generate an interrupt */
-    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
-    /* flush HDP */
-    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
-    radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
-    radeon_ring_write(ring, 1);
-}
-
-/**
- * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- *
- * Schedule an IB in the DMA ring (evergreen).
- */
-void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
-                                   struct radeon_ib *ib)
-{
-    struct radeon_ring *ring = &rdev->ring[ib->ring];
-
-    if (rdev->wb.enabled) {
-        u32 next_rptr = ring->wptr + 4;
-        while ((next_rptr & 7) != 5)
-            next_rptr++;
-        next_rptr += 3;
-        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
-        radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
-        radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
-        radeon_ring_write(ring, next_rptr);
-    }
-
-    /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
-     * Pad as necessary with NOPs.
-     */
-    while ((ring->wptr & 7) != 5)
-        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
-    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
-    radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
-    radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
-
-}
-
-/**
- * evergreen_copy_dma - copy pages using the DMA engine
- *
- * @rdev: radeon_device pointer
- * @src_offset: src GPU address
- * @dst_offset: dst GPU address
- * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
- *
- * Copy GPU paging using the DMA engine (evergreen-cayman).
- * Used by the radeon ttm implementation to move pages if
- * registered as the asic copy callback.
- */
-int evergreen_copy_dma(struct radeon_device *rdev,
-                       uint64_t src_offset, uint64_t dst_offset,
-                       unsigned num_gpu_pages,
-                       struct radeon_fence **fence)
-{
-    struct radeon_semaphore *sem = NULL;
-    int ring_index = rdev->asic->copy.dma_ring_index;
-    struct radeon_ring *ring = &rdev->ring[ring_index];
-    u32 size_in_dw, cur_size_in_dw;
-    int i, num_loops;
-    int r = 0;
-
-    r = radeon_semaphore_create(rdev, &sem);
-    if (r) {
-        DRM_ERROR("radeon: moving bo (%d).\n", r);
-        return r;
-    }
-
-    size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
-    num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
-    r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
-    if (r) {
-        DRM_ERROR("radeon: moving bo (%d).\n", r);
-        radeon_semaphore_free(rdev, &sem, NULL);
-        return r;
-    }
-
-    if (radeon_fence_need_sync(*fence, ring->idx)) {
-        radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-                                    ring->idx);
-        radeon_fence_note_sync(*fence, ring->idx);
-    } else {
-        radeon_semaphore_free(rdev, &sem, NULL);
-    }
-
-    for (i = 0; i < num_loops; i++) {
-        cur_size_in_dw = size_in_dw;
-        if (cur_size_in_dw > 0xFFFFF)
-            cur_size_in_dw = 0xFFFFF;
-        size_in_dw -= cur_size_in_dw;
-        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
-        radeon_ring_write(ring, dst_offset & 0xfffffffc);
-        radeon_ring_write(ring, src_offset & 0xfffffffc);
-        radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
-        radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
-        src_offset += cur_size_in_dw * 4;
-        dst_offset += cur_size_in_dw * 4;
-    }
-
-    r = radeon_fence_emit(rdev, fence, ring->idx);
-    if (r) {
-        radeon_ring_unlock_undo(rdev, ring);
-        return r;
-    }
-
-    radeon_ring_unlock_commit(rdev, ring);
-    radeon_semaphore_free(rdev, &sem, *fence);
-
-    return r;
-}
-
 static int evergreen_startup(struct radeon_device *rdev)
 {
     struct radeon_ring *ring;
@@ -5106,6 +5053,13 @@ static int evergreen_startup(struct radeon_device *rdev)
     /* enable aspm */
     evergreen_program_aspm(rdev);
 
+    /* scratch needs to be initialized before MC */
+    r = r600_vram_scratch_init(rdev);
+    if (r)
+        return r;
+
+    evergreen_mc_program(rdev);
+
     if (ASIC_IS_DCE5(rdev)) {
         if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
             r = ni_init_microcode(rdev);
@@ -5129,11 +5083,6 @@ static int evergreen_startup(struct radeon_device *rdev)
         }
     }
 
-    r = r600_vram_scratch_init(rdev);
-    if (r)
-        return r;
-
-    evergreen_mc_program(rdev);
     if (rdev->flags & RADEON_IS_AGP) {
         evergreen_agp_enable(rdev);
     } else {
@@ -5143,17 +5092,11 @@ static int evergreen_startup(struct radeon_device *rdev)
     }
     evergreen_gpu_init(rdev);
 
-    r = evergreen_blit_init(rdev);
-    if (r) {
-        r600_blit_fini(rdev);
-        rdev->asic->copy.copy = NULL;
-        dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
-    }
-
     /* allocate rlc buffers */
     if (rdev->flags & RADEON_IS_IGP) {
         rdev->rlc.reg_list = sumo_rlc_save_restore_register_list;
-        rdev->rlc.reg_list_size = sumo_rlc_save_restore_register_list_size;
+        rdev->rlc.reg_list_size =
+            (u32)ARRAY_SIZE(sumo_rlc_save_restore_register_list);
         rdev->rlc.cs_data = evergreen_cs_data;
         r = sumo_rlc_init(rdev);
         if (r) {
@@ -5179,7 +5122,7 @@ static int evergreen_startup(struct radeon_device *rdev)
         return r;
     }
 
-    r = rv770_uvd_resume(rdev);
+    r = uvd_v2_2_resume(rdev);
     if (!r) {
         r = radeon_fence_driver_start_ring(rdev,
                                            R600_RING_TYPE_UVD_INDEX);
@@ -5208,14 +5151,14 @@ static int evergreen_startup(struct radeon_device *rdev)
     ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
     r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
                          R600_CP_RB_RPTR, R600_CP_RB_WPTR,
-                         0, 0xfffff, RADEON_CP_PACKET2);
+                         RADEON_CP_PACKET2);
     if (r)
         return r;
 
     ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
     r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
                          DMA_RB_RPTR, DMA_RB_WPTR,
-                         2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+                         DMA_PACKET(DMA_PACKET_NOP, 0, 0));
     if (r)
         return r;
 
@@ -5231,12 +5174,11 @@ static int evergreen_startup(struct radeon_device *rdev)
 
     ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
     if (ring->ring_size) {
-        r = radeon_ring_init(rdev, ring, ring->ring_size,
-                             R600_WB_UVD_RPTR_OFFSET,
+        r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
                              UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
-                             0, 0xfffff, RADEON_CP_PACKET2);
+                             RADEON_CP_PACKET2);
         if (!r)
-            r = r600_uvd_init(rdev);
+            r = uvd_v1_0_init(rdev);
 
         if (r)
             DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
@@ -5291,10 +5233,10 @@ int evergreen_resume(struct radeon_device *rdev)
 int evergreen_suspend(struct radeon_device *rdev)
 {
     r600_audio_fini(rdev);
+    uvd_v1_0_fini(rdev);
     radeon_uvd_suspend(rdev);
     r700_cp_stop(rdev);
     r600_dma_stop(rdev);
-    r600_uvd_rbc_stop(rdev);
     evergreen_irq_suspend(rdev);
     radeon_wb_disable(rdev);
     evergreen_pcie_gart_disable(rdev);
@@ -5419,7 +5361,6 @@ int evergreen_init(struct radeon_device *rdev)
 void evergreen_fini(struct radeon_device *rdev)
 {
     r600_audio_fini(rdev);
-    r600_blit_fini(rdev);
     r700_cp_fini(rdev);
     r600_dma_fini(rdev);
     r600_irq_fini(rdev);
@@ -5429,6 +5370,7 @@ void evergreen_fini(struct radeon_device *rdev)
     radeon_ib_pool_fini(rdev);
     radeon_irq_kms_fini(rdev);
    evergreen_pcie_gart_fini(rdev);
+    uvd_v1_0_fini(rdev);
     radeon_uvd_fini(rdev);
     r600_vram_scratch_fini(rdev);
     radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
deleted file mode 100644
index 057c87b6515a..000000000000
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ /dev/null
@@ -1,729 +0,0 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Alex Deucher <alexander.deucher@amd.com>
25 */
26
27#include <drm/drmP.h>
28#include <drm/radeon_drm.h>
29#include "radeon.h"
30
31#include "evergreend.h"
32#include "evergreen_blit_shaders.h"
33#include "cayman_blit_shaders.h"
34#include "radeon_blit_common.h"
35
36/* emits 17 */
37static void
38set_render_target(struct radeon_device *rdev, int format,
39 int w, int h, u64 gpu_addr)
40{
41 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
42 u32 cb_color_info;
43 int pitch, slice;
44
45 h = ALIGN(h, 8);
46 if (h < 8)
47 h = 8;
48
49 cb_color_info = CB_FORMAT(format) |
50 CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
51 CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
52 pitch = (w / 8) - 1;
53 slice = ((w * h) / 64) - 1;
54
55 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
56 radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
57 radeon_ring_write(ring, gpu_addr >> 8);
58 radeon_ring_write(ring, pitch);
59 radeon_ring_write(ring, slice);
60 radeon_ring_write(ring, 0);
61 radeon_ring_write(ring, cb_color_info);
62 radeon_ring_write(ring, 0);
63 radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
64 radeon_ring_write(ring, 0);
65 radeon_ring_write(ring, 0);
66 radeon_ring_write(ring, 0);
67 radeon_ring_write(ring, 0);
68 radeon_ring_write(ring, 0);
69 radeon_ring_write(ring, 0);
70 radeon_ring_write(ring, 0);
71 radeon_ring_write(ring, 0);
72}
73
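Editor's note: the pitch and slice values programmed above are in hardware units, not pixels: pitch is counted in 8-pixel groups minus one, and slice in 64-pixel tiles minus one. For a 1024x768 render target that gives pitch = (1024 / 8) - 1 = 127 and slice = ((1024 * 768) / 64) - 1 = 12287. A tiny check of the same arithmetic:

#include <assert.h>

int main(void)
{
    int w = 1024, h = 768;

    /* Same unit conversion as set_render_target() above. */
    int pitch = (w / 8) - 1;        /* 8-pixel groups, minus one */
    int slice = ((w * h) / 64) - 1; /* 64-pixel tiles, minus one */

    assert(pitch == 127);
    assert(slice == 12287);
    return 0;
}
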
74/* emits 5dw */
75static void
76cp_set_surface_sync(struct radeon_device *rdev,
77 u32 sync_type, u32 size,
78 u64 mc_addr)
79{
80 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
81 u32 cp_coher_size;
82
83 if (size == 0xffffffff)
84 cp_coher_size = 0xffffffff;
85 else
86 cp_coher_size = ((size + 255) >> 8);
87
88 if (rdev->family >= CHIP_CAYMAN) {
89 /* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync
90 * to the RB directly. For IBs, the CP programs this as part of the
91 * surface_sync packet.
92 */
93 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
94 radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
95 radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
96 }
97 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
98 radeon_ring_write(ring, sync_type);
99 radeon_ring_write(ring, cp_coher_size);
100 radeon_ring_write(ring, mc_addr >> 8);
101 radeon_ring_write(ring, 10); /* poll interval */
102}
103
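Editor's note: CP_COHER_SIZE is expressed in 256-byte units, so ((size + 255) >> 8) rounds the byte count up to the next unit, while 0xffffffff is passed through unchanged as the "sync everything" sentinel. Worked examples of the rounding:

#include <assert.h>
#include <stdint.h>

/* Byte count -> 256-byte units, rounded up, as in cp_set_surface_sync(). */
static uint32_t coher_units(uint32_t size)
{
    return (size + 255) >> 8;
}

int main(void)
{
    assert(coher_units(512) == 2); /* exact multiple */
    assert(coher_units(513) == 3); /* partial unit rounds up */
    assert(coher_units(48) == 1);  /* the 48-byte vertex buffer sync above */
    return 0;
}
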
104/* emits 11dw + 1 surface sync = 16dw */
105static void
106set_shaders(struct radeon_device *rdev)
107{
108 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
109 u64 gpu_addr;
110
111 /* VS */
112 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
113 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
114 radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
115 radeon_ring_write(ring, gpu_addr >> 8);
116 radeon_ring_write(ring, 2);
117 radeon_ring_write(ring, 0);
118
119 /* PS */
120 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
121 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
122 radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
123 radeon_ring_write(ring, gpu_addr >> 8);
124 radeon_ring_write(ring, 1);
125 radeon_ring_write(ring, 0);
126 radeon_ring_write(ring, 2);
127
128 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
129 cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
130}
131
132/* emits 10 + 1 sync (5) = 15 */
133static void
134set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
135{
136 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
137 u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
138
139 /* high addr, stride */
140 sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
141 SQ_VTXC_STRIDE(16);
142#ifdef __BIG_ENDIAN
143 sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
144#endif
145 /* xyzw swizzles */
146 sq_vtx_constant_word3 = SQ_VTCX_SEL_X(SQ_SEL_X) |
147 SQ_VTCX_SEL_Y(SQ_SEL_Y) |
148 SQ_VTCX_SEL_Z(SQ_SEL_Z) |
149 SQ_VTCX_SEL_W(SQ_SEL_W);
150
151 radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
152 radeon_ring_write(ring, 0x580);
153 radeon_ring_write(ring, gpu_addr & 0xffffffff);
154 radeon_ring_write(ring, 48 - 1); /* size */
155 radeon_ring_write(ring, sq_vtx_constant_word2);
156 radeon_ring_write(ring, sq_vtx_constant_word3);
157 radeon_ring_write(ring, 0);
158 radeon_ring_write(ring, 0);
159 radeon_ring_write(ring, 0);
160 radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
161
162 if ((rdev->family == CHIP_CEDAR) ||
163 (rdev->family == CHIP_PALM) ||
164 (rdev->family == CHIP_SUMO) ||
165 (rdev->family == CHIP_SUMO2) ||
166 (rdev->family == CHIP_CAICOS))
167 cp_set_surface_sync(rdev,
168 PACKET3_TC_ACTION_ENA, 48, gpu_addr);
169 else
170 cp_set_surface_sync(rdev,
171 PACKET3_VC_ACTION_ENA, 48, gpu_addr);
172
173}
174
175/* emits 10 */
176static void
177set_tex_resource(struct radeon_device *rdev,
178 int format, int w, int h, int pitch,
179 u64 gpu_addr, u32 size)
180{
181 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
182 u32 sq_tex_resource_word0, sq_tex_resource_word1;
183 u32 sq_tex_resource_word4, sq_tex_resource_word7;
184
185 if (h < 1)
186 h = 1;
187
188 sq_tex_resource_word0 = TEX_DIM(SQ_TEX_DIM_2D);
189 sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
190 ((w - 1) << 18));
191 sq_tex_resource_word1 = ((h - 1) << 0) |
192 TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
193 /* xyzw swizzles */
194 sq_tex_resource_word4 = TEX_DST_SEL_X(SQ_SEL_X) |
195 TEX_DST_SEL_Y(SQ_SEL_Y) |
196 TEX_DST_SEL_Z(SQ_SEL_Z) |
197 TEX_DST_SEL_W(SQ_SEL_W);
198
199 sq_tex_resource_word7 = format |
200 S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);
201
202 cp_set_surface_sync(rdev,
203 PACKET3_TC_ACTION_ENA, size, gpu_addr);
204
205 radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
206 radeon_ring_write(ring, 0);
207 radeon_ring_write(ring, sq_tex_resource_word0);
208 radeon_ring_write(ring, sq_tex_resource_word1);
209 radeon_ring_write(ring, gpu_addr >> 8);
210 radeon_ring_write(ring, gpu_addr >> 8);
211 radeon_ring_write(ring, sq_tex_resource_word4);
212 radeon_ring_write(ring, 0);
213 radeon_ring_write(ring, 0);
214 radeon_ring_write(ring, sq_tex_resource_word7);
215}
216
217/* emits 12 */
218static void
219set_scissors(struct radeon_device *rdev, int x1, int y1,
220 int x2, int y2)
221{
222 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
223 /* workaround some hw bugs */
224 if (x2 == 0)
225 x1 = 1;
226 if (y2 == 0)
227 y1 = 1;
228 if (rdev->family >= CHIP_CAYMAN) {
229 if ((x2 == 1) && (y2 == 1))
230 x2 = 2;
231 }
232
233 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
234 radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
235 radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
236 radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
237
238 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
239 radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
240 radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
241 radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
242
243 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
244 radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
245 radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
246 radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
247}
248
249/* emits 10 */
250static void
251draw_auto(struct radeon_device *rdev)
252{
253 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
254 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
255 radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
256 radeon_ring_write(ring, DI_PT_RECTLIST);
257
258 radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
259 radeon_ring_write(ring,
260#ifdef __BIG_ENDIAN
261 (2 << 2) |
262#endif
263 DI_INDEX_SIZE_16_BIT);
264
265 radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
266 radeon_ring_write(ring, 1);
267
268 radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
269 radeon_ring_write(ring, 3);
270 radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
271
272}
273
274/* emits 39 */
275static void
276set_default_state(struct radeon_device *rdev)
277{
278 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
279 u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
280 u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
281 u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
282 int num_ps_gprs, num_vs_gprs, num_temp_gprs;
283 int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs;
284 int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
285 int num_hs_threads, num_ls_threads;
286 int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
287 int num_hs_stack_entries, num_ls_stack_entries;
288 u64 gpu_addr;
289 int dwords;
290
291 /* set clear context state */
292 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
293 radeon_ring_write(ring, 0);
294
295 if (rdev->family < CHIP_CAYMAN) {
296 switch (rdev->family) {
297 case CHIP_CEDAR:
298 default:
299 num_ps_gprs = 93;
300 num_vs_gprs = 46;
301 num_temp_gprs = 4;
302 num_gs_gprs = 31;
303 num_es_gprs = 31;
304 num_hs_gprs = 23;
305 num_ls_gprs = 23;
306 num_ps_threads = 96;
307 num_vs_threads = 16;
308 num_gs_threads = 16;
309 num_es_threads = 16;
310 num_hs_threads = 16;
311 num_ls_threads = 16;
312 num_ps_stack_entries = 42;
313 num_vs_stack_entries = 42;
314 num_gs_stack_entries = 42;
315 num_es_stack_entries = 42;
316 num_hs_stack_entries = 42;
317 num_ls_stack_entries = 42;
318 break;
319 case CHIP_REDWOOD:
320 num_ps_gprs = 93;
321 num_vs_gprs = 46;
322 num_temp_gprs = 4;
323 num_gs_gprs = 31;
324 num_es_gprs = 31;
325 num_hs_gprs = 23;
326 num_ls_gprs = 23;
327 num_ps_threads = 128;
328 num_vs_threads = 20;
329 num_gs_threads = 20;
330 num_es_threads = 20;
331 num_hs_threads = 20;
332 num_ls_threads = 20;
333 num_ps_stack_entries = 42;
334 num_vs_stack_entries = 42;
335 num_gs_stack_entries = 42;
336 num_es_stack_entries = 42;
337 num_hs_stack_entries = 42;
338 num_ls_stack_entries = 42;
339 break;
340 case CHIP_JUNIPER:
341 num_ps_gprs = 93;
342 num_vs_gprs = 46;
343 num_temp_gprs = 4;
344 num_gs_gprs = 31;
345 num_es_gprs = 31;
346 num_hs_gprs = 23;
347 num_ls_gprs = 23;
348 num_ps_threads = 128;
349 num_vs_threads = 20;
350 num_gs_threads = 20;
351 num_es_threads = 20;
352 num_hs_threads = 20;
353 num_ls_threads = 20;
354 num_ps_stack_entries = 85;
355 num_vs_stack_entries = 85;
356 num_gs_stack_entries = 85;
357 num_es_stack_entries = 85;
358 num_hs_stack_entries = 85;
359 num_ls_stack_entries = 85;
360 break;
361 case CHIP_CYPRESS:
362 case CHIP_HEMLOCK:
363 num_ps_gprs = 93;
364 num_vs_gprs = 46;
365 num_temp_gprs = 4;
366 num_gs_gprs = 31;
367 num_es_gprs = 31;
368 num_hs_gprs = 23;
369 num_ls_gprs = 23;
370 num_ps_threads = 128;
371 num_vs_threads = 20;
372 num_gs_threads = 20;
373 num_es_threads = 20;
374 num_hs_threads = 20;
375 num_ls_threads = 20;
376 num_ps_stack_entries = 85;
377 num_vs_stack_entries = 85;
378 num_gs_stack_entries = 85;
379 num_es_stack_entries = 85;
380 num_hs_stack_entries = 85;
381 num_ls_stack_entries = 85;
382 break;
383 case CHIP_PALM:
384 num_ps_gprs = 93;
385 num_vs_gprs = 46;
386 num_temp_gprs = 4;
387 num_gs_gprs = 31;
388 num_es_gprs = 31;
389 num_hs_gprs = 23;
390 num_ls_gprs = 23;
391 num_ps_threads = 96;
392 num_vs_threads = 16;
393 num_gs_threads = 16;
394 num_es_threads = 16;
395 num_hs_threads = 16;
396 num_ls_threads = 16;
397 num_ps_stack_entries = 42;
398 num_vs_stack_entries = 42;
399 num_gs_stack_entries = 42;
400 num_es_stack_entries = 42;
401 num_hs_stack_entries = 42;
402 num_ls_stack_entries = 42;
403 break;
404 case CHIP_SUMO:
405 num_ps_gprs = 93;
406 num_vs_gprs = 46;
407 num_temp_gprs = 4;
408 num_gs_gprs = 31;
409 num_es_gprs = 31;
410 num_hs_gprs = 23;
411 num_ls_gprs = 23;
412 num_ps_threads = 96;
413 num_vs_threads = 25;
414 num_gs_threads = 25;
415 num_es_threads = 25;
416 num_hs_threads = 25;
417 num_ls_threads = 25;
418 num_ps_stack_entries = 42;
419 num_vs_stack_entries = 42;
420 num_gs_stack_entries = 42;
421 num_es_stack_entries = 42;
422 num_hs_stack_entries = 42;
423 num_ls_stack_entries = 42;
424 break;
425 case CHIP_SUMO2:
426 num_ps_gprs = 93;
427 num_vs_gprs = 46;
428 num_temp_gprs = 4;
429 num_gs_gprs = 31;
430 num_es_gprs = 31;
431 num_hs_gprs = 23;
432 num_ls_gprs = 23;
433 num_ps_threads = 96;
434 num_vs_threads = 25;
435 num_gs_threads = 25;
436 num_es_threads = 25;
437 num_hs_threads = 25;
438 num_ls_threads = 25;
439 num_ps_stack_entries = 85;
440 num_vs_stack_entries = 85;
441 num_gs_stack_entries = 85;
442 num_es_stack_entries = 85;
443 num_hs_stack_entries = 85;
444 num_ls_stack_entries = 85;
445 break;
446 case CHIP_BARTS:
447 num_ps_gprs = 93;
448 num_vs_gprs = 46;
449 num_temp_gprs = 4;
450 num_gs_gprs = 31;
451 num_es_gprs = 31;
452 num_hs_gprs = 23;
453 num_ls_gprs = 23;
454 num_ps_threads = 128;
455 num_vs_threads = 20;
456 num_gs_threads = 20;
457 num_es_threads = 20;
458 num_hs_threads = 20;
459 num_ls_threads = 20;
460 num_ps_stack_entries = 85;
461 num_vs_stack_entries = 85;
462 num_gs_stack_entries = 85;
463 num_es_stack_entries = 85;
464 num_hs_stack_entries = 85;
465 num_ls_stack_entries = 85;
466 break;
467 case CHIP_TURKS:
468 num_ps_gprs = 93;
469 num_vs_gprs = 46;
470 num_temp_gprs = 4;
471 num_gs_gprs = 31;
472 num_es_gprs = 31;
473 num_hs_gprs = 23;
474 num_ls_gprs = 23;
475 num_ps_threads = 128;
476 num_vs_threads = 20;
477 num_gs_threads = 20;
478 num_es_threads = 20;
479 num_hs_threads = 20;
480 num_ls_threads = 20;
481 num_ps_stack_entries = 42;
482 num_vs_stack_entries = 42;
483 num_gs_stack_entries = 42;
484 num_es_stack_entries = 42;
485 num_hs_stack_entries = 42;
486 num_ls_stack_entries = 42;
487 break;
488 case CHIP_CAICOS:
489 num_ps_gprs = 93;
490 num_vs_gprs = 46;
491 num_temp_gprs = 4;
492 num_gs_gprs = 31;
493 num_es_gprs = 31;
494 num_hs_gprs = 23;
495 num_ls_gprs = 23;
496 num_ps_threads = 128;
497 num_vs_threads = 10;
498 num_gs_threads = 10;
499 num_es_threads = 10;
500 num_hs_threads = 10;
501 num_ls_threads = 10;
502 num_ps_stack_entries = 42;
503 num_vs_stack_entries = 42;
504 num_gs_stack_entries = 42;
505 num_es_stack_entries = 42;
506 num_hs_stack_entries = 42;
507 num_ls_stack_entries = 42;
508 break;
509 }
510
511 if ((rdev->family == CHIP_CEDAR) ||
512 (rdev->family == CHIP_PALM) ||
513 (rdev->family == CHIP_SUMO) ||
514 (rdev->family == CHIP_SUMO2) ||
515 (rdev->family == CHIP_CAICOS))
516 sq_config = 0;
517 else
518 sq_config = VC_ENABLE;
519
520 sq_config |= (EXPORT_SRC_C |
521 CS_PRIO(0) |
522 LS_PRIO(0) |
523 HS_PRIO(0) |
524 PS_PRIO(0) |
525 VS_PRIO(1) |
526 GS_PRIO(2) |
527 ES_PRIO(3));
528
529 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
530 NUM_VS_GPRS(num_vs_gprs) |
531 NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
532 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
533 NUM_ES_GPRS(num_es_gprs));
534 sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
535 NUM_LS_GPRS(num_ls_gprs));
536 sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
537 NUM_VS_THREADS(num_vs_threads) |
538 NUM_GS_THREADS(num_gs_threads) |
539 NUM_ES_THREADS(num_es_threads));
540 sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
541 NUM_LS_THREADS(num_ls_threads));
542 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
543 NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
544 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
545 NUM_ES_STACK_ENTRIES(num_es_stack_entries));
546 sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
547 NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
548
549 /* disable dyn gprs */
550 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
551 radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
552 radeon_ring_write(ring, 0);
553
554 /* setup LDS */
555 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
556 radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
557 radeon_ring_write(ring, 0x10001000);
558
559 /* SQ config */
560 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
561 radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
562 radeon_ring_write(ring, sq_config);
563 radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
564 radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
565 radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
566 radeon_ring_write(ring, 0);
567 radeon_ring_write(ring, 0);
568 radeon_ring_write(ring, sq_thread_resource_mgmt);
569 radeon_ring_write(ring, sq_thread_resource_mgmt_2);
570 radeon_ring_write(ring, sq_stack_resource_mgmt_1);
571 radeon_ring_write(ring, sq_stack_resource_mgmt_2);
572 radeon_ring_write(ring, sq_stack_resource_mgmt_3);
573 }
574
575 /* CONTEXT_CONTROL */
576 radeon_ring_write(ring, 0xc0012800);
577 radeon_ring_write(ring, 0x80000000);
578 radeon_ring_write(ring, 0x80000000);
579
580 /* SQ_VTX_BASE_VTX_LOC */
581 radeon_ring_write(ring, 0xc0026f00);
582 radeon_ring_write(ring, 0x00000000);
583 radeon_ring_write(ring, 0x00000000);
584 radeon_ring_write(ring, 0x00000000);
585
586 /* SET_SAMPLER */
587 radeon_ring_write(ring, 0xc0036e00);
588 radeon_ring_write(ring, 0x00000000);
589 radeon_ring_write(ring, 0x00000012);
590 radeon_ring_write(ring, 0x00000000);
591 radeon_ring_write(ring, 0x00000000);
592
593 /* set to DX10/11 mode */
594 radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
595 radeon_ring_write(ring, 1);
596
597 /* emit an IB pointing at default state */
598 dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
599 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
600 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
601 radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
602 radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
603 radeon_ring_write(ring, dwords);
604
605}
606
607int evergreen_blit_init(struct radeon_device *rdev)
608{
609 u32 obj_size;
610 int i, r, dwords;
611 void *ptr;
612 u32 packet2s[16];
613 int num_packet2s = 0;
614
615 rdev->r600_blit.primitives.set_render_target = set_render_target;
616 rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
617 rdev->r600_blit.primitives.set_shaders = set_shaders;
618 rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
619 rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
620 rdev->r600_blit.primitives.set_scissors = set_scissors;
621 rdev->r600_blit.primitives.draw_auto = draw_auto;
622 rdev->r600_blit.primitives.set_default_state = set_default_state;
623
624 rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
625 rdev->r600_blit.ring_size_common += 55; /* shaders + def state */
626 rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
627 rdev->r600_blit.ring_size_common += 5; /* done copy */
628 rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
629
630 rdev->r600_blit.ring_size_per_loop = 74;
631 if (rdev->family >= CHIP_CAYMAN)
632 rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */
633
634 rdev->r600_blit.max_dim = 16384;
635
636 rdev->r600_blit.state_offset = 0;
637
638 if (rdev->family < CHIP_CAYMAN)
639 rdev->r600_blit.state_len = evergreen_default_size;
640 else
641 rdev->r600_blit.state_len = cayman_default_size;
642
643 dwords = rdev->r600_blit.state_len;
644 while (dwords & 0xf) {
645 packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
646 dwords++;
647 }
648
649 obj_size = dwords * 4;
650 obj_size = ALIGN(obj_size, 256);
651
652 rdev->r600_blit.vs_offset = obj_size;
653 if (rdev->family < CHIP_CAYMAN)
654 obj_size += evergreen_vs_size * 4;
655 else
656 obj_size += cayman_vs_size * 4;
657 obj_size = ALIGN(obj_size, 256);
658
659 rdev->r600_blit.ps_offset = obj_size;
660 if (rdev->family < CHIP_CAYMAN)
661 obj_size += evergreen_ps_size * 4;
662 else
663 obj_size += cayman_ps_size * 4;
664 obj_size = ALIGN(obj_size, 256);
665
666 /* pin copy shader into vram if not already initialized */
667 if (!rdev->r600_blit.shader_obj) {
668 r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
669 RADEON_GEM_DOMAIN_VRAM,
670 NULL, &rdev->r600_blit.shader_obj);
671 if (r) {
672 DRM_ERROR("evergreen failed to allocate shader\n");
673 return r;
674 }
675
676 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
677 if (unlikely(r != 0))
678 return r;
679 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
680 &rdev->r600_blit.shader_gpu_addr);
681 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
682 if (r) {
683 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
684 return r;
685 }
686 }
687
688 DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
689 obj_size,
690 rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
691
692 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
693 if (unlikely(r != 0))
694 return r;
695 r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
696 if (r) {
697 DRM_ERROR("failed to map blit object %d\n", r);
698 return r;
699 }
700
701 if (rdev->family < CHIP_CAYMAN) {
702 memcpy_toio(ptr + rdev->r600_blit.state_offset,
703 evergreen_default_state, rdev->r600_blit.state_len * 4);
704
705 if (num_packet2s)
706 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
707 packet2s, num_packet2s * 4);
708 for (i = 0; i < evergreen_vs_size; i++)
709 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
710 for (i = 0; i < evergreen_ps_size; i++)
711 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
712 } else {
713 memcpy_toio(ptr + rdev->r600_blit.state_offset,
714 cayman_default_state, rdev->r600_blit.state_len * 4);
715
716 if (num_packet2s)
717 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
718 packet2s, num_packet2s * 4);
719 for (i = 0; i < cayman_vs_size; i++)
720 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(cayman_vs[i]);
721 for (i = 0; i < cayman_ps_size; i++)
722 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(cayman_ps[i]);
723 }
724 radeon_bo_kunmap(rdev->r600_blit.shader_obj);
725 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
726
727 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
728 return 0;
729}
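
Editor's note: the shader BO layout computed in evergreen_blit_init() pads the default state to a 16-dword multiple with PACKET2 NOPs (so it can be submitted as an indirect buffer) and then aligns each following region to 256 bytes. A sketch of the layout arithmetic under assumed sizes; the real values come from evergreen_default_size, evergreen_vs_size and evergreen_ps_size, so the numbers below are illustrative only:

#include <assert.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    /* Assumed sizes, in dwords, standing in for the shader tables. */
    unsigned int state_len = 342, vs_size = 18, ps_size = 20;

    unsigned int dwords = state_len;
    while (dwords & 0xf) /* pad the state to a 16-dword boundary */
        dwords++;

    unsigned int obj_size = ALIGN_UP(dwords * 4, 256);
    unsigned int vs_offset = obj_size;
    obj_size = ALIGN_UP(obj_size + vs_size * 4, 256);
    unsigned int ps_offset = obj_size;
    obj_size = ALIGN_UP(obj_size + ps_size * 4, 256);

    printf("state %u dw, vs @ %u, ps @ %u, total %u bytes\n",
           dwords, vs_offset, ps_offset, obj_size);
    assert((vs_offset % 256) == 0 && (ps_offset % 256) == 0);
    return 0;
}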
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
index f85c0af115b5..d43383470cdf 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -300,58 +300,4 @@ const u32 evergreen_default_state[] =
     0x00000010, /*  */
 };
 
-const u32 evergreen_vs[] =
-{
-    0x00000004,
-    0x80800400,
-    0x0000a03c,
-    0x95000688,
-    0x00004000,
-    0x15200688,
-    0x00000000,
-    0x00000000,
-    0x3c000000,
-    0x67961001,
-#ifdef __BIG_ENDIAN
-    0x000a0000,
-#else
-    0x00080000,
-#endif
-    0x00000000,
-    0x1c000000,
-    0x67961000,
-#ifdef __BIG_ENDIAN
-    0x00020008,
-#else
-    0x00000008,
-#endif
-    0x00000000,
-};
-
-const u32 evergreen_ps[] =
-{
-    0x00000003,
-    0xa00c0000,
-    0x00000008,
-    0x80400000,
-    0x00000000,
-    0x95200688,
-    0x00380400,
-    0x00146b10,
-    0x00380000,
-    0x20146b10,
-    0x00380400,
-    0x40146b00,
-    0x80380000,
-    0x60146b00,
-    0x00000000,
-    0x00000000,
-    0x00000010,
-    0x000d1000,
-    0xb0800000,
-    0x00000000,
-};
-
-const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps);
-const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs);
 const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
new file mode 100644
index 000000000000..6a0656d00ed0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -0,0 +1,190 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <drm/drmP.h>
25#include "radeon.h"
26#include "radeon_asic.h"
27#include "evergreend.h"
28
29u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev);
30
31/**
32 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
33 *
34 * @rdev: radeon_device pointer
35 * @fence: radeon fence object
36 *
37 * Add a DMA fence packet to the ring to write
38 * the fence seq number and DMA trap packet to generate
39 * an interrupt if needed (evergreen-SI).
40 */
41void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
42 struct radeon_fence *fence)
43{
44 struct radeon_ring *ring = &rdev->ring[fence->ring];
45 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
46 /* write the fence */
47 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
48 radeon_ring_write(ring, addr & 0xfffffffc);
49 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
50 radeon_ring_write(ring, fence->seq);
51 /* generate an interrupt */
52 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
53 /* flush HDP */
54 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
55 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
56 radeon_ring_write(ring, 1);
57}
58
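Editor's note: the DMA fence packet carries a 40-bit GPU address split across two dwords: the low word keeps bits 2..31 (the address must be dword-aligned, so the bottom two bits are dropped) and the next dword carries bits 32..39. A hedged sketch of the split and its reassembly, using an arbitrary sample address:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t addr = 0x0000003f12345678ull; /* sample 40-bit GPU address */

    /* Split as evergreen_dma_fence_ring_emit() writes it to the ring. */
    uint32_t lo = (uint32_t)(addr & 0xfffffffc);
    uint32_t hi = (uint32_t)((addr >> 32) & 0xff);

    /* The hardware reassembles the same dword-aligned address. */
    uint64_t back = ((uint64_t)hi << 32) | lo;
    assert(back == (addr & ~3ull));
    return 0;
}
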
59/**
60 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
61 *
62 * @rdev: radeon_device pointer
63 * @ib: IB object to schedule
64 *
65 * Schedule an IB in the DMA ring (evergreen).
66 */
67void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
68 struct radeon_ib *ib)
69{
70 struct radeon_ring *ring = &rdev->ring[ib->ring];
71
72 if (rdev->wb.enabled) {
73 u32 next_rptr = ring->wptr + 4;
74 while ((next_rptr & 7) != 5)
75 next_rptr++;
76 next_rptr += 3;
77 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
78 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
79 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
80 radeon_ring_write(ring, next_rptr);
81 }
82
83 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
84 * Pad as necessary with NOPs.
85 */
86 while ((ring->wptr & 7) != 5)
87 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
88 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
89 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
90 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
91
92}
93
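Editor's note: the padding target of 5 in the loop above falls out of the packet sizes: the INDIRECT_BUFFER packet is 3 dwords, so starting it at offset 5 within an 8-dword group makes it end exactly on the boundary; the optional next_rptr write-back (4 dwords plus up to 3 of slack) is budgeted the same way. A small check of the alignment rule:

#include <assert.h>

int main(void)
{
    /* Pad the write pointer until it is 5 mod 8, as the loop above does. */
    unsigned int wptr = 0;
    unsigned int nops = 0;

    while ((wptr & 7) != 5) {
        wptr++;  /* each pad is one NOP dword */
        nops++;
    }

    /* A 3-dword INDIRECT_BUFFER packet now ends on an 8-dword boundary. */
    assert(((wptr + 3) & 7) == 0);
    assert(nops <= 7); /* at most 7 NOPs are ever needed */
    return 0;
}
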
94/**
95 * evergreen_copy_dma - copy pages using the DMA engine
96 *
97 * @rdev: radeon_device pointer
98 * @src_offset: src GPU address
99 * @dst_offset: dst GPU address
100 * @num_gpu_pages: number of GPU pages to xfer
101 * @fence: radeon fence object
102 *
103 * Copy GPU paging using the DMA engine (evergreen-cayman).
104 * Used by the radeon ttm implementation to move pages if
105 * registered as the asic copy callback.
106 */
107int evergreen_copy_dma(struct radeon_device *rdev,
108 uint64_t src_offset, uint64_t dst_offset,
109 unsigned num_gpu_pages,
110 struct radeon_fence **fence)
111{
112 struct radeon_semaphore *sem = NULL;
113 int ring_index = rdev->asic->copy.dma_ring_index;
114 struct radeon_ring *ring = &rdev->ring[ring_index];
115 u32 size_in_dw, cur_size_in_dw;
116 int i, num_loops;
117 int r = 0;
118
119 r = radeon_semaphore_create(rdev, &sem);
120 if (r) {
121 DRM_ERROR("radeon: moving bo (%d).\n", r);
122 return r;
123 }
124
125 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
126 num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
127 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
128 if (r) {
129 DRM_ERROR("radeon: ring lock failed while moving bo (%d).\n", r);
130 radeon_semaphore_free(rdev, &sem, NULL);
131 return r;
132 }
133
134 if (radeon_fence_need_sync(*fence, ring->idx)) {
135 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
136 ring->idx);
137 radeon_fence_note_sync(*fence, ring->idx);
138 } else {
139 radeon_semaphore_free(rdev, &sem, NULL);
140 }
141
142 for (i = 0; i < num_loops; i++) {
143 cur_size_in_dw = size_in_dw;
144 if (cur_size_in_dw > 0xFFFFF)
145 cur_size_in_dw = 0xFFFFF;
146 size_in_dw -= cur_size_in_dw;
147 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
148 radeon_ring_write(ring, dst_offset & 0xfffffffc);
149 radeon_ring_write(ring, src_offset & 0xfffffffc);
150 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
151 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
152 src_offset += cur_size_in_dw * 4;
153 dst_offset += cur_size_in_dw * 4;
154 }
155
156 r = radeon_fence_emit(rdev, fence, ring->idx);
157 if (r) {
158 radeon_ring_unlock_undo(rdev, ring);
159 return r;
160 }
161
162 radeon_ring_unlock_commit(rdev, ring);
163 radeon_semaphore_free(rdev, &sem, *fence);
164
165 return r;
166}
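/* For illustration only (not driver code): worked numbers for the chunking
 * above, assuming 4 KiB GPU pages (RADEON_GPU_PAGE_SHIFT == 12). Copying
 * 16384 pages gives size_in_dw = 16384 * 4096 / 4 = 16777216 dwords; at
 * most 0xfffff (1048575) dwords fit in one COPY packet, so DIV_ROUND_UP()
 * yields 17 loops and the ring is locked for 17 * 5 + 11 = 96 dwords
 * (5 dwords per copy packet plus semaphore/fence overhead).
 */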
167
168/**
169 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
170 *
171 * @rdev: radeon_device pointer
172 * @ring: radeon_ring structure holding ring information
173 *
174 * Check if the async DMA engine is locked up.
175 * Returns true if the engine appears to be locked up, false if not.
176 */
177bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
178{
179 u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
180
181 if (!(reset_mask & RADEON_RESET_DMA)) {
182 radeon_ring_lockup_update(ring);
183 return false;
184 }
185 /* force ring activities */
186 radeon_ring_force_activity(rdev, ring);
187 return radeon_ring_test_lockup(rdev, ring);
188}
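/* For illustration only (not driver code): the lockup check above is a
 * two-step heuristic - if the soft-reset mask shows the DMA engine healthy,
 * the per-ring lockup timer is refreshed and false is returned; otherwise
 * activity is forced onto the ring and radeon_ring_test_lockup() decides
 * whether the ring is still making progress.
 */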
189
190
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index b0d3fb341417..f71ce390aebe 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -32,6 +32,10 @@
 #include "evergreend.h"
 #include "atom.h"
 
+extern void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder);
+extern void dce6_afmt_write_sad_regs(struct drm_encoder *encoder);
+extern void dce6_afmt_select_pin(struct drm_encoder *encoder);
+
 /*
  * update the N and CTS parameters for a given pixel clock rate
  */
@@ -54,6 +58,45 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
 	WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
 }
 
+static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector = NULL;
+	u32 tmp;
+	u8 *sadb;
+	int sad_count;
+
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder)
+			radeon_connector = to_radeon_connector(connector);
+	}
+
+	if (!radeon_connector) {
+		DRM_ERROR("Couldn't find encoder's connector\n");
+		return;
+	}
+
+	sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+	if (sad_count < 0) {
+		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+		return;
+	}
+
+	/* program the speaker allocation */
+	tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
+	tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
+	/* set HDMI mode */
+	tmp |= HDMI_CONNECTION;
+	if (sad_count)
+		tmp |= SPEAKER_ALLOCATION(sadb[0]);
+	else
+		tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+	WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
+
+	kfree(sadb);
+}
+
 static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
 {
 	struct radeon_device *rdev = encoder->dev->dev_private;
@@ -148,18 +191,44 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
 	u32 base_rate = 24000;
+	u32 max_ratio = clock / base_rate;
+	u32 dto_phase;
+	u32 dto_modulo = clock;
+	u32 wallclock_ratio;
+	u32 dto_cntl;
 
 	if (!dig || !dig->afmt)
 		return;
 
+	if (ASIC_IS_DCE6(rdev)) {
+		dto_phase = 24 * 1000;
+	} else {
+		if (max_ratio >= 8) {
+			dto_phase = 192 * 1000;
+			wallclock_ratio = 3;
+		} else if (max_ratio >= 4) {
+			dto_phase = 96 * 1000;
+			wallclock_ratio = 2;
+		} else if (max_ratio >= 2) {
+			dto_phase = 48 * 1000;
+			wallclock_ratio = 1;
+		} else {
+			dto_phase = 24 * 1000;
+			wallclock_ratio = 0;
+		}
+		dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+		dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+		WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
+	}
+
 	/* XXX two dtos; generally use dto0 for hdmi */
 	/* Express [24MHz / target pixel clock] as an exact rational
 	 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
 	 */
-	WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
-	WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
 	WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
+	WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
+	WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
 }
 
 
@@ -238,13 +307,23 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
 		AFMT_60958_CS_CHANNEL_NUMBER_6(7) |
 		AFMT_60958_CS_CHANNEL_NUMBER_7(8));
 
-	/* fglrx sets 0x0001005f | (x & 0x00fc0000) in 0x5f78 here */
+	if (ASIC_IS_DCE6(rdev)) {
+		dce6_afmt_write_speaker_allocation(encoder);
+	} else {
+		dce4_afmt_write_speaker_allocation(encoder);
+	}
 
 	WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
 	       AFMT_AUDIO_CHANNEL_ENABLE(0xff));
 
 	/* fglrx sets 0x40 in 0x5f80 here */
-	evergreen_hdmi_write_sad_regs(encoder);
+
+	if (ASIC_IS_DCE6(rdev)) {
+		dce6_afmt_select_pin(encoder);
+		dce6_afmt_write_sad_regs(encoder);
+	} else {
+		evergreen_hdmi_write_sad_regs(encoder);
+	}
 
 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
 	if (err < 0) {
@@ -280,6 +359,8 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
 
 void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
 {
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
@@ -292,6 +373,15 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
 	if (!enable && !dig->afmt->enabled)
 		return;
 
+	if (enable) {
+		if (ASIC_IS_DCE6(rdev))
+			dig->afmt->pin = dce6_audio_get_pin(rdev);
+		else
+			dig->afmt->pin = r600_audio_get_pin(rdev);
+	} else {
+		dig->afmt->pin = NULL;
+	}
+
 	dig->afmt->enabled = enable;
 
 	DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
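/* For illustration only (not part of the patch): the DCE4/5 branch added
 * above scales the DTO phase and the wallclock ratio together so that
 * DCCG_AUDIO_DTO0_PHASE / DCCG_AUDIO_DTO0_MODULE still expresses the
 * 24 MHz reference versus the pixel clock, with clocks in kHz as used by
 * evergreen_audio_set_dto(). A sketch of that selection:
 */
#if 0	/* example sketch */
static u32 example_dce4_dto_phase_khz(u32 clock_khz, u32 *wallclock_ratio)
{
	u32 max_ratio = clock_khz / 24000;	/* pixel clock vs. 24 MHz */

	if (max_ratio >= 8) {
		*wallclock_ratio = 3;
		return 192 * 1000;
	} else if (max_ratio >= 4) {
		*wallclock_ratio = 2;
		return 96 * 1000;	/* e.g. clock_khz = 148500 -> ratio 6 */
	} else if (max_ratio >= 2) {
		*wallclock_ratio = 1;
		return 48 * 1000;
	}
	*wallclock_ratio = 0;
	return 24 * 1000;
}
#endif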
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index a7baf67aef6c..8768fd6a1e27 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -497,6 +497,9 @@
 #define DCCG_AUDIO_DTO0_MODULE                          0x05b4
 #define DCCG_AUDIO_DTO0_LOAD                            0x05b8
 #define DCCG_AUDIO_DTO0_CNTL                            0x05bc
+#       define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x)        (((x) & 7) << 0)
+#       define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK      7
+#       define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT     0
 
 #define DCCG_AUDIO_DTO1_PHASE                           0x05c0
 #define DCCG_AUDIO_DTO1_MODULE                          0x05c4
@@ -711,6 +714,13 @@
 #define AFMT_GENERIC0_7                                 0x7138
 
 /* DCE4/5 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER        0x5f78
+#       define SPEAKER_ALLOCATION(x)                    (((x) & 0x7f) << 0)
+#       define SPEAKER_ALLOCATION_MASK                  (0x7f << 0)
+#       define SPEAKER_ALLOCATION_SHIFT                 0
+#       define HDMI_CONNECTION                          (1 << 16)
+#       define DP_CONNECTION                            (1 << 17)
+
 #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0      0x5f84 /* LPCM */
 #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1      0x5f88 /* AC3 */
 #define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2      0x5f8c /* MPEG1 */
@@ -1150,6 +1160,10 @@
 #       define LATENCY_LOW_WATERMARK(x)                   ((x) << 0)
 #       define LATENCY_HIGH_WATERMARK(x)                  ((x) << 16)
 
+#define PIPE0_DMIF_BUFFER_CONTROL                         0x0ca0
+#       define DMIF_BUFFERS_ALLOCATED(x)                  ((x) << 0)
+#       define DMIF_BUFFERS_ALLOCATED_COMPLETED           (1 << 4)
+
 #define IH_RB_CNTL                                        0x3e00
 #       define IH_RB_ENABLE                               (1 << 0)
 #       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
new file mode 100644
index 000000000000..ecd60809db4e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -0,0 +1,2645 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "cikd.h"
27#include "r600_dpm.h"
28#include "kv_dpm.h"
29#include "radeon_asic.h"
30#include <linux/seq_file.h>
31
32#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
33#define KV_MINIMUM_ENGINE_CLOCK 800
34#define SMC_RAM_END 0x40000
35
36static void kv_init_graphics_levels(struct radeon_device *rdev);
37static int kv_calculate_ds_divider(struct radeon_device *rdev);
38static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
39static int kv_calculate_dpm_settings(struct radeon_device *rdev);
40static void kv_enable_new_levels(struct radeon_device *rdev);
41static void kv_program_nbps_index_settings(struct radeon_device *rdev,
42 struct radeon_ps *new_rps);
43static int kv_set_enabled_levels(struct radeon_device *rdev);
44static int kv_force_dpm_highest(struct radeon_device *rdev);
45static int kv_force_dpm_lowest(struct radeon_device *rdev);
46static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
47 struct radeon_ps *new_rps,
48 struct radeon_ps *old_rps);
49static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
50 int min_temp, int max_temp);
51static int kv_init_fps_limits(struct radeon_device *rdev);
52
53void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
54static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
55static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
56static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);
57
58extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
59extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
60extern void cik_update_cg(struct radeon_device *rdev,
61 u32 block, bool enable);
62
63static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
64{
65 { 0, 4, 1 },
66 { 1, 4, 1 },
67 { 2, 5, 1 },
68 { 3, 4, 2 },
69 { 4, 1, 1 },
70 { 5, 5, 2 },
71 { 6, 6, 1 },
72 { 7, 9, 2 },
73 { 0xffffffff }
74};
75
76static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
77{
78 { 0, 4, 1 },
79 { 0xffffffff }
80};
81
82static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
83{
84 { 0, 4, 1 },
85 { 0xffffffff }
86};
87
88static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
89{
90 { 0, 4, 1 },
91 { 0xffffffff }
92};
93
94static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
95{
96 { 0, 4, 1 },
97 { 0xffffffff }
98};
99
100static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
101{
102 { 0, 4, 1 },
103 { 1, 4, 1 },
104 { 2, 5, 1 },
105 { 3, 4, 1 },
106 { 4, 1, 1 },
107 { 5, 5, 1 },
108 { 6, 6, 1 },
109 { 7, 9, 1 },
110 { 8, 4, 1 },
111 { 9, 2, 1 },
112 { 10, 3, 1 },
113 { 11, 6, 1 },
114 { 12, 8, 2 },
115 { 13, 1, 1 },
116 { 14, 2, 1 },
117 { 15, 3, 1 },
118 { 16, 1, 1 },
119 { 17, 4, 1 },
120 { 18, 3, 1 },
121 { 19, 1, 1 },
122 { 20, 8, 1 },
123 { 21, 5, 1 },
124 { 22, 1, 1 },
125 { 23, 1, 1 },
126 { 24, 4, 1 },
127 { 27, 6, 1 },
128 { 28, 1, 1 },
129 { 0xffffffff }
130};
131
132static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
133{
134 { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
135};
136
137static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
138{
139 { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
140};
141
142static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
143{
144 { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
145};
146
147static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
148{
149 { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
150};
151
152static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
153{
154 { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
155};
156
157static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
158{
159 { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
160};
161
162static const struct kv_pt_config_reg didt_config_kv[] =
163{
164 { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
165 { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
166 { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
167 { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
168 { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
169 { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
170 { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
171 { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
172 { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
173 { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
174 { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
175 { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
176 { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
177 { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
178 { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
179 { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
180 { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
181 { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
182 { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
183 { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
184 { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
185 { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
186 { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
187 { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
188 { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
189 { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
190 { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
191 { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
192 { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
193 { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
194 { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
195 { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
196 { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
197 { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
198 { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
199 { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
200 { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
201 { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
202 { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
203 { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
204 { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
205 { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
206 { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
207 { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
208 { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
209 { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
210 { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
211 { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
212 { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
213 { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
214 { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
215 { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
216 { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
217 { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
218 { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
219 { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
220 { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
221 { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
222 { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
223 { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
224 { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
225 { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
226 { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
227 { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
228 { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
229 { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
230 { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
231 { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
232 { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
233 { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
234 { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
235 { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
236 { 0xFFFFFFFF }
237};
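/* For illustration only (not driver code): each entry above appears to be
 * { offset, mask, shift, value, type }, matching the field order consumed
 * by kv_program_pt_config_registers() below.
 */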
238
239static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
240{
241 struct kv_ps *ps = rps->ps_priv;
242
243 return ps;
244}
245
246static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
247{
248 struct kv_power_info *pi = rdev->pm.dpm.priv;
249
250 return pi;
251}
252
253#if 0
254static void kv_program_local_cac_table(struct radeon_device *rdev,
255 const struct kv_lcac_config_values *local_cac_table,
256 const struct kv_lcac_config_reg *local_cac_reg)
257{
258 u32 i, count, data;
259 const struct kv_lcac_config_values *values = local_cac_table;
260
261 while (values->block_id != 0xffffffff) {
262 count = values->signal_id;
263 for (i = 0; i < count; i++) {
264 data = ((values->block_id << local_cac_reg->block_shift) &
265 local_cac_reg->block_mask);
266 data |= ((i << local_cac_reg->signal_shift) &
267 local_cac_reg->signal_mask);
268 data |= ((values->t << local_cac_reg->t_shift) &
269 local_cac_reg->t_mask);
270 data |= ((1 << local_cac_reg->enable_shift) &
271 local_cac_reg->enable_mask);
272 WREG32_SMC(local_cac_reg->cntl, data);
273 }
274 values++;
275 }
276}
277#endif
278
279static int kv_program_pt_config_registers(struct radeon_device *rdev,
280 const struct kv_pt_config_reg *cac_config_regs)
281{
282 const struct kv_pt_config_reg *config_regs = cac_config_regs;
283 u32 data;
284 u32 cache = 0;
285
286 if (config_regs == NULL)
287 return -EINVAL;
288
289 while (config_regs->offset != 0xFFFFFFFF) {
290 if (config_regs->type == KV_CONFIGREG_CACHE) {
291 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
292 } else {
293 switch (config_regs->type) {
294 case KV_CONFIGREG_SMC_IND:
295 data = RREG32_SMC(config_regs->offset);
296 break;
297 case KV_CONFIGREG_DIDT_IND:
298 data = RREG32_DIDT(config_regs->offset);
299 break;
300 default:
301 data = RREG32(config_regs->offset << 2);
302 break;
303 }
304
305 data &= ~config_regs->mask;
306 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
307 data |= cache;
308 cache = 0;
309
310 switch (config_regs->type) {
311 case KV_CONFIGREG_SMC_IND:
312 WREG32_SMC(config_regs->offset, data);
313 break;
314 case KV_CONFIGREG_DIDT_IND:
315 WREG32_DIDT(config_regs->offset, data);
316 break;
317 default:
318 WREG32(config_regs->offset << 2, data);
319 break;
320 }
321 }
322 config_regs++;
323 }
324
325 return 0;
326}
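/* For illustration only (not driver code): how one didt_config_kv entry is
 * applied by the loop above. For { 0x10, 0x000000ff, 0, 0x0,
 * KV_CONFIGREG_DIDT_IND } the DIDT register at offset 0x10 is read, the
 * masked field cleared, and the shifted value OR'ed back in:
 *
 *	data = (RREG32_DIDT(0x10) & ~0x000000ff) | ((0x0 << 0) & 0x000000ff);
 *	WREG32_DIDT(0x10, data);
 *
 * KV_CONFIGREG_CACHE entries only accumulate bits, which are merged into
 * the next non-cache write.
 */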
327
328static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
329{
330 struct kv_power_info *pi = kv_get_pi(rdev);
331 u32 data;
332
333 if (pi->caps_sq_ramping) {
334 data = RREG32_DIDT(DIDT_SQ_CTRL0);
335 if (enable)
336 data |= DIDT_CTRL_EN;
337 else
338 data &= ~DIDT_CTRL_EN;
339 WREG32_DIDT(DIDT_SQ_CTRL0, data);
340 }
341
342 if (pi->caps_db_ramping) {
343 data = RREG32_DIDT(DIDT_DB_CTRL0);
344 if (enable)
345 data |= DIDT_CTRL_EN;
346 else
347 data &= ~DIDT_CTRL_EN;
348 WREG32_DIDT(DIDT_DB_CTRL0, data);
349 }
350
351 if (pi->caps_td_ramping) {
352 data = RREG32_DIDT(DIDT_TD_CTRL0);
353 if (enable)
354 data |= DIDT_CTRL_EN;
355 else
356 data &= ~DIDT_CTRL_EN;
357 WREG32_DIDT(DIDT_TD_CTRL0, data);
358 }
359
360 if (pi->caps_tcp_ramping) {
361 data = RREG32_DIDT(DIDT_TCP_CTRL0);
362 if (enable)
363 data |= DIDT_CTRL_EN;
364 else
365 data &= ~DIDT_CTRL_EN;
366 WREG32_DIDT(DIDT_TCP_CTRL0, data);
367 }
368}
369
370static int kv_enable_didt(struct radeon_device *rdev, bool enable)
371{
372 struct kv_power_info *pi = kv_get_pi(rdev);
373 int ret;
374
375 if (pi->caps_sq_ramping ||
376 pi->caps_db_ramping ||
377 pi->caps_td_ramping ||
378 pi->caps_tcp_ramping) {
379 cik_enter_rlc_safe_mode(rdev);
380
381 if (enable) {
382 ret = kv_program_pt_config_registers(rdev, didt_config_kv);
383 if (ret) {
384 cik_exit_rlc_safe_mode(rdev);
385 return ret;
386 }
387 }
388
389 kv_do_enable_didt(rdev, enable);
390
391 cik_exit_rlc_safe_mode(rdev);
392 }
393
394 return 0;
395}
396
397#if 0
398static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev)
399{
400 struct kv_power_info *pi = kv_get_pi(rdev);
401
402 if (pi->caps_cac) {
403 WREG32_SMC(LCAC_SX0_OVR_SEL, 0);
404 WREG32_SMC(LCAC_SX0_OVR_VAL, 0);
405 kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg);
406
407 WREG32_SMC(LCAC_MC0_OVR_SEL, 0);
408 WREG32_SMC(LCAC_MC0_OVR_VAL, 0);
409 kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);
410
411 WREG32_SMC(LCAC_MC1_OVR_SEL, 0);
412 WREG32_SMC(LCAC_MC1_OVR_VAL, 0);
413 kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);
414
415 WREG32_SMC(LCAC_MC2_OVR_SEL, 0);
416 WREG32_SMC(LCAC_MC2_OVR_VAL, 0);
417 kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);
418
419 WREG32_SMC(LCAC_MC3_OVR_SEL, 0);
420 WREG32_SMC(LCAC_MC3_OVR_VAL, 0);
421 kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);
422
423 WREG32_SMC(LCAC_CPL_OVR_SEL, 0);
424 WREG32_SMC(LCAC_CPL_OVR_VAL, 0);
425 kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
426 }
427}
428#endif
429
430static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
431{
432 struct kv_power_info *pi = kv_get_pi(rdev);
433 int ret = 0;
434
435 if (pi->caps_cac) {
436 if (enable) {
437 ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
438 if (ret)
439 pi->cac_enabled = false;
440 else
441 pi->cac_enabled = true;
442 } else if (pi->cac_enabled) {
443 kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
444 pi->cac_enabled = false;
445 }
446 }
447
448 return ret;
449}
450
451static int kv_process_firmware_header(struct radeon_device *rdev)
452{
453 struct kv_power_info *pi = kv_get_pi(rdev);
454 u32 tmp;
455 int ret;
456
457 ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
458 offsetof(SMU7_Firmware_Header, DpmTable),
459 &tmp, pi->sram_end);
460
461 if (ret == 0)
462 pi->dpm_table_start = tmp;
463
464 ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
465 offsetof(SMU7_Firmware_Header, SoftRegisters),
466 &tmp, pi->sram_end);
467
468 if (ret == 0)
469 pi->soft_regs_start = tmp;
470
471 return ret;
472}
473
474static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
475{
476 struct kv_power_info *pi = kv_get_pi(rdev);
477 int ret;
478
479 pi->graphics_voltage_change_enable = 1;
480
481 ret = kv_copy_bytes_to_smc(rdev,
482 pi->dpm_table_start +
483 offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
484 &pi->graphics_voltage_change_enable,
485 sizeof(u8), pi->sram_end);
486
487 return ret;
488}
489
490static int kv_set_dpm_interval(struct radeon_device *rdev)
491{
492 struct kv_power_info *pi = kv_get_pi(rdev);
493 int ret;
494
495 pi->graphics_interval = 1;
496
497 ret = kv_copy_bytes_to_smc(rdev,
498 pi->dpm_table_start +
499 offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
500 &pi->graphics_interval,
501 sizeof(u8), pi->sram_end);
502
503 return ret;
504}
505
506static int kv_set_dpm_boot_state(struct radeon_device *rdev)
507{
508 struct kv_power_info *pi = kv_get_pi(rdev);
509 int ret;
510
511 ret = kv_copy_bytes_to_smc(rdev,
512 pi->dpm_table_start +
513 offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
514 &pi->graphics_boot_level,
515 sizeof(u8), pi->sram_end);
516
517 return ret;
518}
519
520static void kv_program_vc(struct radeon_device *rdev)
521{
522 WREG32_SMC(CG_FTV_0, 0x3FFFC000);
523}
524
525static void kv_clear_vc(struct radeon_device *rdev)
526{
527 WREG32_SMC(CG_FTV_0, 0);
528}
529
530static int kv_set_divider_value(struct radeon_device *rdev,
531 u32 index, u32 sclk)
532{
533 struct kv_power_info *pi = kv_get_pi(rdev);
534 struct atom_clock_dividers dividers;
535 int ret;
536
537 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
538 sclk, false, &dividers);
539 if (ret)
540 return ret;
541
542 pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
543 pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);
544
545 return 0;
546}
547
548static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
549 u16 voltage)
550{
551 return 6200 - (voltage * 25);
552}
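/* For illustration only (not driver code): the linear VID decode above in
 * numbers (the unit size is not stated in this file): vid 0x00 -> 6200,
 * vid 0x40 -> 6200 - 64 * 25 = 4600; each 8-bit VID step lowers the result
 * by 25.
 */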
553
554static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
555 u32 vid_2bit)
556{
557 struct kv_power_info *pi = kv_get_pi(rdev);
558 u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev,
559 &pi->sys_info.vid_mapping_table,
560 vid_2bit);
561
562 return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
563}
564
565
566static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
567{
568 struct kv_power_info *pi = kv_get_pi(rdev);
569
570 pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
571 pi->graphics_level[index].MinVddNb =
572 cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));
573
574 return 0;
575}
576
577static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
578{
579 struct kv_power_info *pi = kv_get_pi(rdev);
580
581 pi->graphics_level[index].AT = cpu_to_be16((u16)at);
582
583 return 0;
584}
585
586static void kv_dpm_power_level_enable(struct radeon_device *rdev,
587 u32 index, bool enable)
588{
589 struct kv_power_info *pi = kv_get_pi(rdev);
590
591 pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
592}
593
594static void kv_start_dpm(struct radeon_device *rdev)
595{
596 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
597
598 tmp |= GLOBAL_PWRMGT_EN;
599 WREG32_SMC(GENERAL_PWRMGT, tmp);
600
601 kv_smc_dpm_enable(rdev, true);
602}
603
604static void kv_stop_dpm(struct radeon_device *rdev)
605{
606 kv_smc_dpm_enable(rdev, false);
607}
608
609static void kv_start_am(struct radeon_device *rdev)
610{
611 u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
612
613 sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
614 sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;
615
616 WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
617}
618
619static void kv_reset_am(struct radeon_device *rdev)
620{
621 u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
622
623 sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
624
625 WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
626}
627
628static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
629{
630 return kv_notify_message_to_smu(rdev, freeze ?
631 PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
632}
633
634static int kv_force_lowest_valid(struct radeon_device *rdev)
635{
636 return kv_force_dpm_lowest(rdev);
637}
638
639static int kv_unforce_levels(struct radeon_device *rdev)
640{
641 return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
642}
643
644static int kv_update_sclk_t(struct radeon_device *rdev)
645{
646 struct kv_power_info *pi = kv_get_pi(rdev);
647 u32 low_sclk_interrupt_t = 0;
648 int ret = 0;
649
650 if (pi->caps_sclk_throttle_low_notification) {
651 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
652
653 ret = kv_copy_bytes_to_smc(rdev,
654 pi->dpm_table_start +
655 offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
656 (u8 *)&low_sclk_interrupt_t,
657 sizeof(u32), pi->sram_end);
658 }
659 return ret;
660}
661
662static int kv_program_bootup_state(struct radeon_device *rdev)
663{
664 struct kv_power_info *pi = kv_get_pi(rdev);
665 u32 i;
666 struct radeon_clock_voltage_dependency_table *table =
667 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
668
669 if (table && table->count) {
670 for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
671 if ((table->entries[i].clk == pi->boot_pl.sclk) ||
672 (i == 0))
673 break;
674 }
675
676 pi->graphics_boot_level = (u8)i;
677 kv_dpm_power_level_enable(rdev, i, true);
678 } else {
679 struct sumo_sclk_voltage_mapping_table *table =
680 &pi->sys_info.sclk_voltage_mapping_table;
681
682 if (table->num_max_dpm_entries == 0)
683 return -EINVAL;
684
685 for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
686 if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) ||
687 (i == 0))
688 break;
689 }
690
691 pi->graphics_boot_level = (u8)i;
692 kv_dpm_power_level_enable(rdev, i, true);
693 }
694 return 0;
695}
696
697static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
698{
699 struct kv_power_info *pi = kv_get_pi(rdev);
700 int ret;
701
702 pi->graphics_therm_throttle_enable = 1;
703
704 ret = kv_copy_bytes_to_smc(rdev,
705 pi->dpm_table_start +
706 offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
707 &pi->graphics_therm_throttle_enable,
708 sizeof(u8), pi->sram_end);
709
710 return ret;
711}
712
713static int kv_upload_dpm_settings(struct radeon_device *rdev)
714{
715 struct kv_power_info *pi = kv_get_pi(rdev);
716 int ret;
717
718 ret = kv_copy_bytes_to_smc(rdev,
719 pi->dpm_table_start +
720 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
721 (u8 *)&pi->graphics_level,
722 sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
723 pi->sram_end);
724
725 if (ret)
726 return ret;
727
728 ret = kv_copy_bytes_to_smc(rdev,
729 pi->dpm_table_start +
730 offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
731 &pi->graphics_dpm_level_count,
732 sizeof(u8), pi->sram_end);
733
734 return ret;
735}
736
737static u32 kv_get_clock_difference(u32 a, u32 b)
738{
739 return (a >= b) ? a - b : b - a;
740}
741
742static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
743{
744 struct kv_power_info *pi = kv_get_pi(rdev);
745 u32 value;
746
747 if (pi->caps_enable_dfs_bypass) {
748 if (kv_get_clock_difference(clk, 40000) < 200)
749 value = 3;
750 else if (kv_get_clock_difference(clk, 30000) < 200)
751 value = 2;
752 else if (kv_get_clock_difference(clk, 20000) < 200)
753 value = 7;
754 else if (kv_get_clock_difference(clk, 15000) < 200)
755 value = 6;
756 else if (kv_get_clock_difference(clk, 10000) < 200)
757 value = 8;
758 else
759 value = 0;
760 } else {
761 value = 0;
762 }
763
764 return value;
765}
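/* For illustration only (not driver code): the bypass mapping above,
 * assuming the 10 kHz clock units used elsewhere in radeon - clocks within
 * 200 units (~2 MHz) of 400/300/200/150/100 MHz select bypass codes
 * 3/2/7/6/8 respectively, anything else gets 0 (no bypass). For example,
 * kv_get_clk_bypass(rdev, 39950) returns 3.
 */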
766
767static int kv_populate_uvd_table(struct radeon_device *rdev)
768{
769 struct kv_power_info *pi = kv_get_pi(rdev);
770 struct radeon_uvd_clock_voltage_dependency_table *table =
771 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
772 struct atom_clock_dividers dividers;
773 int ret;
774 u32 i;
775
776 if (table == NULL || table->count == 0)
777 return 0;
778
779 pi->uvd_level_count = 0;
780 for (i = 0; i < table->count; i++) {
781 if (pi->high_voltage_t &&
782 (pi->high_voltage_t < table->entries[i].v))
783 break;
784
785 pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
786 pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
787 pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);
788
789 pi->uvd_level[i].VClkBypassCntl =
790 (u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
791 pi->uvd_level[i].DClkBypassCntl =
792 (u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);
793
794 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
795 table->entries[i].vclk, false, &dividers);
796 if (ret)
797 return ret;
798 pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;
799
800 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
801 table->entries[i].dclk, false, &dividers);
802 if (ret)
803 return ret;
804 pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;
805
806 pi->uvd_level_count++;
807 }
808
809 ret = kv_copy_bytes_to_smc(rdev,
810 pi->dpm_table_start +
811 offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
812 (u8 *)&pi->uvd_level_count,
813 sizeof(u8), pi->sram_end);
814 if (ret)
815 return ret;
816
817 pi->uvd_interval = 1;
818
819 ret = kv_copy_bytes_to_smc(rdev,
820 pi->dpm_table_start +
821 offsetof(SMU7_Fusion_DpmTable, UVDInterval),
822 &pi->uvd_interval,
823 sizeof(u8), pi->sram_end);
824 if (ret)
825 return ret;
826
827 ret = kv_copy_bytes_to_smc(rdev,
828 pi->dpm_table_start +
829 offsetof(SMU7_Fusion_DpmTable, UvdLevel),
830 (u8 *)&pi->uvd_level,
831 sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
832 pi->sram_end);
833
834 return ret;
835
836}
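/* For illustration only (not driver code): kv_populate_uvd_table() above
 * and the vce/samu/acp variants below share one upload pattern - each field
 * is written into SMC SRAM at dpm_table_start plus its offsetof() within
 * SMU7_Fusion_DpmTable, with multi-byte values converted via
 * cpu_to_be16()/cpu_to_be32() first, which suggests the SMC reads the
 * table big endian.
 */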
837
838static int kv_populate_vce_table(struct radeon_device *rdev)
839{
840 struct kv_power_info *pi = kv_get_pi(rdev);
841 int ret;
842 u32 i;
843 struct radeon_vce_clock_voltage_dependency_table *table =
844 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
845 struct atom_clock_dividers dividers;
846
847 if (table == NULL || table->count == 0)
848 return 0;
849
850 pi->vce_level_count = 0;
851 for (i = 0; i < table->count; i++) {
852 if (pi->high_voltage_t &&
853 pi->high_voltage_t < table->entries[i].v)
854 break;
855
856 pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
857 pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
858
859 pi->vce_level[i].ClkBypassCntl =
860 (u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);
861
862 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
863 table->entries[i].evclk, false, &dividers);
864 if (ret)
865 return ret;
866 pi->vce_level[i].Divider = (u8)dividers.post_div;
867
868 pi->vce_level_count++;
869 }
870
871 ret = kv_copy_bytes_to_smc(rdev,
872 pi->dpm_table_start +
873 offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
874 (u8 *)&pi->vce_level_count,
875 sizeof(u8),
876 pi->sram_end);
877 if (ret)
878 return ret;
879
880 pi->vce_interval = 1;
881
882 ret = kv_copy_bytes_to_smc(rdev,
883 pi->dpm_table_start +
884 offsetof(SMU7_Fusion_DpmTable, VCEInterval),
885 (u8 *)&pi->vce_interval,
886 sizeof(u8),
887 pi->sram_end);
888 if (ret)
889 return ret;
890
891 ret = kv_copy_bytes_to_smc(rdev,
892 pi->dpm_table_start +
893 offsetof(SMU7_Fusion_DpmTable, VceLevel),
894 (u8 *)&pi->vce_level,
895 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
896 pi->sram_end);
897
898 return ret;
899}
900
901static int kv_populate_samu_table(struct radeon_device *rdev)
902{
903 struct kv_power_info *pi = kv_get_pi(rdev);
904 struct radeon_clock_voltage_dependency_table *table =
905 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
906 struct atom_clock_dividers dividers;
907 int ret;
908 u32 i;
909
910 if (table == NULL || table->count == 0)
911 return 0;
912
913 pi->samu_level_count = 0;
914 for (i = 0; i < table->count; i++) {
915 if (pi->high_voltage_t &&
916 pi->high_voltage_t < table->entries[i].v)
917 break;
918
919 pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
920 pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
921
922 pi->samu_level[i].ClkBypassCntl =
923 (u8)kv_get_clk_bypass(rdev, table->entries[i].clk);
924
925 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
926 table->entries[i].clk, false, &dividers);
927 if (ret)
928 return ret;
929 pi->samu_level[i].Divider = (u8)dividers.post_div;
930
931 pi->samu_level_count++;
932 }
933
934 ret = kv_copy_bytes_to_smc(rdev,
935 pi->dpm_table_start +
936 offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
937 (u8 *)&pi->samu_level_count,
938 sizeof(u8),
939 pi->sram_end);
940 if (ret)
941 return ret;
942
943 pi->samu_interval = 1;
944
945 ret = kv_copy_bytes_to_smc(rdev,
946 pi->dpm_table_start +
947 offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
948 (u8 *)&pi->samu_interval,
949 sizeof(u8),
950 pi->sram_end);
951 if (ret)
952 return ret;
953
954 ret = kv_copy_bytes_to_smc(rdev,
955 pi->dpm_table_start +
956 offsetof(SMU7_Fusion_DpmTable, SamuLevel),
957 (u8 *)&pi->samu_level,
958 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
959 pi->sram_end);
962
963 return ret;
964}
965
966
967static int kv_populate_acp_table(struct radeon_device *rdev)
968{
969 struct kv_power_info *pi = kv_get_pi(rdev);
970 struct radeon_clock_voltage_dependency_table *table =
971 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
972 struct atom_clock_dividers dividers;
973 int ret;
974 u32 i;
975
976 if (table == NULL || table->count == 0)
977 return 0;
978
979 pi->acp_level_count = 0;
980 for (i = 0; i < table->count; i++) {
981 pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
982 pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
983
984 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
985 table->entries[i].clk, false, &dividers);
986 if (ret)
987 return ret;
988 pi->acp_level[i].Divider = (u8)dividers.post_div;
989
990 pi->acp_level_count++;
991 }
992
993 ret = kv_copy_bytes_to_smc(rdev,
994 pi->dpm_table_start +
995 offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
996 (u8 *)&pi->acp_level_count,
997 sizeof(u8),
998 pi->sram_end);
999 if (ret)
1000 return ret;
1001
1002 pi->acp_interval = 1;
1003
1004 ret = kv_copy_bytes_to_smc(rdev,
1005 pi->dpm_table_start +
1006 offsetof(SMU7_Fusion_DpmTable, ACPInterval),
1007 (u8 *)&pi->acp_interval,
1008 sizeof(u8),
1009 pi->sram_end);
1010 if (ret)
1011 return ret;
1012
1013 ret = kv_copy_bytes_to_smc(rdev,
1014 pi->dpm_table_start +
1015 offsetof(SMU7_Fusion_DpmTable, AcpLevel),
1016 (u8 *)&pi->acp_level,
1017 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
1018 pi->sram_end);
1021
1022 return ret;
1023}
1024
1025static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
1026{
1027 struct kv_power_info *pi = kv_get_pi(rdev);
1028 u32 i;
1029 struct radeon_clock_voltage_dependency_table *table =
1030 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1031
1032 if (table && table->count) {
1033 for (i = 0; i < pi->graphics_dpm_level_count; i++) {
1034 if (pi->caps_enable_dfs_bypass) {
1035 if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
1036 pi->graphics_level[i].ClkBypassCntl = 3;
1037 else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
1038 pi->graphics_level[i].ClkBypassCntl = 2;
1039 else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
1040 pi->graphics_level[i].ClkBypassCntl = 7;
1041 else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200)
1042 pi->graphics_level[i].ClkBypassCntl = 6;
1043 else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200)
1044 pi->graphics_level[i].ClkBypassCntl = 8;
1045 else
1046 pi->graphics_level[i].ClkBypassCntl = 0;
1047 } else {
1048 pi->graphics_level[i].ClkBypassCntl = 0;
1049 }
1050 }
1051 } else {
1052 struct sumo_sclk_voltage_mapping_table *table =
1053 &pi->sys_info.sclk_voltage_mapping_table;
1054 for (i = 0; i < pi->graphics_dpm_level_count; i++) {
1055 if (pi->caps_enable_dfs_bypass) {
1056 if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
1057 pi->graphics_level[i].ClkBypassCntl = 3;
1058 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
1059 pi->graphics_level[i].ClkBypassCntl = 2;
1060 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
1061 pi->graphics_level[i].ClkBypassCntl = 7;
1062 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
1063 pi->graphics_level[i].ClkBypassCntl = 6;
1064 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
1065 pi->graphics_level[i].ClkBypassCntl = 8;
1066 else
1067 pi->graphics_level[i].ClkBypassCntl = 0;
1068 } else {
1069 pi->graphics_level[i].ClkBypassCntl = 0;
1070 }
1071 }
1072 }
1073}
1074
1075static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
1076{
1077 return kv_notify_message_to_smu(rdev, enable ?
1078 PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
1079}
1080
1081static void kv_update_current_ps(struct radeon_device *rdev,
1082 struct radeon_ps *rps)
1083{
1084 struct kv_ps *new_ps = kv_get_ps(rps);
1085 struct kv_power_info *pi = kv_get_pi(rdev);
1086
1087 pi->current_rps = *rps;
1088 pi->current_ps = *new_ps;
1089 pi->current_rps.ps_priv = &pi->current_ps;
1090}
1091
1092static void kv_update_requested_ps(struct radeon_device *rdev,
1093 struct radeon_ps *rps)
1094{
1095 struct kv_ps *new_ps = kv_get_ps(rps);
1096 struct kv_power_info *pi = kv_get_pi(rdev);
1097
1098 pi->requested_rps = *rps;
1099 pi->requested_ps = *new_ps;
1100 pi->requested_rps.ps_priv = &pi->requested_ps;
1101}
1102
1103int kv_dpm_enable(struct radeon_device *rdev)
1104{
1105 struct kv_power_info *pi = kv_get_pi(rdev);
1106 int ret;
1107
1108 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
1109 RADEON_CG_BLOCK_SDMA |
1110 RADEON_CG_BLOCK_BIF |
1111 RADEON_CG_BLOCK_HDP), false);
1112
1113 ret = kv_process_firmware_header(rdev);
1114 if (ret) {
1115 DRM_ERROR("kv_process_firmware_header failed\n");
1116 return ret;
1117 }
1118 kv_init_fps_limits(rdev);
1119 kv_init_graphics_levels(rdev);
1120 ret = kv_program_bootup_state(rdev);
1121 if (ret) {
1122 DRM_ERROR("kv_program_bootup_state failed\n");
1123 return ret;
1124 }
1125 kv_calculate_dfs_bypass_settings(rdev);
1126 ret = kv_upload_dpm_settings(rdev);
1127 if (ret) {
1128 DRM_ERROR("kv_upload_dpm_settings failed\n");
1129 return ret;
1130 }
1131 ret = kv_populate_uvd_table(rdev);
1132 if (ret) {
1133 DRM_ERROR("kv_populate_uvd_table failed\n");
1134 return ret;
1135 }
1136 ret = kv_populate_vce_table(rdev);
1137 if (ret) {
1138 DRM_ERROR("kv_populate_vce_table failed\n");
1139 return ret;
1140 }
1141 ret = kv_populate_samu_table(rdev);
1142 if (ret) {
1143 DRM_ERROR("kv_populate_samu_table failed\n");
1144 return ret;
1145 }
1146 ret = kv_populate_acp_table(rdev);
1147 if (ret) {
1148 DRM_ERROR("kv_populate_acp_table failed\n");
1149 return ret;
1150 }
1151 kv_program_vc(rdev);
1152#if 0
1153 kv_initialize_hardware_cac_manager(rdev);
1154#endif
1155 kv_start_am(rdev);
1156 if (pi->enable_auto_thermal_throttling) {
1157 ret = kv_enable_auto_thermal_throttling(rdev);
1158 if (ret) {
1159 DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
1160 return ret;
1161 }
1162 }
1163 ret = kv_enable_dpm_voltage_scaling(rdev);
1164 if (ret) {
1165 DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
1166 return ret;
1167 }
1168 ret = kv_set_dpm_interval(rdev);
1169 if (ret) {
1170 DRM_ERROR("kv_set_dpm_interval failed\n");
1171 return ret;
1172 }
1173 ret = kv_set_dpm_boot_state(rdev);
1174 if (ret) {
1175 DRM_ERROR("kv_set_dpm_boot_state failed\n");
1176 return ret;
1177 }
1178 ret = kv_enable_ulv(rdev, true);
1179 if (ret) {
1180 DRM_ERROR("kv_enable_ulv failed\n");
1181 return ret;
1182 }
1183 kv_start_dpm(rdev);
1184 ret = kv_enable_didt(rdev, true);
1185 if (ret) {
1186 DRM_ERROR("kv_enable_didt failed\n");
1187 return ret;
1188 }
1189 ret = kv_enable_smc_cac(rdev, true);
1190 if (ret) {
1191 DRM_ERROR("kv_enable_smc_cac failed\n");
1192 return ret;
1193 }
1194
1195 if (rdev->irq.installed &&
1196 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1197 ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
1198 if (ret) {
1199 DRM_ERROR("kv_set_thermal_temperature_range failed\n");
1200 return ret;
1201 }
1202 rdev->irq.dpm_thermal = true;
1203 radeon_irq_set(rdev);
1204 }
1205
1206 /* powerdown unused blocks for now */
1207 kv_dpm_powergate_acp(rdev, true);
1208 kv_dpm_powergate_samu(rdev, true);
1209 kv_dpm_powergate_vce(rdev, true);
1210 kv_dpm_powergate_uvd(rdev, true);
1211
1212 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
1213 RADEON_CG_BLOCK_SDMA |
1214 RADEON_CG_BLOCK_BIF |
1215 RADEON_CG_BLOCK_HDP), true);
1216
1217 kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
1218
1219 return ret;
1220}
1221
1222void kv_dpm_disable(struct radeon_device *rdev)
1223{
1224 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
1225 RADEON_CG_BLOCK_SDMA |
1226 RADEON_CG_BLOCK_BIF |
1227 RADEON_CG_BLOCK_HDP), false);
1228
1229 /* powerup blocks */
1230 kv_dpm_powergate_acp(rdev, false);
1231 kv_dpm_powergate_samu(rdev, false);
1232 kv_dpm_powergate_vce(rdev, false);
1233 kv_dpm_powergate_uvd(rdev, false);
1234
1235 kv_enable_smc_cac(rdev, false);
1236 kv_enable_didt(rdev, false);
1237 kv_clear_vc(rdev);
1238 kv_stop_dpm(rdev);
1239 kv_enable_ulv(rdev, false);
1240 kv_reset_am(rdev);
1241
1242 kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
1243}
1244
1245#if 0
1246static int kv_write_smc_soft_register(struct radeon_device *rdev,
1247 u16 reg_offset, u32 value)
1248{
1249 struct kv_power_info *pi = kv_get_pi(rdev);
1250
1251 return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset,
1252 (u8 *)&value, sizeof(u16), pi->sram_end);
1253}
1254
1255static int kv_read_smc_soft_register(struct radeon_device *rdev,
1256 u16 reg_offset, u32 *value)
1257{
1258 struct kv_power_info *pi = kv_get_pi(rdev);
1259
1260 return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset,
1261 value, pi->sram_end);
1262}
1263#endif
1264
1265static void kv_init_sclk_t(struct radeon_device *rdev)
1266{
1267 struct kv_power_info *pi = kv_get_pi(rdev);
1268
1269 pi->low_sclk_interrupt_t = 0;
1270}
1271
1272static int kv_init_fps_limits(struct radeon_device *rdev)
1273{
1274 struct kv_power_info *pi = kv_get_pi(rdev);
1275 int ret = 0;
1276
1277 if (pi->caps_fps) {
1278 u16 tmp;
1279
1280 tmp = 45;
1281 pi->fps_high_t = cpu_to_be16(tmp);
1282 ret = kv_copy_bytes_to_smc(rdev,
1283 pi->dpm_table_start +
1284 offsetof(SMU7_Fusion_DpmTable, FpsHighT),
1285 (u8 *)&pi->fps_high_t,
1286 sizeof(u16), pi->sram_end);
1287
1288 tmp = 30;
1289 pi->fps_low_t = cpu_to_be16(tmp);
1290
1291 ret = kv_copy_bytes_to_smc(rdev,
1292 pi->dpm_table_start +
1293 offsetof(SMU7_Fusion_DpmTable, FpsLowT),
1294 (u8 *)&pi->fps_low_t,
1295 sizeof(u16), pi->sram_end);
1296
1297 }
1298 return ret;
1299}
1300
1301static void kv_init_powergate_state(struct radeon_device *rdev)
1302{
1303 struct kv_power_info *pi = kv_get_pi(rdev);
1304
1305 pi->uvd_power_gated = false;
1306 pi->vce_power_gated = false;
1307 pi->samu_power_gated = false;
1308 pi->acp_power_gated = false;
1309
1310}
1311
1312static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
1313{
1314 return kv_notify_message_to_smu(rdev, enable ?
1315 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
1316}
1317
1318#if 0
1319static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
1320{
1321 return kv_notify_message_to_smu(rdev, enable ?
1322 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
1323}
1324#endif
1325
1326static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
1327{
1328 return kv_notify_message_to_smu(rdev, enable ?
1329 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
1330}
1331
1332static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
1333{
1334 return kv_notify_message_to_smu(rdev, enable ?
1335 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
1336}
1337
1338static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
1339{
1340 struct kv_power_info *pi = kv_get_pi(rdev);
1341 struct radeon_uvd_clock_voltage_dependency_table *table =
1342 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
1343 int ret;
1344
1345 if (!gate) {
1346 if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state)
1347 pi->uvd_boot_level = table->count - 1;
1348 else
1349 pi->uvd_boot_level = 0;
1350
1351 ret = kv_copy_bytes_to_smc(rdev,
1352 pi->dpm_table_start +
1353 offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
1354 (uint8_t *)&pi->uvd_boot_level,
1355 sizeof(u8), pi->sram_end);
1356 if (ret)
1357 return ret;
1358
1359 if (!pi->caps_uvd_dpm ||
1360 pi->caps_stable_p_state)
1361 kv_send_msg_to_smc_with_parameter(rdev,
1362 PPSMC_MSG_UVDDPM_SetEnabledMask,
1363 (1 << pi->uvd_boot_level));
1364 }
1365
1366 return kv_enable_uvd_dpm(rdev, !gate);
1367}
1368
1369#if 0
1370static u8 kv_get_vce_boot_level(struct radeon_device *rdev)
1371{
1372 u8 i;
1373 struct radeon_vce_clock_voltage_dependency_table *table =
1374 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1375
1376 for (i = 0; i < table->count; i++) {
1377 if (table->entries[i].evclk >= 0) /* XXX */
1378 break;
1379 }
1380
1381 return i;
1382}
1383
1384static int kv_update_vce_dpm(struct radeon_device *rdev,
1385 struct radeon_ps *radeon_new_state,
1386 struct radeon_ps *radeon_current_state)
1387{
1388 struct kv_power_info *pi = kv_get_pi(rdev);
1389 struct radeon_vce_clock_voltage_dependency_table *table =
1390 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1391 int ret;
1392
1393 if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
1394 if (pi->caps_stable_p_state)
1395 pi->vce_boot_level = table->count - 1;
1396 else
1397 pi->vce_boot_level = kv_get_vce_boot_level(rdev);
1398
1399 ret = kv_copy_bytes_to_smc(rdev,
1400 pi->dpm_table_start +
1401 offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
1402 (u8 *)&pi->vce_boot_level,
1403 sizeof(u8),
1404 pi->sram_end);
1405 if (ret)
1406 return ret;
1407
1408 if (pi->caps_stable_p_state)
1409 kv_send_msg_to_smc_with_parameter(rdev,
1410 PPSMC_MSG_VCEDPM_SetEnabledMask,
1411 (1 << pi->vce_boot_level));
1412
1413 kv_enable_vce_dpm(rdev, true);
1414 } else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
1415 kv_enable_vce_dpm(rdev, false);
1416 }
1417
1418 return 0;
1419}
1420#endif
1421
1422static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
1423{
1424 struct kv_power_info *pi = kv_get_pi(rdev);
1425 struct radeon_clock_voltage_dependency_table *table =
1426 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
1427 int ret;
1428
1429 if (!gate) {
1430 if (pi->caps_stable_p_state)
1431 pi->samu_boot_level = table->count - 1;
1432 else
1433 pi->samu_boot_level = 0;
1434
1435 ret = kv_copy_bytes_to_smc(rdev,
1436 pi->dpm_table_start +
1437 offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
1438 (u8 *)&pi->samu_boot_level,
1439 sizeof(u8),
1440 pi->sram_end);
1441 if (ret)
1442 return ret;
1443
1444 if (pi->caps_stable_p_state)
1445 kv_send_msg_to_smc_with_parameter(rdev,
1446 PPSMC_MSG_SAMUDPM_SetEnabledMask,
1447 (1 << pi->samu_boot_level));
1448 }
1449
1450 return kv_enable_samu_dpm(rdev, !gate);
1451}
1452
1453static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
1454{
1455 struct kv_power_info *pi = kv_get_pi(rdev);
1456 struct radeon_clock_voltage_dependency_table *table =
1457 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
1458 int ret;
1459
1460 if (!gate) {
1461 if (pi->caps_stable_p_state)
1462 pi->acp_boot_level = table->count - 1;
1463 else
1464 pi->acp_boot_level = 0;
1465
1466 ret = kv_copy_bytes_to_smc(rdev,
1467 pi->dpm_table_start +
1468 offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
1469 (u8 *)&pi->acp_boot_level,
1470 sizeof(u8),
1471 pi->sram_end);
1472 if (ret)
1473 return ret;
1474
1475 if (pi->caps_stable_p_state)
1476 kv_send_msg_to_smc_with_parameter(rdev,
1477 PPSMC_MSG_ACPDPM_SetEnabledMask,
1478 (1 << pi->acp_boot_level));
1479 }
1480
1481 return kv_enable_acp_dpm(rdev, !gate);
1482}
1483
1484void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
1485{
1486 struct kv_power_info *pi = kv_get_pi(rdev);
1487
1488 if (pi->uvd_power_gated == gate)
1489 return;
1490
1491 pi->uvd_power_gated = gate;
1492
1493 if (gate) {
1494 if (pi->caps_uvd_pg) {
1495 uvd_v1_0_stop(rdev);
1496 cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
1497 }
1498 kv_update_uvd_dpm(rdev, gate);
1499 if (pi->caps_uvd_pg)
1500 kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
1501 } else {
1502 if (pi->caps_uvd_pg) {
1503 kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
1504 uvd_v4_2_resume(rdev);
1505 uvd_v1_0_start(rdev);
1506 cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
1507 }
1508 kv_update_uvd_dpm(rdev, gate);
1509 }
1510}
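/* For illustration only (not driver code): the ordering above matters - on
 * gating, UVD is stopped and its DPM state updated before the
 * PPSMC_MSG_UVDPowerOFF message; on ungating, power-on and resume come
 * first so the block is alive before its DPM is re-enabled. The samu/acp
 * helpers below follow the same pattern.
 */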
1511
1512static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
1513{
1514 struct kv_power_info *pi = kv_get_pi(rdev);
1515
1516 if (pi->vce_power_gated == gate)
1517 return;
1518
1519 pi->vce_power_gated = gate;
1520
1521 if (gate) {
1522 if (pi->caps_vce_pg)
1523 kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
1524 } else {
1525 if (pi->caps_vce_pg)
1526 kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
1527 }
1528}
1529
1530static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
1531{
1532 struct kv_power_info *pi = kv_get_pi(rdev);
1533
1534 if (pi->samu_power_gated == gate)
1535 return;
1536
1537 pi->samu_power_gated = gate;
1538
1539 if (gate) {
1540 kv_update_samu_dpm(rdev, true);
1541 if (pi->caps_samu_pg)
1542 kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
1543 } else {
1544 if (pi->caps_samu_pg)
1545 kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
1546 kv_update_samu_dpm(rdev, false);
1547 }
1548}
1549
1550static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
1551{
1552 struct kv_power_info *pi = kv_get_pi(rdev);
1553
1554 if (pi->acp_power_gated == gate)
1555 return;
1556
1557 if (rdev->family == CHIP_KABINI)
1558 return;
1559
1560 pi->acp_power_gated = gate;
1561
1562 if (gate) {
1563 kv_update_acp_dpm(rdev, true);
1564 if (pi->caps_acp_pg)
1565 kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
1566 } else {
1567 if (pi->caps_acp_pg)
1568 kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
1569 kv_update_acp_dpm(rdev, false);
1570 }
1571}
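/*
 * Note the ordering the powergate helpers above share: when gating, the
 * block's DPM is wound down before the SMU is told to cut power; when
 * ungating, power is restored first and DPM is re-enabled afterwards.
 * (The VCE helper only sends the power on/off messages here.)
 */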
1572
1573static void kv_set_valid_clock_range(struct radeon_device *rdev,
1574 struct radeon_ps *new_rps)
1575{
1576 struct kv_ps *new_ps = kv_get_ps(new_rps);
1577 struct kv_power_info *pi = kv_get_pi(rdev);
1578 u32 i;
1579 struct radeon_clock_voltage_dependency_table *table =
1580 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1581
1582 if (table && table->count) {
1583 for (i = 0; i < pi->graphics_dpm_level_count; i++) {
1584 if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
1585 (i == (pi->graphics_dpm_level_count - 1))) {
1586 pi->lowest_valid = i;
1587 break;
1588 }
1589 }
1590
1591 for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
1592 if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) ||
1593 (i == 0)) {
1594 pi->highest_valid = i;
1595 break;
1596 }
1597 }
1598
1599 if (pi->lowest_valid > pi->highest_valid) {
1600 if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
1601 (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
1602 pi->highest_valid = pi->lowest_valid;
1603 else
1604 pi->lowest_valid = pi->highest_valid;
1605 }
1606 } else {
1607 struct sumo_sclk_voltage_mapping_table *table =
1608 &pi->sys_info.sclk_voltage_mapping_table;
1609
1610 for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
1611 if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
1612 i == (int)(pi->graphics_dpm_level_count - 1)) {
1613 pi->lowest_valid = i;
1614 break;
1615 }
1616 }
1617
1618 for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
1619 if (table->entries[i].sclk_frequency <=
1620 new_ps->levels[new_ps->num_levels - 1].sclk ||
1621 i == 0) {
1622 pi->highest_valid = i;
1623 break;
1624 }
1625 }
1626
1627 if (pi->lowest_valid > pi->highest_valid) {
1628 if ((new_ps->levels[0].sclk -
1629 table->entries[pi->highest_valid].sclk_frequency) >
1630 (table->entries[pi->lowest_valid].sclk_frequency -
1631 new_ps->levels[new_ps->num_levels -1].sclk))
1632 pi->highest_valid = pi->lowest_valid;
1633 else
1634 pi->lowest_valid = pi->highest_valid;
1635 }
1636 }
1637}
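/*
 * Illustrative sketch (plain C, not driver code): the same clamping of a
 * requested [low, high] sclk window onto a sorted clock table that
 * kv_set_valid_clock_range() performs above, reduced to bare arrays.
 */
#include <stdio.h>

static void clamp_window(const unsigned *clk, unsigned n,
			 unsigned low_sclk, unsigned high_sclk,
			 unsigned *lowest, unsigned *highest)
{
	unsigned i;

	*lowest = n - 1;			/* first level >= low_sclk */
	for (i = 0; i < n; i++)
		if (clk[i] >= low_sclk) {
			*lowest = i;
			break;
		}

	*highest = 0;				/* last level <= high_sclk */
	for (i = n; i-- > 0;)
		if (clk[i] <= high_sclk) {
			*highest = i;
			break;
		}

	/* Window fell between two levels: collapse to the nearer one. */
	if (*lowest > *highest) {
		if (low_sclk - clk[*highest] > clk[*lowest] - high_sclk)
			*highest = *lowest;
		else
			*lowest = *highest;
	}
}

int main(void)
{
	const unsigned clk[] = { 20000, 30000, 40000, 60000, 80000 };
	unsigned lo, hi;

	clamp_window(clk, 5, 35000, 70000, &lo, &hi);
	printf("valid levels %u..%u\n", lo, hi);	/* prints 2..3 */
	return 0;
}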
1638
1639static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
1640 struct radeon_ps *new_rps)
1641{
1642 struct kv_ps *new_ps = kv_get_ps(new_rps);
1643 struct kv_power_info *pi = kv_get_pi(rdev);
1644 int ret = 0;
1645 u8 clk_bypass_cntl;
1646
1647 if (pi->caps_enable_dfs_bypass) {
1648 clk_bypass_cntl = new_ps->need_dfs_bypass ?
1649 pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
1650 ret = kv_copy_bytes_to_smc(rdev,
1651 (pi->dpm_table_start +
1652 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
1653 (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
1654 offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
1655 &clk_bypass_cntl,
1656 sizeof(u8), pi->sram_end);
1657 }
1658
1659 return ret;
1660}
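/*
 * The SMC address built above decomposes as:
 *   dpm_table_start
 *   + offsetof(SMU7_Fusion_DpmTable, GraphicsLevel)            level array
 *   + graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)  boot level slot
 *   + offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)       the byte to patch
 * i.e. a single byte inside the boot level's table entry is patched in place.
 */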
1661
1662static int kv_enable_nb_dpm(struct radeon_device *rdev)
1663{
1664 struct kv_power_info *pi = kv_get_pi(rdev);
1665 int ret = 0;
1666
1667 if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
1668 ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
1669 if (ret == 0)
1670 pi->nb_dpm_enabled = true;
1671 }
1672
1673 return ret;
1674}
1675
1676int kv_dpm_force_performance_level(struct radeon_device *rdev,
1677 enum radeon_dpm_forced_level level)
1678{
1679 int ret;
1680
1681 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
1682 ret = kv_force_dpm_highest(rdev);
1683 if (ret)
1684 return ret;
1685 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
1686 ret = kv_force_dpm_lowest(rdev);
1687 if (ret)
1688 return ret;
1689 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
1690 ret = kv_unforce_levels(rdev);
1691 if (ret)
1692 return ret;
1693 }
1694
1695 rdev->pm.dpm.forced_level = level;
1696
1697 return 0;
1698}
1699
1700int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
1701{
1702 struct kv_power_info *pi = kv_get_pi(rdev);
1703 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
1704 struct radeon_ps *new_ps = &requested_ps;
1705
1706 kv_update_requested_ps(rdev, new_ps);
1707
1708 kv_apply_state_adjust_rules(rdev,
1709 &pi->requested_rps,
1710 &pi->current_rps);
1711
1712 return 0;
1713}
1714
1715int kv_dpm_set_power_state(struct radeon_device *rdev)
1716{
1717 struct kv_power_info *pi = kv_get_pi(rdev);
1718 struct radeon_ps *new_ps = &pi->requested_rps;
1719 /*struct radeon_ps *old_ps = &pi->current_rps;*/
1720 int ret;
1721
1722 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
1723 RADEON_CG_BLOCK_SDMA |
1724 RADEON_CG_BLOCK_BIF |
1725 RADEON_CG_BLOCK_HDP), false);
1726
1727 if (rdev->family == CHIP_KABINI) {
1728 if (pi->enable_dpm) {
1729 kv_set_valid_clock_range(rdev, new_ps);
1730 kv_update_dfs_bypass_settings(rdev, new_ps);
1731 ret = kv_calculate_ds_divider(rdev);
1732 if (ret) {
1733 DRM_ERROR("kv_calculate_ds_divider failed\n");
1734 return ret;
1735 }
1736 kv_calculate_nbps_level_settings(rdev);
1737 kv_calculate_dpm_settings(rdev);
1738 kv_force_lowest_valid(rdev);
1739 kv_enable_new_levels(rdev);
1740 kv_upload_dpm_settings(rdev);
1741 kv_program_nbps_index_settings(rdev, new_ps);
1742 kv_unforce_levels(rdev);
1743 kv_set_enabled_levels(rdev);
1744 kv_force_lowest_valid(rdev);
1745 kv_unforce_levels(rdev);
1746#if 0
1747 ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
1748 if (ret) {
1749 DRM_ERROR("kv_update_vce_dpm failed\n");
1750 return ret;
1751 }
1752#endif
1753 kv_update_sclk_t(rdev);
1754 }
1755 } else {
1756 if (pi->enable_dpm) {
1757 kv_set_valid_clock_range(rdev, new_ps);
1758 kv_update_dfs_bypass_settings(rdev, new_ps);
1759 ret = kv_calculate_ds_divider(rdev);
1760 if (ret) {
1761 DRM_ERROR("kv_calculate_ds_divider failed\n");
1762 return ret;
1763 }
1764 kv_calculate_nbps_level_settings(rdev);
1765 kv_calculate_dpm_settings(rdev);
1766 kv_freeze_sclk_dpm(rdev, true);
1767 kv_upload_dpm_settings(rdev);
1768 kv_program_nbps_index_settings(rdev, new_ps);
1769 kv_freeze_sclk_dpm(rdev, false);
1770 kv_set_enabled_levels(rdev);
1771#if 0
1772 ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
1773 if (ret) {
1774 DRM_ERROR("kv_update_vce_dpm failed\n");
1775 return ret;
1776 }
1777#endif
1778 kv_update_sclk_t(rdev);
1779 kv_enable_nb_dpm(rdev);
1780 }
1781 }
1782
1783 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
1784 RADEON_CG_BLOCK_SDMA |
1785 RADEON_CG_BLOCK_BIF |
1786 RADEON_CG_BLOCK_HDP), true);
1787
1788 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1789 return 0;
1790}
1791
1792void kv_dpm_post_set_power_state(struct radeon_device *rdev)
1793{
1794 struct kv_power_info *pi = kv_get_pi(rdev);
1795 struct radeon_ps *new_ps = &pi->requested_rps;
1796
1797 kv_update_current_ps(rdev, new_ps);
1798}
1799
1800void kv_dpm_setup_asic(struct radeon_device *rdev)
1801{
1802 sumo_take_smu_control(rdev, true);
1803 kv_init_powergate_state(rdev);
1804 kv_init_sclk_t(rdev);
1805}
1806
1807void kv_dpm_reset_asic(struct radeon_device *rdev)
1808{
1809 kv_force_lowest_valid(rdev);
1810 kv_init_graphics_levels(rdev);
1811 kv_program_bootup_state(rdev);
1812 kv_upload_dpm_settings(rdev);
1813 kv_force_lowest_valid(rdev);
1814 kv_unforce_levels(rdev);
1815}
1816
1817//XXX use sumo_dpm_display_configuration_changed
1818
1819static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
1820 struct radeon_clock_and_voltage_limits *table)
1821{
1822 struct kv_power_info *pi = kv_get_pi(rdev);
1823
1824 if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
1825 int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
1826 table->sclk =
1827 pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
1828 table->vddc =
1829 kv_convert_2bit_index_to_voltage(rdev,
1830 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
1831 }
1832
1833 table->mclk = pi->sys_info.nbp_memory_clock[0];
1834}
1835
1836static void kv_patch_voltage_values(struct radeon_device *rdev)
1837{
1838 int i;
1839 struct radeon_uvd_clock_voltage_dependency_table *table =
1840 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
1841
1842 if (table->count) {
1843 for (i = 0; i < table->count; i++)
1844 table->entries[i].v =
1845 kv_convert_8bit_index_to_voltage(rdev,
1846 table->entries[i].v);
1847 }
1848
1849}
1850
1851static void kv_construct_boot_state(struct radeon_device *rdev)
1852{
1853 struct kv_power_info *pi = kv_get_pi(rdev);
1854
1855 pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
1856 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
1857 pi->boot_pl.ds_divider_index = 0;
1858 pi->boot_pl.ss_divider_index = 0;
1859 pi->boot_pl.allow_gnb_slow = 1;
1860 pi->boot_pl.force_nbp_state = 0;
1861 pi->boot_pl.display_wm = 0;
1862 pi->boot_pl.vce_wm = 0;
1863}
1864
1865static int kv_force_dpm_highest(struct radeon_device *rdev)
1866{
1867 int ret;
1868 u32 enable_mask, i;
1869
1870 ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
1871 if (ret)
1872 return ret;
1873
1874 for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
1875 if (enable_mask & (1 << i))
1876 break;
1877 }
1878
1879 return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
1880}
1881
1882static int kv_force_dpm_lowest(struct radeon_device *rdev)
1883{
1884 int ret;
1885 u32 enable_mask, i;
1886
1887 ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
1888 if (ret)
1889 return ret;
1890
1891 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
1892 if (enable_mask & (1 << i))
1893 break;
1894 }
1895
1896 return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
1897}
1898
1899static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1900 u32 sclk, u32 min_sclk_in_sr)
1901{
1902 struct kv_power_info *pi = kv_get_pi(rdev);
1903 u32 i;
1904 u32 temp;
1905 u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
1906 min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;
1907
1908 if (sclk < min)
1909 return 0;
1910
1911 if (!pi->caps_sclk_ds)
1912 return 0;
1913
1914 for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
1915 temp = sclk / sumo_get_sleep_divider_from_id(i);
1916 if ((temp >= min) || (i == 0))
1917 break;
1918 }
1919
1920 return (u8)i;
1921}
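/*
 * Minimal sketch of the selection above, assuming (as on Sumo-class
 * hardware) that deep-sleep divider id N means dividing sclk by 2^N,
 * i.e. that sumo_get_sleep_divider_from_id() is that power-of-two
 * mapping: pick the largest divider that keeps the divided clock at or
 * above the minimum.
 */
static unsigned char pick_sleep_divider_id(unsigned sclk, unsigned min,
					   unsigned max_id)
{
	unsigned i;

	for (i = max_id; i > 0; i--)
		if (sclk / (1u << i) >= min)
			break;
	return (unsigned char)i;	/* 0 means no deep-sleep divide */
}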
1922
1923static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
1924{
1925 struct kv_power_info *pi = kv_get_pi(rdev);
1926 struct radeon_clock_voltage_dependency_table *table =
1927 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1928 int i;
1929
1930 if (table && table->count) {
1931 for (i = table->count - 1; i >= 0; i--) {
1932 if (pi->high_voltage_t &&
1933 (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
1934 pi->high_voltage_t)) {
1935 *limit = i;
1936 return 0;
1937 }
1938 }
1939 } else {
1940 struct sumo_sclk_voltage_mapping_table *table =
1941 &pi->sys_info.sclk_voltage_mapping_table;
1942
1943 for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
1944 if (pi->high_voltage_t &&
1945 (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
1946 pi->high_voltage_t)) {
1947 *limit = i;
1948 return 0;
1949 }
1950 }
1951 }
1952
1953 *limit = 0;
1954 return 0;
1955}
1956
1957static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
1958 struct radeon_ps *new_rps,
1959 struct radeon_ps *old_rps)
1960{
1961 struct kv_ps *ps = kv_get_ps(new_rps);
1962 struct kv_power_info *pi = kv_get_pi(rdev);
1963 u32 min_sclk = 10000; /* ??? */
1964 u32 sclk, mclk = 0;
1965 int i, limit;
1966 bool force_high;
1967 struct radeon_clock_voltage_dependency_table *table =
1968 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1969 u32 stable_p_state_sclk = 0;
1970 struct radeon_clock_and_voltage_limits *max_limits =
1971 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
1972
1973 mclk = max_limits->mclk;
1974 sclk = min_sclk;
1975
1976 if (pi->caps_stable_p_state) {
1977 stable_p_state_sclk = (max_limits->sclk * 75) / 100;
1978
1979 for (i = table->count - 1; i >= 0; i--) {
1980 if (stable_p_state_sclk >= table->entries[i].clk) {
1981 stable_p_state_sclk = table->entries[i].clk;
1982 break;
1983 }
1984 }
1985
1986 if (i < 0)
1987 stable_p_state_sclk = table->entries[0].clk;
1988
1989 sclk = stable_p_state_sclk;
1990 }
1991
1992 ps->need_dfs_bypass = true;
1993
1994 for (i = 0; i < ps->num_levels; i++) {
1995 if (ps->levels[i].sclk < sclk)
1996 ps->levels[i].sclk = sclk;
1997 }
1998
1999 if (table && table->count) {
2000 for (i = 0; i < ps->num_levels; i++) {
2001 if (pi->high_voltage_t &&
2002 (pi->high_voltage_t <
2003 kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
2004 kv_get_high_voltage_limit(rdev, &limit);
2005 ps->levels[i].sclk = table->entries[limit].clk;
2006 }
2007 }
2008 } else {
2009 struct sumo_sclk_voltage_mapping_table *table =
2010 &pi->sys_info.sclk_voltage_mapping_table;
2011
2012 for (i = 0; i < ps->num_levels; i++) {
2013 if (pi->high_voltage_t &&
2014 (pi->high_voltage_t <
2015 kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
2016 kv_get_high_voltage_limit(rdev, &limit);
2017 ps->levels[i].sclk = table->entries[limit].sclk_frequency;
2018 }
2019 }
2020 }
2021
2022 if (pi->caps_stable_p_state) {
2023 for (i = 0; i < ps->num_levels; i++) {
2024 ps->levels[i].sclk = stable_p_state_sclk;
2025 }
2026 }
2027
2028 pi->video_start = new_rps->dclk || new_rps->vclk;
2029
2030 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
2031 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
2032 pi->battery_state = true;
2033 else
2034 pi->battery_state = false;
2035
2036 if (rdev->family == CHIP_KABINI) {
2037 ps->dpm0_pg_nb_ps_lo = 0x1;
2038 ps->dpm0_pg_nb_ps_hi = 0x0;
2039 ps->dpmx_nb_ps_lo = 0x1;
2040 ps->dpmx_nb_ps_hi = 0x0;
2041 } else {
2042 ps->dpm0_pg_nb_ps_lo = 0x1;
2043 ps->dpm0_pg_nb_ps_hi = 0x0;
2044 ps->dpmx_nb_ps_lo = 0x2;
2045 ps->dpmx_nb_ps_hi = 0x1;
2046
2047 if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
2048 force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2049 pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
2050 pi->disable_nb_ps3_in_battery;
2051 ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
2052 ps->dpm0_pg_nb_ps_hi = 0x2;
2053 ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
2054 ps->dpmx_nb_ps_hi = 0x2;
2055 }
2056 }
2057}
2058
2059static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
2060 u32 index, bool enable)
2061{
2062 struct kv_power_info *pi = kv_get_pi(rdev);
2063
2064 pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
2065}
2066
2067static int kv_calculate_ds_divider(struct radeon_device *rdev)
2068{
2069 struct kv_power_info *pi = kv_get_pi(rdev);
2070 u32 sclk_in_sr = 10000; /* ??? */
2071 u32 i;
2072
2073 if (pi->lowest_valid > pi->highest_valid)
2074 return -EINVAL;
2075
2076 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2077 pi->graphics_level[i].DeepSleepDivId =
2078 kv_get_sleep_divider_id_from_clock(rdev,
2079 be32_to_cpu(pi->graphics_level[i].SclkFrequency),
2080 sclk_in_sr);
2081 }
2082 return 0;
2083}
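/* Fields in the SMU-resident tables are stored big-endian, hence the
 * be32_to_cpu() above when SclkFrequency is read back on the host. */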
2084
2085static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
2086{
2087 struct kv_power_info *pi = kv_get_pi(rdev);
2088 u32 i;
2089 bool force_high;
2090 struct radeon_clock_and_voltage_limits *max_limits =
2091 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
2092 u32 mclk = max_limits->mclk;
2093
2094 if (pi->lowest_valid > pi->highest_valid)
2095 return -EINVAL;
2096
2097 if (rdev->family == CHIP_KABINI) {
2098 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2099 pi->graphics_level[i].GnbSlow = 1;
2100 pi->graphics_level[i].ForceNbPs1 = 0;
2101 pi->graphics_level[i].UpH = 0;
2102 }
2103
2104 if (!pi->sys_info.nb_dpm_enable)
2105 return 0;
2106
2107 force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2108 (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
2109
2110 if (force_high) {
2111 for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2112 pi->graphics_level[i].GnbSlow = 0;
2113 } else {
2114 if (pi->battery_state)
2115 pi->graphics_level[0].ForceNbPs1 = 1;
2116
2117 pi->graphics_level[1].GnbSlow = 0;
2118 pi->graphics_level[2].GnbSlow = 0;
2119 pi->graphics_level[3].GnbSlow = 0;
2120 pi->graphics_level[4].GnbSlow = 0;
2121 }
2122 } else {
2123 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2124 pi->graphics_level[i].GnbSlow = 1;
2125 pi->graphics_level[i].ForceNbPs1 = 0;
2126 pi->graphics_level[i].UpH = 0;
2127 }
2128
2129 if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
2130 pi->graphics_level[pi->lowest_valid].UpH = 0x28;
2131 pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
2132 if (pi->lowest_valid != pi->highest_valid)
2133 pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
2134 }
2135 }
2136 return 0;
2137}
2138
2139static int kv_calculate_dpm_settings(struct radeon_device *rdev)
2140{
2141 struct kv_power_info *pi = kv_get_pi(rdev);
2142 u32 i;
2143
2144 if (pi->lowest_valid > pi->highest_valid)
2145 return -EINVAL;
2146
2147 for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2148 pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
2149
2150 return 0;
2151}
2152
2153static void kv_init_graphics_levels(struct radeon_device *rdev)
2154{
2155 struct kv_power_info *pi = kv_get_pi(rdev);
2156 u32 i;
2157 struct radeon_clock_voltage_dependency_table *table =
2158 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2159
2160 if (table && table->count) {
2161 u32 vid_2bit;
2162
2163 pi->graphics_dpm_level_count = 0;
2164 for (i = 0; i < table->count; i++) {
2165 if (pi->high_voltage_t &&
2166 (pi->high_voltage_t <
2167 kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
2168 break;
2169
2170 kv_set_divider_value(rdev, i, table->entries[i].clk);
2171 vid_2bit = sumo_convert_vid7_to_vid2(rdev,
2172 &pi->sys_info.vid_mapping_table,
2173 table->entries[i].v);
2174 kv_set_vid(rdev, i, vid_2bit);
2175 kv_set_at(rdev, i, pi->at[i]);
2176 kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
2177 pi->graphics_dpm_level_count++;
2178 }
2179 } else {
2180 struct sumo_sclk_voltage_mapping_table *table =
2181 &pi->sys_info.sclk_voltage_mapping_table;
2182
2183 pi->graphics_dpm_level_count = 0;
2184 for (i = 0; i < table->num_max_dpm_entries; i++) {
2185 if (pi->high_voltage_t &&
2186 pi->high_voltage_t <
2187 kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
2188 break;
2189
2190 kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
2191 kv_set_vid(rdev, i, table->entries[i].vid_2bit);
2192 kv_set_at(rdev, i, pi->at[i]);
2193 kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
2194 pi->graphics_dpm_level_count++;
2195 }
2196 }
2197
2198 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
2199 kv_dpm_power_level_enable(rdev, i, false);
2200}
2201
2202static void kv_enable_new_levels(struct radeon_device *rdev)
2203{
2204 struct kv_power_info *pi = kv_get_pi(rdev);
2205 u32 i;
2206
2207 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
2208 if (i >= pi->lowest_valid && i <= pi->highest_valid)
2209 kv_dpm_power_level_enable(rdev, i, true);
2210 }
2211}
2212
2213static int kv_set_enabled_levels(struct radeon_device *rdev)
2214{
2215 struct kv_power_info *pi = kv_get_pi(rdev);
2216 u32 i, new_mask = 0;
2217
2218 for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2219 new_mask |= (1 << i);
2220
2221 return kv_send_msg_to_smc_with_parameter(rdev,
2222 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2223 new_mask);
2224}
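/*
 * Example of the mask built above: lowest_valid = 2, highest_valid = 4
 * gives new_mask = (1 << 2) | (1 << 3) | (1 << 4) = 0x1c, so the SMC
 * leaves only DPM levels 2..4 enabled.
 */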
2225
2226static void kv_program_nbps_index_settings(struct radeon_device *rdev,
2227 struct radeon_ps *new_rps)
2228{
2229 struct kv_ps *new_ps = kv_get_ps(new_rps);
2230 struct kv_power_info *pi = kv_get_pi(rdev);
2231 u32 nbdpmconfig1;
2232
2233 if (rdev->family == CHIP_KABINI)
2234 return;
2235
2236 if (pi->sys_info.nb_dpm_enable) {
2237 nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
2238 nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
2239 DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
2240 nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
2241 Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
2242 DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
2243 DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
2244 WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
2245 }
2246}
2247
2248static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
2249 int min_temp, int max_temp)
2250{
2251 int low_temp = 0 * 1000;
2252 int high_temp = 255 * 1000;
2253 u32 tmp;
2254
2255 if (low_temp < min_temp)
2256 low_temp = min_temp;
2257 if (high_temp > max_temp)
2258 high_temp = max_temp;
2259 if (high_temp < low_temp) {
2260 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
2261 return -EINVAL;
2262 }
2263
2264 tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
2265 tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
2266 tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
2267 DIG_THERM_INTL(49 + (low_temp / 1000)));
2268 WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);
2269
2270 rdev->pm.dpm.thermal.min_temp = low_temp;
2271 rdev->pm.dpm.thermal.max_temp = high_temp;
2272
2273 return 0;
2274}
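/*
 * Worked example of the encoding above: min_temp = 90000 and
 * max_temp = 120000 (millidegrees C) give
 *   DIG_THERM_INTL = 49 + 90000 / 1000  = 139
 *   DIG_THERM_INTH = 49 + 120000 / 1000 = 169
 * The +49 bias is taken from the code above; what the biased value
 * means internally is an SMC detail.
 */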
2275
2276union igp_info {
2277 struct _ATOM_INTEGRATED_SYSTEM_INFO info;
2278 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
2279 struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
2280 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
2281 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
2282 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
2283};
2284
2285static int kv_parse_sys_info_table(struct radeon_device *rdev)
2286{
2287 struct kv_power_info *pi = kv_get_pi(rdev);
2288 struct radeon_mode_info *mode_info = &rdev->mode_info;
2289 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
2290 union igp_info *igp_info;
2291 u8 frev, crev;
2292 u16 data_offset;
2293 int i;
2294
2295 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
2296 &frev, &crev, &data_offset)) {
2297 igp_info = (union igp_info *)(mode_info->atom_context->bios +
2298 data_offset);
2299
2300 if (crev != 8) {
2301 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
2302 return -EINVAL;
2303 }
2304 pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
2305 pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
2306 pi->sys_info.bootup_nb_voltage_index =
2307 le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
2308 if (igp_info->info_8.ucHtcTmpLmt == 0)
2309 pi->sys_info.htc_tmp_lmt = 203;
2310 else
2311 pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
2312 if (igp_info->info_8.ucHtcHystLmt == 0)
2313 pi->sys_info.htc_hyst_lmt = 5;
2314 else
2315 pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
2316 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
2317 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
2318 }
2319
2320 if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
2321 pi->sys_info.nb_dpm_enable = true;
2322 else
2323 pi->sys_info.nb_dpm_enable = false;
2324
2325 for (i = 0; i < KV_NUM_NBPSTATES; i++) {
2326 pi->sys_info.nbp_memory_clock[i] =
2327 le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
2328 pi->sys_info.nbp_n_clock[i] =
2329 le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
2330 }
2331 if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
2332 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
2333 pi->caps_enable_dfs_bypass = true;
2334
2335 sumo_construct_sclk_voltage_mapping_table(rdev,
2336 &pi->sys_info.sclk_voltage_mapping_table,
2337 igp_info->info_8.sAvail_SCLK);
2338
2339 sumo_construct_vid_mapping_table(rdev,
2340 &pi->sys_info.vid_mapping_table,
2341 igp_info->info_8.sAvail_SCLK);
2342
2343 kv_construct_max_power_limits_table(rdev,
2344 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
2345 }
2346 return 0;
2347}
2348
2349union power_info {
2350 struct _ATOM_POWERPLAY_INFO info;
2351 struct _ATOM_POWERPLAY_INFO_V2 info_2;
2352 struct _ATOM_POWERPLAY_INFO_V3 info_3;
2353 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
2354 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
2355 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
2356};
2357
2358union pplib_clock_info {
2359 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
2360 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
2361 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
2362 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
2363};
2364
2365union pplib_power_state {
2366 struct _ATOM_PPLIB_STATE v1;
2367 struct _ATOM_PPLIB_STATE_V2 v2;
2368};
2369
2370static void kv_patch_boot_state(struct radeon_device *rdev,
2371 struct kv_ps *ps)
2372{
2373 struct kv_power_info *pi = kv_get_pi(rdev);
2374
2375 ps->num_levels = 1;
2376 ps->levels[0] = pi->boot_pl;
2377}
2378
2379static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
2380 struct radeon_ps *rps,
2381 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
2382 u8 table_rev)
2383{
2384 struct kv_ps *ps = kv_get_ps(rps);
2385
2386 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
2387 rps->class = le16_to_cpu(non_clock_info->usClassification);
2388 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
2389
2390 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
2391 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
2392 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
2393 } else {
2394 rps->vclk = 0;
2395 rps->dclk = 0;
2396 }
2397
2398 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
2399 rdev->pm.dpm.boot_ps = rps;
2400 kv_patch_boot_state(rdev, ps);
2401 }
2402 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
2403 rdev->pm.dpm.uvd_ps = rps;
2404}
2405
2406static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
2407 struct radeon_ps *rps, int index,
2408 union pplib_clock_info *clock_info)
2409{
2410 struct kv_power_info *pi = kv_get_pi(rdev);
2411 struct kv_ps *ps = kv_get_ps(rps);
2412 struct kv_pl *pl = &ps->levels[index];
2413 u32 sclk;
2414
2415 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
2416 sclk |= clock_info->sumo.ucEngineClockHigh << 16;
2417 pl->sclk = sclk;
2418 pl->vddc_index = clock_info->sumo.vddcIndex;
2419
2420 ps->num_levels = index + 1;
2421
2422 if (pi->caps_sclk_ds) {
2423 pl->ds_divider_index = 5;
2424 pl->ss_divider_index = 5;
2425 }
2426}
2427
2428static int kv_parse_power_table(struct radeon_device *rdev)
2429{
2430 struct radeon_mode_info *mode_info = &rdev->mode_info;
2431 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
2432 union pplib_power_state *power_state;
2433 int i, j, k, non_clock_array_index, clock_array_index;
2434 union pplib_clock_info *clock_info;
2435 struct _StateArray *state_array;
2436 struct _ClockInfoArray *clock_info_array;
2437 struct _NonClockInfoArray *non_clock_info_array;
2438 union power_info *power_info;
2439 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2440 u16 data_offset;
2441 u8 frev, crev;
2442 u8 *power_state_offset;
2443 struct kv_ps *ps;
2444
2445 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
2446 &frev, &crev, &data_offset))
2447 return -EINVAL;
2448 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2449
2450 state_array = (struct _StateArray *)
2451 (mode_info->atom_context->bios + data_offset +
2452 le16_to_cpu(power_info->pplib.usStateArrayOffset));
2453 clock_info_array = (struct _ClockInfoArray *)
2454 (mode_info->atom_context->bios + data_offset +
2455 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
2456 non_clock_info_array = (struct _NonClockInfoArray *)
2457 (mode_info->atom_context->bios + data_offset +
2458 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
2459
2460 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
2461 state_array->ucNumEntries, GFP_KERNEL);
2462 if (!rdev->pm.dpm.ps)
2463 return -ENOMEM;
2464 power_state_offset = (u8 *)state_array->states;
2465 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
2466 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
2467 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
2468 for (i = 0; i < state_array->ucNumEntries; i++) {
2469 u8 *idx;
2470 power_state = (union pplib_power_state *)power_state_offset;
2471 non_clock_array_index = power_state->v2.nonClockInfoIndex;
2472 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2473 &non_clock_info_array->nonClockInfo[non_clock_array_index];
2474 if (!rdev->pm.power_state[i].clock_info)
2475 return -EINVAL;
2476 ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
2477 if (ps == NULL) {
2478 kfree(rdev->pm.dpm.ps);
2479 return -ENOMEM;
2480 }
2481 rdev->pm.dpm.ps[i].ps_priv = ps;
2482 k = 0;
2483 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
2484 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
2485 clock_array_index = idx[j];
2486 if (clock_array_index >= clock_info_array->ucNumEntries)
2487 continue;
2488 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
2489 break;
2490 clock_info = (union pplib_clock_info *)
2491 ((u8 *)&clock_info_array->clockInfo[0] +
2492 (clock_array_index * clock_info_array->ucEntrySize));
2493 kv_parse_pplib_clock_info(rdev,
2494 &rdev->pm.dpm.ps[i], k,
2495 clock_info);
2496 k++;
2497 }
2498 kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
2499 non_clock_info,
2500 non_clock_info_array->ucEntrySize);
2501 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
2502 }
2503 rdev->pm.dpm.num_ps = state_array->ucNumEntries;
2504 return 0;
2505}
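/*
 * Layout note: each _ATOM_PPLIB_STATE_V2 entry is two header bytes
 * (ucNumDPMLevels, nonClockInfoIndex) followed by one clock-info index
 * byte per DPM level, which is the "2 + ucNumDPMLevels" stride used to
 * advance power_state_offset above.
 */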
2506
2507int kv_dpm_init(struct radeon_device *rdev)
2508{
2509 struct kv_power_info *pi;
2510 int ret, i;
2511
2512 pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
2513 if (pi == NULL)
2514 return -ENOMEM;
2515 rdev->pm.dpm.priv = pi;
2516
2517 ret = r600_parse_extended_power_table(rdev);
2518 if (ret)
2519 return ret;
2520
2521 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
2522 pi->at[i] = TRINITY_AT_DFLT;
2523
2524 pi->sram_end = SMC_RAM_END;
2525
2526 if (rdev->family == CHIP_KABINI)
2527 pi->high_voltage_t = 4001;
2528
2529 pi->enable_nb_dpm = true;
2530
2531 pi->caps_power_containment = true;
2532 pi->caps_cac = true;
2533 pi->enable_didt = false;
2534 if (pi->enable_didt) {
2535 pi->caps_sq_ramping = true;
2536 pi->caps_db_ramping = true;
2537 pi->caps_td_ramping = true;
2538 pi->caps_tcp_ramping = true;
2539 }
2540
2541 pi->caps_sclk_ds = true;
2542 pi->enable_auto_thermal_throttling = true;
2543 pi->disable_nb_ps3_in_battery = false;
2544 pi->bapm_enable = true;
2545 pi->voltage_drop_t = 0;
2546 pi->caps_sclk_throttle_low_notification = false;
2547 pi->caps_fps = false; /* true? */
2548 pi->caps_uvd_pg = true;
2549 pi->caps_uvd_dpm = true;
2550 pi->caps_vce_pg = false;
2551 pi->caps_samu_pg = false;
2552 pi->caps_acp_pg = false;
2553 pi->caps_stable_p_state = false;
2554
2555 ret = kv_parse_sys_info_table(rdev);
2556 if (ret)
2557 return ret;
2558
2559 kv_patch_voltage_values(rdev);
2560 kv_construct_boot_state(rdev);
2561
2562 ret = kv_parse_power_table(rdev);
2563 if (ret)
2564 return ret;
2565
2566 pi->enable_dpm = true;
2567
2568 return 0;
2569}
2570
2571void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
2572 struct seq_file *m)
2573{
2574 struct kv_power_info *pi = kv_get_pi(rdev);
2575 u32 current_index =
2576 (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
2577 CURR_SCLK_INDEX_SHIFT;
2578 u32 sclk, tmp;
2579 u16 vddc;
2580
2581 if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
2582 seq_printf(m, "invalid dpm profile %d\n", current_index);
2583 } else {
2584 sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
2585 tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
2586 SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
2587 vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
2588 seq_printf(m, "power level %d sclk: %u vddc: %u\n",
2589 current_index, sclk, vddc);
2590 }
2591}
2592
2593void kv_dpm_print_power_state(struct radeon_device *rdev,
2594 struct radeon_ps *rps)
2595{
2596 int i;
2597 struct kv_ps *ps = kv_get_ps(rps);
2598
2599 r600_dpm_print_class_info(rps->class, rps->class2);
2600 r600_dpm_print_cap_info(rps->caps);
2601 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2602 for (i = 0; i < ps->num_levels; i++) {
2603 struct kv_pl *pl = &ps->levels[i];
2604 printk("\t\tpower level %d sclk: %u vddc: %u\n",
2605 i, pl->sclk,
2606 kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
2607 }
2608 r600_dpm_print_ps_status(rdev, rps);
2609}
2610
2611void kv_dpm_fini(struct radeon_device *rdev)
2612{
2613 int i;
2614
2615 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2616 kfree(rdev->pm.dpm.ps[i].ps_priv);
2617 }
2618 kfree(rdev->pm.dpm.ps);
2619 kfree(rdev->pm.dpm.priv);
2620 r600_free_extended_power_table(rdev);
2621}
2622
2623void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
2624{
2625
2626}
2627
2628u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low)
2629{
2630 struct kv_power_info *pi = kv_get_pi(rdev);
2631 struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
2632
2633 if (low)
2634 return requested_state->levels[0].sclk;
2635 else
2636 return requested_state->levels[requested_state->num_levels - 1].sclk;
2637}
2638
2639u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low)
2640{
2641 struct kv_power_info *pi = kv_get_pi(rdev);
2642
2643 return pi->sys_info.bootup_uma_clk;
2644}
2645
diff --git a/drivers/gpu/drm/radeon/kv_dpm.h b/drivers/gpu/drm/radeon/kv_dpm.h
new file mode 100644
index 000000000000..32bb079572d7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/kv_dpm.h
@@ -0,0 +1,199 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __KV_DPM_H__
24#define __KV_DPM_H__
25
26#define SMU__NUM_SCLK_DPM_STATE 8
27#define SMU__NUM_MCLK_DPM_LEVELS 4
28#define SMU__NUM_LCLK_DPM_LEVELS 8
29#define SMU__NUM_PCIE_DPM_LEVELS 0 /* ??? */
30#include "smu7_fusion.h"
31#include "trinity_dpm.h"
32#include "ppsmc.h"
33
34#define KV_NUM_NBPSTATES 4
35
36enum kv_pt_config_reg_type {
37 KV_CONFIGREG_MMR = 0,
38 KV_CONFIGREG_SMC_IND,
39 KV_CONFIGREG_DIDT_IND,
40 KV_CONFIGREG_CACHE,
41 KV_CONFIGREG_MAX
42};
43
44struct kv_pt_config_reg {
45 u32 offset;
46 u32 mask;
47 u32 shift;
48 u32 value;
49 enum kv_pt_config_reg_type type;
50};
51
52struct kv_lcac_config_values {
53 u32 block_id;
54 u32 signal_id;
55 u32 t;
56};
57
58struct kv_lcac_config_reg {
59 u32 cntl;
60 u32 block_mask;
61 u32 block_shift;
62 u32 signal_mask;
63 u32 signal_shift;
64 u32 t_mask;
65 u32 t_shift;
66 u32 enable_mask;
67 u32 enable_shift;
68};
69
70struct kv_pl {
71 u32 sclk;
72 u8 vddc_index;
73 u8 ds_divider_index;
74 u8 ss_divider_index;
75 u8 allow_gnb_slow;
76 u8 force_nbp_state;
77 u8 display_wm;
78 u8 vce_wm;
79};
80
81struct kv_ps {
82 struct kv_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS];
83 u32 num_levels;
84 bool need_dfs_bypass;
85 u8 dpm0_pg_nb_ps_lo;
86 u8 dpm0_pg_nb_ps_hi;
87 u8 dpmx_nb_ps_lo;
88 u8 dpmx_nb_ps_hi;
89};
90
91struct kv_sys_info {
92 u32 bootup_uma_clk;
93 u32 bootup_sclk;
94 u32 dentist_vco_freq;
95 u32 nb_dpm_enable;
96 u32 nbp_memory_clock[KV_NUM_NBPSTATES];
97 u32 nbp_n_clock[KV_NUM_NBPSTATES];
98 u16 bootup_nb_voltage_index;
99 u8 htc_tmp_lmt;
100 u8 htc_hyst_lmt;
101 struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table;
102 struct sumo_vid_mapping_table vid_mapping_table;
103 u32 uma_channel_number;
104};
105
106struct kv_power_info {
107 u32 at[SUMO_MAX_HARDWARE_POWERLEVELS];
108 u32 voltage_drop_t;
109 struct kv_sys_info sys_info;
110 struct kv_pl boot_pl;
111 bool enable_nb_ps_policy;
112 bool disable_nb_ps3_in_battery;
113 bool video_start;
114 bool battery_state;
115 u32 lowest_valid;
116 u32 highest_valid;
117 u16 high_voltage_t;
118 bool cac_enabled;
119 bool bapm_enable;
120 /* smc offsets */
121 u32 sram_end;
122 u32 dpm_table_start;
123 u32 soft_regs_start;
124 /* dpm SMU tables */
125 u8 graphics_dpm_level_count;
126 u8 uvd_level_count;
127 u8 vce_level_count;
128 u8 acp_level_count;
129 u8 samu_level_count;
130 u16 fps_high_t;
131 SMU7_Fusion_GraphicsLevel graphics_level[SMU__NUM_SCLK_DPM_STATE];
132 SMU7_Fusion_ACPILevel acpi_level;
133 SMU7_Fusion_UvdLevel uvd_level[SMU7_MAX_LEVELS_UVD];
134 SMU7_Fusion_ExtClkLevel vce_level[SMU7_MAX_LEVELS_VCE];
135 SMU7_Fusion_ExtClkLevel acp_level[SMU7_MAX_LEVELS_ACP];
136 SMU7_Fusion_ExtClkLevel samu_level[SMU7_MAX_LEVELS_SAMU];
137 u8 uvd_boot_level;
138 u8 vce_boot_level;
139 u8 acp_boot_level;
140 u8 samu_boot_level;
141 u8 uvd_interval;
142 u8 vce_interval;
143 u8 acp_interval;
144 u8 samu_interval;
145 u8 graphics_boot_level;
146 u8 graphics_interval;
147 u8 graphics_therm_throttle_enable;
148 u8 graphics_voltage_change_enable;
149 u8 graphics_clk_slow_enable;
150 u8 graphics_clk_slow_divider;
151 u8 fps_low_t;
152 u32 low_sclk_interrupt_t;
153 bool uvd_power_gated;
154 bool vce_power_gated;
155 bool acp_power_gated;
156 bool samu_power_gated;
157 bool nb_dpm_enabled;
158 /* flags */
159 bool enable_didt;
160 bool enable_dpm;
161 bool enable_auto_thermal_throttling;
162 bool enable_nb_dpm;
163 /* caps */
164 bool caps_cac;
165 bool caps_power_containment;
166 bool caps_sq_ramping;
167 bool caps_db_ramping;
168 bool caps_td_ramping;
169 bool caps_tcp_ramping;
170 bool caps_sclk_throttle_low_notification;
171 bool caps_fps;
172 bool caps_uvd_dpm;
173 bool caps_uvd_pg;
174 bool caps_vce_pg;
175 bool caps_samu_pg;
176 bool caps_acp_pg;
177 bool caps_stable_p_state;
178 bool caps_enable_dfs_bypass;
179 bool caps_sclk_ds;
180 struct radeon_ps current_rps;
181 struct kv_ps current_ps;
182 struct radeon_ps requested_rps;
183 struct kv_ps requested_ps;
184};
185
186
187/* kv_smc.c */
188int kv_notify_message_to_smu(struct radeon_device *rdev, u32 id);
189int kv_dpm_get_enable_mask(struct radeon_device *rdev, u32 *enable_mask);
190int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
191 PPSMC_Msg msg, u32 parameter);
192int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
193 u32 *value, u32 limit);
194int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable);
195int kv_copy_bytes_to_smc(struct radeon_device *rdev,
196 u32 smc_start_address,
197 const u8 *src, u32 byte_count, u32 limit);
198
199#endif
diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c
new file mode 100644
index 000000000000..34a226d7e34a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/kv_smc.c
@@ -0,0 +1,207 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "radeon.h"
27#include "cikd.h"
28#include "kv_dpm.h"
29
30int kv_notify_message_to_smu(struct radeon_device *rdev, u32 id)
31{
32 u32 i;
33 u32 tmp = 0;
34
35 WREG32(SMC_MESSAGE_0, id & SMC_MSG_MASK);
36
37 for (i = 0; i < rdev->usec_timeout; i++) {
38 if ((RREG32(SMC_RESP_0) & SMC_RESP_MASK) != 0)
39 break;
40 udelay(1);
41 }
42 tmp = RREG32(SMC_RESP_0) & SMC_RESP_MASK;
43
44 if (tmp != 1) {
45 if (tmp == 0xFF)
46 return -EINVAL;
47 else if (tmp == 0xFE)
48 return -EINVAL;
49 }
50
51 return 0;
52}
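/*
 * Usage sketch (the caller shown is hypothetical, not part of this
 * file): a parameterless SMU message is fire-and-check -- write the id,
 * then rely on the response-register poll above for the result.
 */
static int example_enable_nb_dpm(struct radeon_device *rdev)
{
	int ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);

	if (ret)
		DRM_ERROR("SMU did not ack NBDPM_Enable\n");
	return ret;
}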
53
54int kv_dpm_get_enable_mask(struct radeon_device *rdev, u32 *enable_mask)
55{
56 int ret;
57
58 ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_SCLKDPM_GetEnabledMask);
59
60 if (ret == 0)
61 *enable_mask = RREG32_SMC(SMC_SYSCON_MSG_ARG_0);
62
63 return ret;
64}
65
66int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
67 PPSMC_Msg msg, u32 parameter)
68{
69
70 WREG32(SMC_MSG_ARG_0, parameter);
71
72 return kv_notify_message_to_smu(rdev, msg);
73}
74
75static int kv_set_smc_sram_address(struct radeon_device *rdev,
76 u32 smc_address, u32 limit)
77{
78 if (smc_address & 3)
79 return -EINVAL;
80 if ((smc_address + 3) > limit)
81 return -EINVAL;
82
83 WREG32(SMC_IND_INDEX_0, smc_address);
84 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
85
86 return 0;
87}
88
89int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
90 u32 *value, u32 limit)
91{
92 int ret;
93
94 ret = kv_set_smc_sram_address(rdev, smc_address, limit);
95 if (ret)
96 return ret;
97
98 *value = RREG32(SMC_IND_DATA_0);
99 return 0;
100}
101
102int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable)
103{
104 if (enable)
105 return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Enable);
106 else
107 return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable);
108}
109
110int kv_copy_bytes_to_smc(struct radeon_device *rdev,
111 u32 smc_start_address,
112 const u8 *src, u32 byte_count, u32 limit)
113{
114 int ret;
115 u32 data, original_data, addr, extra_shift, t_byte, count, mask;
116
117 if ((smc_start_address + byte_count) > limit)
118 return -EINVAL;
119
120 addr = smc_start_address;
121 t_byte = addr & 3;
122
123 /* RMW for the initial bytes */
124 if (t_byte != 0) {
125 addr -= t_byte;
126
127 ret = kv_set_smc_sram_address(rdev, addr, limit);
128 if (ret)
129 return ret;
130
131 original_data = RREG32(SMC_IND_DATA_0);
132
133 data = 0;
134 mask = 0;
135 count = 4;
136 while (count > 0) {
137 if (t_byte > 0) {
138 mask = (mask << 8) | 0xff;
139 t_byte--;
140 } else if (byte_count > 0) {
141 data = (data << 8) + *src++;
142 byte_count--;
143 mask <<= 8;
144 } else {
145 data <<= 8;
146 mask = (mask << 8) | 0xff;
147 }
148 count--;
149 }
150
151 data |= original_data & mask;
152
153 ret = kv_set_smc_sram_address(rdev, addr, limit);
154 if (ret)
155 return ret;
156
157 WREG32(SMC_IND_DATA_0, data);
158
159 addr += 4;
160 }
161
162 while (byte_count >= 4) {
163 /* SMC address space is BE */
164 data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
165
166 ret = kv_set_smc_sram_address(rdev, addr, limit);
167 if (ret)
168 return ret;
169
170 WREG32(SMC_IND_DATA_0, data);
171
172 src += 4;
173 byte_count -= 4;
174 addr += 4;
175 }
176
177 /* RMW for the final bytes */
178 if (byte_count > 0) {
179 data = 0;
180
181 ret = kv_set_smc_sram_address(rdev, addr, limit);
182 if (ret)
183 return ret;
184
185 original_data = RREG32(SMC_IND_DATA_0);
186
187 extra_shift = 8 * (4 - byte_count);
188
189 while (byte_count > 0) {
190 /* SMC address space is BE */
191 data = (data << 8) + *src++;
192 byte_count--;
193 }
194
195 data <<= extra_shift;
196
197 data |= (original_data & ~((~0UL) << extra_shift));
198
199 ret = kv_set_smc_sram_address(rdev, addr, limit);
200 if (ret)
201 return ret;
202
203 WREG32(SMC_IND_DATA_0, data);
204 }
205 return 0;
206}
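/*
 * Standalone sketch (plain C, not driver code) of the head
 * read-modify-write masking used above: merge incoming bytes into a
 * 32-bit big-endian word while preserving the bytes outside the copied
 * range. mask marks the bytes to keep from the original word.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t merge_head(uint32_t original, const uint8_t *src,
			   unsigned t_byte, unsigned byte_count)
{
	uint32_t data = 0, mask = 0;
	unsigned count = 4;

	while (count--) {
		if (t_byte) {			/* leading byte: keep original */
			mask = (mask << 8) | 0xff;
			data <<= 8;
			t_byte--;
		} else if (byte_count) {	/* payload byte: take new */
			data = (data << 8) | *src++;
			mask <<= 8;
			byte_count--;
		} else {			/* trailing byte: keep original */
			mask = (mask << 8) | 0xff;
			data <<= 8;
		}
	}
	return data | (original & mask);
}

int main(void)
{
	const uint8_t src[2] = { 0xAA, 0xBB };

	/* replace bytes 1..2 of big-endian 0x11223344 -> 0x11AABB44 */
	printf("%08x\n", merge_head(0x11223344u, src, 1, 2));
	return 0;
}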
207
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 56bd4f3be4fe..93c1f9ef5da9 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -35,7 +35,7 @@
35#include "radeon_ucode.h" 35#include "radeon_ucode.h"
36#include "clearstate_cayman.h" 36#include "clearstate_cayman.h"
37 37
38static u32 tn_rlc_save_restore_register_list[] = 38static const u32 tn_rlc_save_restore_register_list[] =
39{ 39{
40 0x98fc, 40 0x98fc,
41 0x98f0, 41 0x98f0,
@@ -160,7 +160,6 @@ static u32 tn_rlc_save_restore_register_list[] =
160 0x9830, 160 0x9830,
161 0x802c, 161 0x802c,
162}; 162};
163static u32 tn_rlc_save_restore_register_list_size = ARRAY_SIZE(tn_rlc_save_restore_register_list);
164 163
165extern bool evergreen_is_display_hung(struct radeon_device *rdev); 164extern bool evergreen_is_display_hung(struct radeon_device *rdev);
166extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); 165extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
@@ -175,6 +174,11 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
175extern void evergreen_program_aspm(struct radeon_device *rdev); 174extern void evergreen_program_aspm(struct radeon_device *rdev);
176extern void sumo_rlc_fini(struct radeon_device *rdev); 175extern void sumo_rlc_fini(struct radeon_device *rdev);
177extern int sumo_rlc_init(struct radeon_device *rdev); 176extern int sumo_rlc_init(struct radeon_device *rdev);
177extern void cayman_dma_vm_set_page(struct radeon_device *rdev,
178 struct radeon_ib *ib,
179 uint64_t pe,
180 uint64_t addr, unsigned count,
181 uint32_t incr, uint32_t flags);
178 182
179/* Firmware Names */ 183/* Firmware Names */
180MODULE_FIRMWARE("radeon/BARTS_pfp.bin"); 184MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
@@ -794,9 +798,13 @@ int ni_init_microcode(struct radeon_device *rdev)
794 if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) { 798 if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
795 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); 799 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
796 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); 800 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
797 if (err) 801 if (err) {
798 goto out; 802 printk(KERN_ERR
799 if (rdev->smc_fw->size != smc_req_size) { 803 "smc: error loading firmware \"%s\"\n",
804 fw_name);
805 release_firmware(rdev->smc_fw);
806 rdev->smc_fw = NULL;
807 } else if (rdev->smc_fw->size != smc_req_size) {
800 printk(KERN_ERR 808 printk(KERN_ERR
801 "ni_mc: Bogus length %zu in firmware \"%s\"\n", 809 "ni_mc: Bogus length %zu in firmware \"%s\"\n",
802 rdev->mc_fw->size, fw_name); 810 rdev->mc_fw->size, fw_name);
@@ -1370,23 +1378,6 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1370 radeon_ring_write(ring, 10); /* poll interval */ 1378 radeon_ring_write(ring, 10); /* poll interval */
1371} 1379}
1372 1380
1373void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
1374 struct radeon_ring *ring,
1375 struct radeon_semaphore *semaphore,
1376 bool emit_wait)
1377{
1378 uint64_t addr = semaphore->gpu_addr;
1379
1380 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
1381 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
1382
1383 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
1384 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
1385
1386 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
1387 radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
1388}
1389
1390static void cayman_cp_enable(struct radeon_device *rdev, bool enable) 1381static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
1391{ 1382{
1392 if (enable) 1383 if (enable)
@@ -1560,8 +1551,8 @@ static int cayman_cp_resume(struct radeon_device *rdev)
1560 1551
1561 /* Set ring buffer size */ 1552 /* Set ring buffer size */
1562 ring = &rdev->ring[ridx[i]]; 1553 ring = &rdev->ring[ridx[i]];
1563 rb_cntl = drm_order(ring->ring_size / 8); 1554 rb_cntl = order_base_2(ring->ring_size / 8);
1564 rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8; 1555 rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
1565#ifdef __BIG_ENDIAN 1556#ifdef __BIG_ENDIAN
1566 rb_cntl |= BUF_SWAP_32BIT; 1557 rb_cntl |= BUF_SWAP_32BIT;
1567#endif 1558#endif
@@ -1609,186 +1600,7 @@ static int cayman_cp_resume(struct radeon_device *rdev)
1609 return 0; 1600 return 0;
1610} 1601}
1611 1602
1612/* 1603u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
1613 * DMA
1614 * Starting with R600, the GPU has an asynchronous
1615 * DMA engine. The programming model is very similar
1616 * to the 3D engine (ring buffer, IBs, etc.), but the
1617 * DMA controller has its own packet format that is
1618 * different from the PM4 format used by the 3D engine.
1619 * It supports copying data, writing embedded data,
1620 * solid fills, and a number of other things. It also
1621 * has support for tiling/detiling of buffers.
1622 * Cayman and newer support two asynchronous DMA engines.
1623 */
1624/**
1625 * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
1626 *
1627 * @rdev: radeon_device pointer
1628 * @ib: IB object to schedule
1629 *
1630 * Schedule an IB in the DMA ring (cayman-SI).
1631 */
1632void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
1633 struct radeon_ib *ib)
1634{
1635 struct radeon_ring *ring = &rdev->ring[ib->ring];
1636
1637 if (rdev->wb.enabled) {
1638 u32 next_rptr = ring->wptr + 4;
1639 while ((next_rptr & 7) != 5)
1640 next_rptr++;
1641 next_rptr += 3;
1642 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
1643 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
1644 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
1645 radeon_ring_write(ring, next_rptr);
1646 }
1647
1648 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
1649 * Pad as necessary with NOPs.
1650 */
1651 while ((ring->wptr & 7) != 5)
1652 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1653 radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
1654 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
1655 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
1656
1657}
1658
1659/**
1660 * cayman_dma_stop - stop the async dma engines
1661 *
1662 * @rdev: radeon_device pointer
1663 *
1664 * Stop the async dma engines (cayman-SI).
1665 */
1666void cayman_dma_stop(struct radeon_device *rdev)
1667{
1668 u32 rb_cntl;
1669
1670 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1671
1672 /* dma0 */
1673 rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
1674 rb_cntl &= ~DMA_RB_ENABLE;
1675 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
1676
1677 /* dma1 */
1678 rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
1679 rb_cntl &= ~DMA_RB_ENABLE;
1680 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
1681
1682 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
1683 rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
1684}
1685
1686/**
1687 * cayman_dma_resume - setup and start the async dma engines
1688 *
1689 * @rdev: radeon_device pointer
1690 *
1691 * Set up the DMA ring buffers and enable them. (cayman-SI).
1692 * Returns 0 for success, error for failure.
1693 */
1694int cayman_dma_resume(struct radeon_device *rdev)
1695{
1696 struct radeon_ring *ring;
1697 u32 rb_cntl, dma_cntl, ib_cntl;
1698 u32 rb_bufsz;
1699 u32 reg_offset, wb_offset;
1700 int i, r;
1701
1702 /* Reset dma */
1703 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
1704 RREG32(SRBM_SOFT_RESET);
1705 udelay(50);
1706 WREG32(SRBM_SOFT_RESET, 0);
1707
1708 for (i = 0; i < 2; i++) {
1709 if (i == 0) {
1710 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1711 reg_offset = DMA0_REGISTER_OFFSET;
1712 wb_offset = R600_WB_DMA_RPTR_OFFSET;
1713 } else {
1714 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
1715 reg_offset = DMA1_REGISTER_OFFSET;
1716 wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
1717 }
1718
1719 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
1720 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
1721
1722 /* Set ring buffer size in dwords */
1723 rb_bufsz = drm_order(ring->ring_size / 4);
1724 rb_cntl = rb_bufsz << 1;
1725#ifdef __BIG_ENDIAN
1726 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
1727#endif
1728 WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
1729
1730 /* Initialize the ring buffer's read and write pointers */
1731 WREG32(DMA_RB_RPTR + reg_offset, 0);
1732 WREG32(DMA_RB_WPTR + reg_offset, 0);
1733
1734 /* set the wb address whether it's enabled or not */
1735 WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
1736 upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
1737 WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
1738 ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
1739
1740 if (rdev->wb.enabled)
1741 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
1742
1743 WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
1744
1745 /* enable DMA IBs */
1746 ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
1747#ifdef __BIG_ENDIAN
1748 ib_cntl |= DMA_IB_SWAP_ENABLE;
1749#endif
1750 WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
1751
1752 dma_cntl = RREG32(DMA_CNTL + reg_offset);
1753 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
1754 WREG32(DMA_CNTL + reg_offset, dma_cntl);
1755
1756 ring->wptr = 0;
1757 WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
1758
1759 ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
1760
1761 WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
1762
1763 ring->ready = true;
1764
1765 r = radeon_ring_test(rdev, ring->idx, ring);
1766 if (r) {
1767 ring->ready = false;
1768 return r;
1769 }
1770 }
1771
1772 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
1773
1774 return 0;
1775}
1776
1777/**
1778 * cayman_dma_fini - tear down the async dma engines
1779 *
1780 * @rdev: radeon_device pointer
1781 *
1782 * Stop the async dma engines and free the rings (cayman-SI).
1783 */
1784void cayman_dma_fini(struct radeon_device *rdev)
1785{
1786 cayman_dma_stop(rdev);
1787 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
1788 radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
1789}
1790
1791static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
1792{ 1604{
1793 u32 reset_mask = 0; 1605 u32 reset_mask = 0;
1794 u32 tmp; 1606 u32 tmp;
@@ -2041,34 +1853,6 @@ bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2041 return radeon_ring_test_lockup(rdev, ring); 1853 return radeon_ring_test_lockup(rdev, ring);
2042} 1854}
2043 1855
2044/**
2045 * cayman_dma_is_lockup - Check if the DMA engine is locked up
2046 *
2047 * @rdev: radeon_device pointer
2048 * @ring: radeon_ring structure holding ring information
2049 *
2050 * Check if the async DMA engine is locked up.
2051 * Returns true if the engine appears to be locked up, false if not.
2052 */
2053bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2054{
2055 u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
2056 u32 mask;
2057
2058 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
2059 mask = RADEON_RESET_DMA;
2060 else
2061 mask = RADEON_RESET_DMA1;
2062
2063 if (!(reset_mask & mask)) {
2064 radeon_ring_lockup_update(ring);
2065 return false;
2066 }
2067 /* force ring activities */
2068 radeon_ring_force_activity(rdev, ring);
2069 return radeon_ring_test_lockup(rdev, ring);
2070}
2071
2072static int cayman_startup(struct radeon_device *rdev) 1856static int cayman_startup(struct radeon_device *rdev)
2073{ 1857{
2074 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 1858 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -2079,6 +1863,13 @@ static int cayman_startup(struct radeon_device *rdev)
2079 1863 /* enable aspm */
2080 1864 evergreen_program_aspm(rdev);
2081 1865
1866 /* scratch needs to be initialized before MC */
1867 r = r600_vram_scratch_init(rdev);
1868 if (r)
1869 return r;
1870
1871 evergreen_mc_program(rdev);
1872
2082 1873 if (rdev->flags & RADEON_IS_IGP) {
2083 1874 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2084 1875 r = ni_init_microcode(rdev);
@@ -2103,27 +1894,16 @@ static int cayman_startup(struct radeon_device *rdev)
2103 1894 }
2104 1895 }
2105 1896
2106 r = r600_vram_scratch_init(rdev);
2107 if (r)
2108 return r;
2109
2110 evergreen_mc_program(rdev);
2111 1897 r = cayman_pcie_gart_enable(rdev);
2112 1898 if (r)
2113 1899 return r;
2114 1900 cayman_gpu_init(rdev);
2115 1901
2116 r = evergreen_blit_init(rdev);
2117 if (r) {
2118 r600_blit_fini(rdev);
2119 rdev->asic->copy.copy = NULL;
2120 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
2121 }
2122
2123 1902 /* allocate rlc buffers */
2124 1903 if (rdev->flags & RADEON_IS_IGP) {
2125 1904 rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
2126 rdev->rlc.reg_list_size = tn_rlc_save_restore_register_list_size;
1905 rdev->rlc.reg_list_size =
1906 (u32)ARRAY_SIZE(tn_rlc_save_restore_register_list);
2127 1907 rdev->rlc.cs_data = cayman_cs_data;
2128 1908 r = sumo_rlc_init(rdev);
2129 1909 if (r) {
@@ -2143,7 +1923,7 @@ static int cayman_startup(struct radeon_device *rdev)
2143 1923 return r;
2144 1924 }
2145 1925
2146 r = rv770_uvd_resume(rdev);
1926 r = uvd_v2_2_resume(rdev);
2147 1927 if (!r) {
2148 1928 r = radeon_fence_driver_start_ring(rdev,
2149 1929 R600_RING_TYPE_UVD_INDEX);
@@ -2194,7 +1974,7 @@ static int cayman_startup(struct radeon_device *rdev)
2194 1974
2195 1975 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
2196 1976 CP_RB0_RPTR, CP_RB0_WPTR,
2197 0, 0xfffff, RADEON_CP_PACKET2);
1977 RADEON_CP_PACKET2);
2198 1978 if (r)
2199 1979 return r;
2200 1980
@@ -2202,7 +1982,7 @@ static int cayman_startup(struct radeon_device *rdev)
2202 1982 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
2203 1983 DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
2204 1984 DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
2205 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1985 DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
2206 1986 if (r)
2207 1987 return r;
2208 1988
@@ -2210,7 +1990,7 @@ static int cayman_startup(struct radeon_device *rdev)
2210 1990 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
2211 1991 DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
2212 1992 DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
2213 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1993 DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
2214 1994 if (r)
2215 1995 return r;
2216 1996
@@ -2227,12 +2007,11 @@ static int cayman_startup(struct radeon_device *rdev)
2227 2007
2228 2008 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2229 2009 if (ring->ring_size) {
2230 r = radeon_ring_init(rdev, ring, ring->ring_size,
2231 R600_WB_UVD_RPTR_OFFSET,
2010 r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
2232 2011 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
2233 0, 0xfffff, RADEON_CP_PACKET2);
2012 RADEON_CP_PACKET2);
2234 2013 if (!r)
2235 r = r600_uvd_init(rdev);
2014 r = uvd_v1_0_init(rdev);
2236 2015 if (r)
2237 2016 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
2238 2017 }
@@ -2249,9 +2028,15 @@ static int cayman_startup(struct radeon_device *rdev)
2249 2028 return r;
2250 2029 }
2251 2030
2252 r = r600_audio_init(rdev);
2253 if (r)
2254 return r;
2031 if (ASIC_IS_DCE6(rdev)) {
2032 r = dce6_audio_init(rdev);
2033 if (r)
2034 return r;
2035 } else {
2036 r = r600_audio_init(rdev);
2037 if (r)
2038 return r;
2039 }
2255 2040
2256 2041 return 0;
2257 2042}
@@ -2282,11 +2067,14 @@ int cayman_resume(struct radeon_device *rdev)
2282 2067
2283 2068int cayman_suspend(struct radeon_device *rdev)
2284 2069{
2285 r600_audio_fini(rdev);
2070 if (ASIC_IS_DCE6(rdev))
2071 dce6_audio_fini(rdev);
2072 else
2073 r600_audio_fini(rdev);
2286 2074 radeon_vm_manager_fini(rdev);
2287 2075 cayman_cp_enable(rdev, false);
2288 2076 cayman_dma_stop(rdev);
2289 r600_uvd_rbc_stop(rdev);
2077 uvd_v1_0_fini(rdev);
2290 2078 radeon_uvd_suspend(rdev);
2291 2079 evergreen_irq_suspend(rdev);
2292 2080 radeon_wb_disable(rdev);
@@ -2408,7 +2196,6 @@ int cayman_init(struct radeon_device *rdev)
2408 2196
2409 2197void cayman_fini(struct radeon_device *rdev)
2410 2198{
2411 r600_blit_fini(rdev);
2412 2199 cayman_cp_fini(rdev);
2413 2200 cayman_dma_fini(rdev);
2414 2201 r600_irq_fini(rdev);
@@ -2418,6 +2205,7 @@ void cayman_fini(struct radeon_device *rdev)
2418 2205 radeon_vm_manager_fini(rdev);
2419 2206 radeon_ib_pool_fini(rdev);
2420 2207 radeon_irq_kms_fini(rdev);
2208 uvd_v1_0_fini(rdev);
2421 2209 radeon_uvd_fini(rdev);
2422 2210 cayman_pcie_gart_fini(rdev);
2423 2211 r600_vram_scratch_fini(rdev);
@@ -2678,61 +2466,7 @@ void cayman_vm_set_page(struct radeon_device *rdev,
2678 2466 }
2679 2467 }
2680 2468 } else {
2681 if ((flags & RADEON_VM_PAGE_SYSTEM) ||
2682 (count == 1)) {
2683 while (count) {
2684 ndw = count * 2;
2685 if (ndw > 0xFFFFE)
2686 ndw = 0xFFFFE;
2687
2688 /* for non-physically contiguous pages (system) */
2689 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
2690 ib->ptr[ib->length_dw++] = pe;
2691 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
2692 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
2693 if (flags & RADEON_VM_PAGE_SYSTEM) {
2694 value = radeon_vm_map_gart(rdev, addr);
2695 value &= 0xFFFFFFFFFFFFF000ULL;
2696 } else if (flags & RADEON_VM_PAGE_VALID) {
2697 value = addr;
2698 } else {
2699 value = 0;
2700 }
2701 addr += incr;
2702 value |= r600_flags;
2703 ib->ptr[ib->length_dw++] = value;
2704 ib->ptr[ib->length_dw++] = upper_32_bits(value);
2705 }
2706 }
2707 while (ib->length_dw & 0x7)
2708 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
2709 } else {
2710 while (count) {
2711 ndw = count * 2;
2712 if (ndw > 0xFFFFE)
2713 ndw = 0xFFFFE;
2714
2715 if (flags & RADEON_VM_PAGE_VALID)
2716 value = addr;
2717 else
2718 value = 0;
2719 /* for physically contiguous pages (vram) */
2720 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
2721 ib->ptr[ib->length_dw++] = pe; /* dst addr */
2722 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
2723 ib->ptr[ib->length_dw++] = r600_flags; /* mask */
2724 ib->ptr[ib->length_dw++] = 0;
2725 ib->ptr[ib->length_dw++] = value; /* value */
2726 ib->ptr[ib->length_dw++] = upper_32_bits(value);
2727 ib->ptr[ib->length_dw++] = incr; /* increment size */
2728 ib->ptr[ib->length_dw++] = 0;
2729 pe += ndw * 4;
2730 addr += (ndw / 2) * incr;
2731 count -= ndw / 2;
2732 }
2733 }
2734 while (ib->length_dw & 0x7)
2735 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
2469 cayman_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
2736 2470 }
2737 2471}
2738 2472
@@ -2766,26 +2500,3 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
2766 2500 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2767 2501 radeon_ring_write(ring, 0x0);
2768 2502}
2769
2770void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
2771{
2772 struct radeon_ring *ring = &rdev->ring[ridx];
2773
2774 if (vm == NULL)
2775 return;
2776
2777 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
2778 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
2779 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
2780
2781 /* flush hdp cache */
2782 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
2783 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
2784 radeon_ring_write(ring, 1);
2785
2786 /* bits 0-7 are the VM contexts0-7 */
2787 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
2788 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
2789 radeon_ring_write(ring, 1 << vm->id);
2790}
2791
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
new file mode 100644
index 000000000000..dd6e9688fbef
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -0,0 +1,338 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <drm/drmP.h>
25#include "radeon.h"
26#include "radeon_asic.h"
27#include "nid.h"
28
29u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);
30
31/*
32 * DMA
33 * Starting with R600, the GPU has an asynchronous
34 * DMA engine. The programming model is very similar
35 * to the 3D engine (ring buffer, IBs, etc.), but the
36 * DMA controller has its own packet format that is
37 * different from the PM4 format used by the 3D engine.
38 * It supports copying data, writing embedded data,
39 * solid fills, and a number of other things. It also
40 * has support for tiling/detiling of buffers.
41 * Cayman and newer support two asynchronous DMA engines.
42 */
43
44/**
45 * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
46 *
47 * @rdev: radeon_device pointer
48 * @ib: IB object to schedule
49 *
50 * Schedule an IB in the DMA ring (cayman-SI).
51 */
52void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
53 struct radeon_ib *ib)
54{
55 struct radeon_ring *ring = &rdev->ring[ib->ring];
56
57 if (rdev->wb.enabled) {
58 u32 next_rptr = ring->wptr + 4;
59 while ((next_rptr & 7) != 5)
60 next_rptr++;
61 next_rptr += 3;
62 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
63 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
64 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
65 radeon_ring_write(ring, next_rptr);
66 }
67
68 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
69 * Pad as necessary with NOPs.
70 */
71 while ((ring->wptr & 7) != 5)
72 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
73 radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
74 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
75 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
76
77}
78
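The padding rule above keeps the 3-dword INDIRECT_BUFFER packet ending on an 8-dword boundary: NOPs are written until (wptr & 7) == 5, so the packet's last dword lands at a multiple of 8. A minimal user-space sketch of the same arithmetic (an illustration, not driver code):

#include <stdio.h>

static unsigned int pad_to_ib_start(unsigned int wptr)
{
	while ((wptr & 7) != 5)	/* each NOP advances wptr by one dword */
		wptr++;
	return wptr;
}

int main(void)
{
	unsigned int wptr = 11;			/* arbitrary current write pointer */
	unsigned int start = pad_to_ib_start(wptr);

	printf("pad from %u to %u, IB packet ends at %u\n",
	       wptr, start, start + 3);	/* 13 + 3 = 16, 8-dword aligned */
	return 0;
}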
79/**
80 * cayman_dma_stop - stop the async dma engines
81 *
82 * @rdev: radeon_device pointer
83 *
84 * Stop the async dma engines (cayman-SI).
85 */
86void cayman_dma_stop(struct radeon_device *rdev)
87{
88 u32 rb_cntl;
89
90 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
91
92 /* dma0 */
93 rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
94 rb_cntl &= ~DMA_RB_ENABLE;
95 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
96
97 /* dma1 */
98 rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
99 rb_cntl &= ~DMA_RB_ENABLE;
100 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
101
102 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
103 rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
104}
105
106/**
107 * cayman_dma_resume - setup and start the async dma engines
108 *
109 * @rdev: radeon_device pointer
110 *
111 * Set up the DMA ring buffers and enable them. (cayman-SI).
112 * Returns 0 for success, error for failure.
113 */
114int cayman_dma_resume(struct radeon_device *rdev)
115{
116 struct radeon_ring *ring;
117 u32 rb_cntl, dma_cntl, ib_cntl;
118 u32 rb_bufsz;
119 u32 reg_offset, wb_offset;
120 int i, r;
121
122 /* Reset dma */
123 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
124 RREG32(SRBM_SOFT_RESET);
125 udelay(50);
126 WREG32(SRBM_SOFT_RESET, 0);
127
128 for (i = 0; i < 2; i++) {
129 if (i == 0) {
130 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
131 reg_offset = DMA0_REGISTER_OFFSET;
132 wb_offset = R600_WB_DMA_RPTR_OFFSET;
133 } else {
134 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
135 reg_offset = DMA1_REGISTER_OFFSET;
136 wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
137 }
138
139 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
140 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
141
142 /* Set ring buffer size in dwords */
143 rb_bufsz = order_base_2(ring->ring_size / 4);
144 rb_cntl = rb_bufsz << 1;
145#ifdef __BIG_ENDIAN
146 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
147#endif
148 WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
149
150 /* Initialize the ring buffer's read and write pointers */
151 WREG32(DMA_RB_RPTR + reg_offset, 0);
152 WREG32(DMA_RB_WPTR + reg_offset, 0);
153
154 /* set the wb address whether it's enabled or not */
155 WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
156 upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
157 WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
158 ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
159
160 if (rdev->wb.enabled)
161 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
162
163 WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
164
165 /* enable DMA IBs */
166 ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
167#ifdef __BIG_ENDIAN
168 ib_cntl |= DMA_IB_SWAP_ENABLE;
169#endif
170 WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
171
172 dma_cntl = RREG32(DMA_CNTL + reg_offset);
173 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
174 WREG32(DMA_CNTL + reg_offset, dma_cntl);
175
176 ring->wptr = 0;
177 WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
178
179 ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
180
181 WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
182
183 ring->ready = true;
184
185 r = radeon_ring_test(rdev, ring->idx, ring);
186 if (r) {
187 ring->ready = false;
188 return r;
189 }
190 }
191
192 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
193
194 return 0;
195}
196
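In the resume path above, the RPTR writeback address is split across two registers: the LO register takes the dword-aligned low 32 bits and the HI register takes only what survives the & 0xFF mask, which suggests a 40-bit GPU address space. A small sketch of that split, with upper_32_bits() re-implemented locally to mirror the kernel macro:

#include <stdint.h>
#include <stdio.h>

#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))	/* local stand-in */

int main(void)
{
	uint64_t wb_addr = 0x12345678ABCDull;	/* made-up writeback GPU address */
	uint32_t hi = upper_32_bits(wb_addr) & 0xFF;	/* bits 39:32 only */
	uint32_t lo = (uint32_t)wb_addr & 0xFFFFFFFC;	/* dword aligned */

	printf("RPTR_ADDR_HI=0x%02x RPTR_ADDR_LO=0x%08x\n", hi, lo);
	return 0;
}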
197/**
198 * cayman_dma_fini - tear down the async dma engines
199 *
200 * @rdev: radeon_device pointer
201 *
202 * Stop the async dma engines and free the rings (cayman-SI).
203 */
204void cayman_dma_fini(struct radeon_device *rdev)
205{
206 cayman_dma_stop(rdev);
207 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
208 radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
209}
210
211/**
212 * cayman_dma_is_lockup - Check if the DMA engine is locked up
213 *
214 * @rdev: radeon_device pointer
215 * @ring: radeon_ring structure holding ring information
216 *
217 * Check if the async DMA engine is locked up.
218 * Returns true if the engine appears to be locked up, false if not.
219 */
220bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
221{
222 u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
223 u32 mask;
224
225 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
226 mask = RADEON_RESET_DMA;
227 else
228 mask = RADEON_RESET_DMA1;
229
230 if (!(reset_mask & mask)) {
231 radeon_ring_lockup_update(ring);
232 return false;
233 }
234 /* force ring activities */
235 radeon_ring_force_activity(rdev, ring);
236 return radeon_ring_test_lockup(rdev, ring);
237}
238

239/**
240 * cayman_dma_vm_set_page - update the page tables using the DMA
241 *
242 * @rdev: radeon_device pointer
243 * @ib: indirect buffer to fill with commands
244 * @pe: addr of the page entry
245 * @addr: dst addr to write into pe
246 * @count: number of page entries to update
247 * @incr: increase next addr by incr bytes
248 * @flags: access flags
249 * @r600_flags: hw access flags
250 *
251 * Update the page tables using the DMA (cayman/TN).
252 */
253void cayman_dma_vm_set_page(struct radeon_device *rdev,
254 struct radeon_ib *ib,
255 uint64_t pe,
256 uint64_t addr, unsigned count,
257 uint32_t incr, uint32_t flags)
258{
259 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
260 uint64_t value;
261 unsigned ndw;
262
263 if ((flags & RADEON_VM_PAGE_SYSTEM) || (count == 1)) {
264 while (count) {
265 ndw = count * 2;
266 if (ndw > 0xFFFFE)
267 ndw = 0xFFFFE;
268
269 /* for non-physically contiguous pages (system) */
270 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
271 ib->ptr[ib->length_dw++] = pe;
272 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
273 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
274 if (flags & RADEON_VM_PAGE_SYSTEM) {
275 value = radeon_vm_map_gart(rdev, addr);
276 value &= 0xFFFFFFFFFFFFF000ULL;
277 } else if (flags & RADEON_VM_PAGE_VALID) {
278 value = addr;
279 } else {
280 value = 0;
281 }
282 addr += incr;
283 value |= r600_flags;
284 ib->ptr[ib->length_dw++] = value;
285 ib->ptr[ib->length_dw++] = upper_32_bits(value);
286 }
287 }
288 } else {
289 while (count) {
290 ndw = count * 2;
291 if (ndw > 0xFFFFE)
292 ndw = 0xFFFFE;
293
294 if (flags & RADEON_VM_PAGE_VALID)
295 value = addr;
296 else
297 value = 0;
298 /* for physically contiguous pages (vram) */
299 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
300 ib->ptr[ib->length_dw++] = pe; /* dst addr */
301 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
302 ib->ptr[ib->length_dw++] = r600_flags; /* mask */
303 ib->ptr[ib->length_dw++] = 0;
304 ib->ptr[ib->length_dw++] = value; /* value */
305 ib->ptr[ib->length_dw++] = upper_32_bits(value);
306 ib->ptr[ib->length_dw++] = incr; /* increment size */
307 ib->ptr[ib->length_dw++] = 0;
308 pe += ndw * 4;
309 addr += (ndw / 2) * incr;
310 count -= ndw / 2;
311 }
312 }
313 while (ib->length_dw & 0x7)
314 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
315}
316
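For the DMA_PACKET_WRITE path in cayman_dma_vm_set_page() above, each chunk costs a 3-dword header (packet word, destination low, destination high) plus two payload dwords per page-table entry, with ndw capped at 0xFFFFE. A standalone sketch of that accounting, using a made-up entry count:

#include <stdio.h>

int main(void)
{
	unsigned count = 1000000;	/* hypothetical number of PTEs to write */
	unsigned total_dw = 0;

	while (count) {
		unsigned ndw = count * 2;	/* two dwords per 64-bit PTE */
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;		/* packet size limit */
		total_dw += 3 + ndw;		/* header + dst lo/hi + payload */
		count -= ndw / 2;
	}
	printf("total ring dwords: %u\n", total_dw);
	return 0;
}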
317void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
318{
319 struct radeon_ring *ring = &rdev->ring[ridx];
320
321 if (vm == NULL)
322 return;
323
324 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
325 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
326 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
327
328 /* flush hdp cache */
329 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
330 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
331 radeon_ring_write(ring, 1);
332
333 /* bits 0-7 are the VM contexts0-7 */
334 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
335 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
336 radeon_ring_write(ring, 1 << vm->id);
337}
338
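cayman_dma_vm_flush() reaches registers through SRBM_WRITE packets whose second dword packs a byte-enable mask (0xf, all four bytes) in bits 19:16 and the target register's dword index in the low bits. A minimal sketch of that encoding; the VM_INVALIDATE_REQUEST offset is an assumed value for illustration:

#include <stdint.h>
#include <stdio.h>

#define VM_INVALIDATE_REQUEST 0x1478	/* assumed register byte offset */

/* second dword of an SRBM_WRITE: enable all four bytes, dword index */
static uint32_t srbm_write_addr(uint32_t byte_offset)
{
	return (0xf << 16) | (byte_offset >> 2);
}

int main(void)
{
	printf("SRBM_WRITE addr dword: 0x%08x\n",
	       srbm_write_addr(VM_INVALIDATE_REQUEST));
	return 0;
}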
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 559cf24d51af..f7b625c9e0e9 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -769,7 +769,8 @@ bool ni_dpm_vblank_too_short(struct radeon_device *rdev)
769 769{
770 770 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
771 771 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
772 u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
772 /* we never hit the non-gddr5 limit so disable it */
773 u32 switch_limit = pi->mem_gddr5 ? 450 : 0;
773 774
774 775 if (vblank_time < switch_limit)
775 776 return true;
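The switch_limit values are in microseconds: memory reclocking has to complete inside vertical blank, and 450 us is the assumed worst case for GDDR5 here. A rough user-space check with made-up 60 Hz timings:

#include <stdio.h>

int main(void)
{
	unsigned total_lines = 1125, active_lines = 1080;	/* made-up mode */
	unsigned frame_us = 1000000 / 60;			/* ~16666 us/frame */
	unsigned vblank_us = frame_us * (total_lines - active_lines) / total_lines;

	printf("vblank ~%u us -> %s\n", vblank_us,
	       vblank_us < 450 ? "too short, avoid mclk switch" : "ok");
	return 0;
}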
@@ -1054,10 +1055,6 @@ static int ni_restrict_performance_levels_before_switch(struct radeon_device *rd
1054 1055int ni_dpm_force_performance_level(struct radeon_device *rdev,
1055 1056 enum radeon_dpm_forced_level level)
1056 1057{
1057 struct radeon_ps *rps = rdev->pm.dpm.current_ps;
1058 struct ni_ps *ps = ni_get_ps(rps);
1059 u32 levels;
1060
1061 1058 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
1062 1059 if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
1063 1060 return -EINVAL;
@@ -1068,8 +1065,7 @@ int ni_dpm_force_performance_level(struct radeon_device *rdev,
1068 1065 if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
1069 1066 return -EINVAL;
1070 1067
1071 levels = ps->performance_level_count - 1;
1072 if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
1068 if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
1073 1069 return -EINVAL;
1074 1070 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
1075 1071 if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
@@ -4042,6 +4038,7 @@ static int ni_parse_power_table(struct radeon_device *rdev)
4042 4038 (power_state->v1.ucNonClockStateIndex *
4043 4039 power_info->pplib.ucNonClockSize));
4044 4040 if (power_info->pplib.ucStateEntrySize - 1) {
4041 u8 *idx;
4045 4042 ps = kzalloc(sizeof(struct ni_ps), GFP_KERNEL);
4046 4043 if (ps == NULL) {
4047 4044 kfree(rdev->pm.dpm.ps);
@@ -4051,12 +4048,12 @@ static int ni_parse_power_table(struct radeon_device *rdev)
4051 4048 ni_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
4052 4049 non_clock_info,
4053 4050 power_info->pplib.ucNonClockSize);
4051 idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
4054 4052 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
4055 4053 clock_info = (union pplib_clock_info *)
4056 4054 (mode_info->atom_context->bios + data_offset +
4057 4055 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
4058 (power_state->v1.ucClockStateIndices[j] *
4059 power_info->pplib.ucClockInfoSize));
4056 (idx[j] * power_info->pplib.ucClockInfoSize));
4060 4057 ni_parse_pplib_clock_info(rdev,
4061 4058 &rdev->pm.dpm.ps[i], j,
4062 4059 clock_info);
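The new idx pointer is not just style: ucClockStateIndices[] is a variable-length byte array at the tail of a packed BIOS structure, so entry j lives at a raw byte offset rather than behind a fixed-size array type. A standalone sketch of the same access pattern (data and sizes are made up; the struct mirrors ATOM_PPLIB_STATE):

#include <stdio.h>

#pragma pack(push, 1)
struct pplib_state {
	unsigned char ucNonClockStateIndex;
	unsigned char ucClockStateIndices[1];	/* really ucStateEntrySize - 1 bytes */
};
#pragma pack(pop)

int main(void)
{
	unsigned char raw[] = { 7, 3, 4, 5 };	/* made-up state blob */
	struct pplib_state *st = (struct pplib_state *)raw;
	unsigned char *idx = &st->ucClockStateIndices[0];	/* same trick as idx above */
	int j, entry_size = 4;			/* stands in for ucStateEntrySize */

	for (j = 0; j < entry_size - 1; j++)
		printf("clock state %d -> index %u\n", j, idx[j]);
	return 0;
}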
@@ -4072,9 +4069,6 @@ int ni_dpm_init(struct radeon_device *rdev)
4072 4069 struct rv7xx_power_info *pi;
4073 4070 struct evergreen_power_info *eg_pi;
4074 4071 struct ni_power_info *ni_pi;
4075 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
4076 u16 data_offset, size;
4077 u8 frev, crev;
4078 4072 struct atom_clock_dividers dividers;
4079 4073 int ret;
4080 4074
@@ -4167,16 +4161,7 @@ int ni_dpm_init(struct radeon_device *rdev)
4167 4161 eg_pi->vddci_control =
4168 4162 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
4169 4163
4170 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
4171 &frev, &crev, &data_offset)) {
4172 pi->sclk_ss = true;
4173 pi->mclk_ss = true;
4174 pi->dynamic_ss = true;
4175 } else {
4176 pi->sclk_ss = false;
4177 pi->mclk_ss = false;
4178 pi->dynamic_ss = true;
4179 }
4164 rv770_get_engine_memory_ss(rdev);
4180 4165
4181 4166 pi->asi = RV770_ASI_DFLT;
4182 4167 pi->pasi = CYPRESS_HASI_DFLT;
@@ -4193,8 +4178,7 @@ int ni_dpm_init(struct radeon_device *rdev)
4193 4178
4194 4179 pi->dynamic_pcie_gen2 = true;
4195 4180
4196 if (pi->gfx_clock_gating &&
4197 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
4181 if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
4198 4182 pi->thermal_protection = true;
4199 4183 else
4200 4184 pi->thermal_protection = false;
@@ -4288,6 +4272,12 @@ int ni_dpm_init(struct radeon_device *rdev)
4288 4272
4289 4273 ni_pi->use_power_boost_limit = true;
4290 4274
4275 /* make sure dc limits are valid */
4276 if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
4277 (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
4278 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
4279 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4280
4291 4281 return 0;
4292 4282}
4293 4283
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
index b5564a3645d2..682842804bce 100644
--- a/drivers/gpu/drm/radeon/ppsmc.h
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -99,11 +99,68 @@ typedef uint8_t PPSMC_Result;
99 99#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96)
100 100#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97)
101 101
102/* CI/KV/KB */
103#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
104#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E)
105#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F)
106#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
107#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
108#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
109#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
110#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
111#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
112#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137)
113#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138)
114#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139)
115#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a)
116#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
117#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140)
118#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141)
119#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145)
120#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146)
121#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147)
122#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148)
123#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a)
124#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e)
125#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f)
126#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150)
127#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151)
128#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154)
129#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155)
130#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156)
131#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157)
132#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158)
133#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159)
134#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a)
135#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b)
136#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f)
137#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162)
138#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167)
139#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169)
140#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a)
141#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185)
142#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186)
143#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187)
144#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188)
145#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189)
146#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A)
147#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B)
148#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C)
149#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
150#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
151#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
152
153#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
154#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
155
102 156/* TN */
103 157#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102)
104 158#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104)
105 159#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108)
106 160#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112)
161#define PPSMC_MSG_Voltage_Cntl_Enable ((uint32_t) 0x109)
162#define PPSMC_MSG_VCEPowerOFF ((uint32_t) 0x10e)
163#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f)
107 164#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
108 165#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
109 166#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124)
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
new file mode 100644
index 000000000000..da43ab328833
--- /dev/null
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -0,0 +1,682 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef _PPTABLE_H
24#define _PPTABLE_H
25
26#pragma pack(push, 1)
27
28typedef struct _ATOM_PPLIB_THERMALCONTROLLER
29
30{
31 UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
32 UCHAR ucI2cLine; // as interpreted by DAL I2C
33 UCHAR ucI2cAddress;
34 UCHAR ucFanParameters; // Fan Control Parameters.
35 UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
36 UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
37 UCHAR ucReserved; // ----
38 UCHAR ucFlags; // to be defined
39} ATOM_PPLIB_THERMALCONTROLLER;
40
41#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
42#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
43
44#define ATOM_PP_THERMALCONTROLLER_NONE 0
45#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
46#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
47#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
48#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
49#define ATOM_PP_THERMALCONTROLLER_LM64 5
50#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
51#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
52#define ATOM_PP_THERMALCONTROLLER_RV770 8
53#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
54#define ATOM_PP_THERMALCONTROLLER_KONG 10
55#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
56#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
57#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
58#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
59#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
60#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16
61#define ATOM_PP_THERMALCONTROLLER_LM96163 17
62#define ATOM_PP_THERMALCONTROLLER_CISLANDS 18
63#define ATOM_PP_THERMALCONTROLLER_KAVERI 19
64
65
66// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
67// We probably should reserve the bit 0x80 for this use.
68// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
69// The driver can pick the correct internal controller based on the ASIC.
70
71#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
72#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
73
74typedef struct _ATOM_PPLIB_STATE
75{
76 UCHAR ucNonClockStateIndex;
77 UCHAR ucClockStateIndices[1]; // variable-sized
78} ATOM_PPLIB_STATE;
79
80
81typedef struct _ATOM_PPLIB_FANTABLE
82{
83 UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same.
84 UCHAR ucTHyst; // Temperature hysteresis. Integer.
85 USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
86 USHORT usTMed; // The middle temperature where we change slopes.
87 USHORT usTHigh; // The high point above TMed for adjusting the second slope.
88 USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
89 USHORT usPWMMed; // The PWM value (in percent) at TMed.
90 USHORT usPWMHigh; // The PWM value at THigh.
91} ATOM_PPLIB_FANTABLE;
92
93typedef struct _ATOM_PPLIB_FANTABLE2
94{
95 ATOM_PPLIB_FANTABLE basicTable;
96 USHORT usTMax; // The max temperature
97} ATOM_PPLIB_FANTABLE2;
98
99typedef struct _ATOM_PPLIB_EXTENDEDHEADER
100{
101 USHORT usSize;
102 ULONG ulMaxEngineClock; // For Overdrive.
103 ULONG ulMaxMemoryClock; // For Overdrive.
104 // Add extra system parameters here, always adjust size to include all fields.
105 USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
106 USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table
107 USHORT usSAMUTableOffset; //points to ATOM_PPLIB_SAMU_Table
108 USHORT usPPMTableOffset; //points to ATOM_PPLIB_PPM_Table
109 USHORT usACPTableOffset; //points to ATOM_PPLIB_ACP_Table
110 USHORT usPowerTuneTableOffset; //points to ATOM_PPLIB_POWERTUNE_Table
111} ATOM_PPLIB_EXTENDEDHEADER;
112
113//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
114#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
115#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
116#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
117#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
118#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
119#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
120#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
121#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
122#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
123#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
124#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
125#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
126#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
127#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
128#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
129#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC.
130#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
131#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver supports BACO state.
132#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE 0x00040000 // Does the driver supports new CAC voltage table.
133#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY 0x00080000 // Does the driver supports revert GPIO5 polarity.
134#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17 0x00100000 // Does the driver supports thermal2GPIO17.
135#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Does the driver supports VR HOT GPIO Configurable.
136#define ATOM_PP_PLATFORM_CAP_TEMP_INVERSION 0x00400000 // Does the driver supports Temp Inversion feature.
137#define ATOM_PP_PLATFORM_CAP_EVV 0x00800000
138
139typedef struct _ATOM_PPLIB_POWERPLAYTABLE
140{
141 ATOM_COMMON_TABLE_HEADER sHeader;
142
143 UCHAR ucDataRevision;
144
145 UCHAR ucNumStates;
146 UCHAR ucStateEntrySize;
147 UCHAR ucClockInfoSize;
148 UCHAR ucNonClockSize;
149
150 // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
151 USHORT usStateArrayOffset;
152
153 // offset from start of this table to array of ASIC-specific structures,
154 // currently ATOM_PPLIB_CLOCK_INFO.
155 USHORT usClockInfoArrayOffset;
156
157 // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
158 USHORT usNonClockInfoArrayOffset;
159
160 USHORT usBackbiasTime; // in microseconds
161 USHORT usVoltageTime; // in microseconds
162 USHORT usTableSize; //the size of this structure, or the extended structure
163
164 ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_*
165
166 ATOM_PPLIB_THERMALCONTROLLER sThermalController;
167
168 USHORT usBootClockInfoOffset;
169 USHORT usBootNonClockInfoOffset;
170
171} ATOM_PPLIB_POWERPLAYTABLE;
172
173typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
174{
175 ATOM_PPLIB_POWERPLAYTABLE basicTable;
176 UCHAR ucNumCustomThermalPolicy;
177 USHORT usCustomThermalPolicyArrayOffset;
178}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
179
180typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
181{
182 ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
183 USHORT usFormatID; // To be used ONLY by PPGen.
184 USHORT usFanTableOffset;
185 USHORT usExtendendedHeaderOffset;
186} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
187
188typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
189{
190 ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
191 ULONG ulGoldenPPID; // PPGen use only
192 ULONG ulGoldenRevision; // PPGen use only
193 USHORT usVddcDependencyOnSCLKOffset;
194 USHORT usVddciDependencyOnMCLKOffset;
195 USHORT usVddcDependencyOnMCLKOffset;
196 USHORT usMaxClockVoltageOnDCOffset;
197 USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
198 USHORT usMvddDependencyOnMCLKOffset;
199} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
200
201typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
202{
203 ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
204 ULONG ulTDPLimit;
205 ULONG ulNearTDPLimit;
206 ULONG ulSQRampingThreshold;
207 USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
208 ULONG ulCACLeakage; // The iLeakage for driver calculated CAC leakage table
209 USHORT usTDPODLimit;
210 USHORT usLoadLineSlope; // in milliOhms * 100
211} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
212
213//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
214#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
215#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
216#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
217#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
218#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
219#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
220// 2, 4, 6, 7 are reserved
221
222#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
223#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
224#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
225#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
226#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
227#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
228#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
229#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
230#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
231#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
232#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
233#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
234#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
235
236//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
237#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
238#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
239#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D)
240
241//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
242#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
243#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
244
245// 0 is 2.5Gb/s, 1 is 5Gb/s
246#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
247#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
248
249// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
250#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
251#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
252
253// lookup into reduced refresh-rate table
254#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
255#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
256
257#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
258#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
259// 2-15 TBD as needed.
260
261#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
262#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
263
264#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
265
266#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
267
268//memory related flags
269#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000
270
271//M3 Arb //2bits, current 3 sets of parameters in total
272#define ATOM_PPLIB_M3ARB_MASK 0x00060000
273#define ATOM_PPLIB_M3ARB_SHIFT 17
274
275#define ATOM_PPLIB_ENABLE_DRR 0x00080000
276
277// remaining 16 bits are reserved
278typedef struct _ATOM_PPLIB_THERMAL_STATE
279{
280 UCHAR ucMinTemperature;
281 UCHAR ucMaxTemperature;
282 UCHAR ucThermalAction;
283}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
284
285// Contained in an array starting at the offset
286// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
287// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
288#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
289#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
290typedef struct _ATOM_PPLIB_NONCLOCK_INFO
291{
292 USHORT usClassification;
293 UCHAR ucMinTemperature;
294 UCHAR ucMaxTemperature;
295 ULONG ulCapsAndSettings;
296 UCHAR ucRequiredPower;
297 USHORT usClassification2;
298 ULONG ulVCLK;
299 ULONG ulDCLK;
300 UCHAR ucUnused[5];
301} ATOM_PPLIB_NONCLOCK_INFO;
302
303// Contained in an array starting at the offset
304// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
305// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
306typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
307{
308 USHORT usEngineClockLow;
309 UCHAR ucEngineClockHigh;
310
311 USHORT usMemoryClockLow;
312 UCHAR ucMemoryClockHigh;
313
314 USHORT usVDDC;
315 USHORT usUnused1;
316 USHORT usUnused2;
317
318 ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
319
320} ATOM_PPLIB_R600_CLOCK_INFO;
321
322// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
323#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
324#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
325#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
326#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
327#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
328#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
329
330typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
331
332{
333 USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
334 UCHAR ucLowEngineClockHigh;
335 USHORT usHighEngineClockLow; // High Engine clock in MHz.
336 UCHAR ucHighEngineClockHigh;
337 USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
338 UCHAR ucMemoryClockHigh; // Currently unused.
339 UCHAR ucPadding; // For proper alignment and size.
340 USHORT usVDDC; // For the 780, use: None, Low, High, Variable
341 UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
342 UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could
343 USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
344 ULONG ulFlags;
345} ATOM_PPLIB_RS780_CLOCK_INFO;
346
347#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
348#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
349#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
350#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
351
352#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
353#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
354#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
355
356#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
357#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
358#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
359
360typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
361{
362 USHORT usEngineClockLow;
363 UCHAR ucEngineClockHigh;
364
365 USHORT usMemoryClockLow;
366 UCHAR ucMemoryClockHigh;
367
368 USHORT usVDDC;
369 USHORT usVDDCI;
370 USHORT usUnused;
371
372 ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
373
374} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
375
376typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
377{
378 USHORT usEngineClockLow;
379 UCHAR ucEngineClockHigh;
380
381 USHORT usMemoryClockLow;
382 UCHAR ucMemoryClockHigh;
383
384 USHORT usVDDC;
385 USHORT usVDDCI;
386 UCHAR ucPCIEGen;
387 UCHAR ucUnused1;
388
389 ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
390
391} ATOM_PPLIB_SI_CLOCK_INFO;
392
393typedef struct _ATOM_PPLIB_CI_CLOCK_INFO
394{
395 USHORT usEngineClockLow;
396 UCHAR ucEngineClockHigh;
397
398 USHORT usMemoryClockLow;
399 UCHAR ucMemoryClockHigh;
400
401 UCHAR ucPCIEGen;
402 USHORT usPCIELane;
403} ATOM_PPLIB_CI_CLOCK_INFO;
404
405typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
406 USHORT usEngineClockLow; //clockfrequency & 0xFFFF. The unit is in 10khz
407 UCHAR ucEngineClockHigh; //clockfrequency >> 16.
408 UCHAR vddcIndex; //2-bit vddc index;
409 USHORT tdpLimit;
410 //please initialize to 0
411 USHORT rsv1;
412 //please initialize to 0s
413 ULONG rsv2[2];
414}ATOM_PPLIB_SUMO_CLOCK_INFO;
415
416typedef struct _ATOM_PPLIB_STATE_V2
417{
418 //number of valid dpm levels in this state; Driver uses it to calculate the whole
419 //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
420 UCHAR ucNumDPMLevels;
421
422 //an index to the array of nonClockInfos
423 UCHAR nonClockInfoIndex;
424 /**
425 * Driver will read the first ucNumDPMLevels in this array
426 */
427 UCHAR clockInfoIndex[1];
428} ATOM_PPLIB_STATE_V2;
429
430typedef struct _StateArray{
431 //how many states we have
432 UCHAR ucNumEntries;
433
434 ATOM_PPLIB_STATE_V2 states[1];
435}StateArray;
436
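Because each ATOM_PPLIB_STATE_V2 above carries ucNumDPMLevels trailing index bytes, states[] cannot be indexed like a normal array; a parser has to advance by the computed size of each entry. A user-space sketch of that walk with made-up data:

#include <stdio.h>
#include <stddef.h>

#pragma pack(push, 1)
typedef struct {			/* mirrors ATOM_PPLIB_STATE_V2 */
	unsigned char ucNumDPMLevels;
	unsigned char nonClockInfoIndex;
	unsigned char clockInfoIndex[1];	/* really ucNumDPMLevels bytes */
} state_v2;
#pragma pack(pop)

int main(void)
{
	/* two states: 2 DPM levels, then 3 DPM levels (made-up blob) */
	unsigned char blob[] = { 2, 0, 10, 11,  3, 1, 20, 21, 22 };
	unsigned char *p = blob;
	int i, num_states = 2;

	for (i = 0; i < num_states; i++) {
		state_v2 *s = (state_v2 *)p;
		printf("state %d: %u levels, nonclock %u\n",
		       i, s->ucNumDPMLevels, s->nonClockInfoIndex);
		p += offsetof(state_v2, clockInfoIndex) + s->ucNumDPMLevels;
	}
	return 0;
}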
437
438typedef struct _ClockInfoArray{
439 //how many clock levels we have
440 UCHAR ucNumEntries;
441
442 //sizeof(ATOM_PPLIB_CLOCK_INFO)
443 UCHAR ucEntrySize;
444
445 UCHAR clockInfo[1];
446}ClockInfoArray;
447
448typedef struct _NonClockInfoArray{
449
450 //how many non-clock levels we have; normally the same as the number of states
451 UCHAR ucNumEntries;
452 //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
453 UCHAR ucEntrySize;
454
455 ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
456}NonClockInfoArray;
457
458typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
459{
460 USHORT usClockLow;
461 UCHAR ucClockHigh;
462 USHORT usVoltage;
463}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
464
465typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
466{
467 UCHAR ucNumEntries; // Number of entries.
468 ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
469}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
470
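Clocks in these dependency records are split into a 16-bit low part and an 8-bit high part; the usual reconstruction is low | (high << 16), and the values appear to be in 10 kHz units. A quick sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t usClockLow = 0x3880;	/* example: together 80000 = 800 MHz */
	uint8_t  ucClockHigh = 0x01;
	uint32_t clk = usClockLow | ((uint32_t)ucClockHigh << 16);

	printf("clock: %u * 10 kHz = %u MHz\n", clk, clk / 100);
	return 0;
}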
471typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
472{
473 USHORT usSclkLow;
474 UCHAR ucSclkHigh;
475 USHORT usMclkLow;
476 UCHAR ucMclkHigh;
477 USHORT usVddc;
478 USHORT usVddci;
479}ATOM_PPLIB_Clock_Voltage_Limit_Record;
480
481typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
482{
483 UCHAR ucNumEntries; // Number of entries.
484 ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
485}ATOM_PPLIB_Clock_Voltage_Limit_Table;
486
487union _ATOM_PPLIB_CAC_Leakage_Record
488{
489 struct
490 {
491 USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations; for CI and newer, we use this as the real VDDC value. In CI we read it as StdVoltageHiSidd
492 ULONG ulLeakageValue; // For CI and newer we use this as the "fake" standard VDDC value. In CI we read it as StdVoltageLoSidd
493
494 };
495 struct
496 {
497 USHORT usVddc1;
498 USHORT usVddc2;
499 USHORT usVddc3;
500 };
501};
502
503typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record;
504
505typedef struct _ATOM_PPLIB_CAC_Leakage_Table
506{
507 UCHAR ucNumEntries; // Number of entries.
508 ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
509}ATOM_PPLIB_CAC_Leakage_Table;
510
511typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
512{
513 USHORT usVoltage;
514 USHORT usSclkLow;
515 UCHAR ucSclkHigh;
516 USHORT usMclkLow;
517 UCHAR ucMclkHigh;
518}ATOM_PPLIB_PhaseSheddingLimits_Record;
519
520typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
521{
522 UCHAR ucNumEntries; // Number of entries.
523 ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
524}ATOM_PPLIB_PhaseSheddingLimits_Table;
525
526typedef struct _VCEClockInfo{
527 USHORT usEVClkLow;
528 UCHAR ucEVClkHigh;
529 USHORT usECClkLow;
530 UCHAR ucECClkHigh;
531}VCEClockInfo;
532
533typedef struct _VCEClockInfoArray{
534 UCHAR ucNumEntries;
535 VCEClockInfo entries[1];
536}VCEClockInfoArray;
537
538typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
539{
540 USHORT usVoltage;
541 UCHAR ucVCEClockInfoIndex;
542}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
543
544typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
545{
546 UCHAR numEntries;
547 ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
548}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
549
550typedef struct _ATOM_PPLIB_VCE_State_Record
551{
552 UCHAR ucVCEClockInfoIndex;
553 UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
554}ATOM_PPLIB_VCE_State_Record;
555
556typedef struct _ATOM_PPLIB_VCE_State_Table
557{
558 UCHAR numEntries;
559 ATOM_PPLIB_VCE_State_Record entries[1];
560}ATOM_PPLIB_VCE_State_Table;
561
562
563typedef struct _ATOM_PPLIB_VCE_Table
564{
565 UCHAR revid;
566// VCEClockInfoArray array;
567// ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
568// ATOM_PPLIB_VCE_State_Table states;
569}ATOM_PPLIB_VCE_Table;
570
571
572typedef struct _UVDClockInfo{
573 USHORT usVClkLow;
574 UCHAR ucVClkHigh;
575 USHORT usDClkLow;
576 UCHAR ucDClkHigh;
577}UVDClockInfo;
578
579typedef struct _UVDClockInfoArray{
580 UCHAR ucNumEntries;
581 UVDClockInfo entries[1];
582}UVDClockInfoArray;
583
584typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
585{
586 USHORT usVoltage;
587 UCHAR ucUVDClockInfoIndex;
588}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
589
590typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
591{
592 UCHAR numEntries;
593 ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
594}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
595
596typedef struct _ATOM_PPLIB_UVD_Table
597{
598 UCHAR revid;
599// UVDClockInfoArray array;
600// ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
601}ATOM_PPLIB_UVD_Table;
602
603typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
604{
605 USHORT usVoltage;
606 USHORT usSAMClockLow;
607 UCHAR ucSAMClockHigh;
608}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
609
610typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
611 UCHAR numEntries;
612 ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
613}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
614
615typedef struct _ATOM_PPLIB_SAMU_Table
616{
617 UCHAR revid;
618 ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits;
619}ATOM_PPLIB_SAMU_Table;
620
621typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record
622{
623 USHORT usVoltage;
624 USHORT usACPClockLow;
625 UCHAR ucACPClockHigh;
626}ATOM_PPLIB_ACPClk_Voltage_Limit_Record;
627
628typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{
629 UCHAR numEntries;
630 ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1];
631}ATOM_PPLIB_ACPClk_Voltage_Limit_Table;
632
633typedef struct _ATOM_PPLIB_ACP_Table
634{
635 UCHAR revid;
636 ATOM_PPLIB_ACPClk_Voltage_Limit_Table limits;
637}ATOM_PPLIB_ACP_Table;
638
639typedef struct _ATOM_PowerTune_Table{
640 USHORT usTDP;
641 USHORT usConfigurableTDP;
642 USHORT usTDC;
643 USHORT usBatteryPowerLimit;
644 USHORT usSmallPowerLimit;
645 USHORT usLowCACLeakage;
646 USHORT usHighCACLeakage;
647}ATOM_PowerTune_Table;
648
649typedef struct _ATOM_PPLIB_POWERTUNE_Table
650{
651 UCHAR revid;
652 ATOM_PowerTune_Table power_tune_table;
653}ATOM_PPLIB_POWERTUNE_Table;
654
655typedef struct _ATOM_PPLIB_POWERTUNE_Table_V1
656{
657 UCHAR revid;
658 ATOM_PowerTune_Table power_tune_table;
659 USHORT usMaximumPowerDeliveryLimit;
660 USHORT usReserve[7];
661} ATOM_PPLIB_POWERTUNE_Table_V1;
662
663#define ATOM_PPM_A_A 1
664#define ATOM_PPM_A_I 2
665typedef struct _ATOM_PPLIB_PPM_Table
666{
667 UCHAR ucRevId;
668 UCHAR ucPpmDesign; //A+I or A+A
669 USHORT usCpuCoreNumber;
670 ULONG ulPlatformTDP;
671 ULONG ulSmallACPlatformTDP;
672 ULONG ulPlatformTDC;
673 ULONG ulSmallACPlatformTDC;
674 ULONG ulApuTDP;
675 ULONG ulDGpuTDP;
676 ULONG ulDGpuUlvPower;
677 ULONG ulTjmax;
678} ATOM_PPLIB_PPM_Table;
679
680#pragma pack(pop)
681
682#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 75349cdaa84b..9fc61dd68bc0 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1097,12 +1097,12 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1097 1097 }
1098 1098
1099 1099 /* Align ring size */
1100 rb_bufsz = drm_order(ring_size / 8);
1100 rb_bufsz = order_base_2(ring_size / 8);
1101 1101 ring_size = (1 << (rb_bufsz + 1)) * 4;
1102 1102 r100_cp_load_microcode(rdev);
1103 1103 r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
1104 1104 RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
1105 0, 0x7fffff, RADEON_CP_PACKET2);
1105 RADEON_CP_PACKET2);
1106 1106 if (r) {
1107 1107 return r;
1108 1108 }
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 393880a09412..ea4d3734e6d9 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1374,7 +1374,7 @@ static bool r600_is_display_hung(struct radeon_device *rdev)
1374 return true; 1374 return true;
1375} 1375}
1376 1376
1377static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev) 1377u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
1378{ 1378{
1379 u32 reset_mask = 0; 1379 u32 reset_mask = 0;
1380 u32 tmp; 1380 u32 tmp;
@@ -1622,28 +1622,6 @@ bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1622 return radeon_ring_test_lockup(rdev, ring); 1622 return radeon_ring_test_lockup(rdev, ring);
1623} 1623}
1624 1624
1625/**
1626 * r600_dma_is_lockup - Check if the DMA engine is locked up
1627 *
1628 * @rdev: radeon_device pointer
1629 * @ring: radeon_ring structure holding ring information
1630 *
1631 * Check if the async DMA engine is locked up.
1632 * Returns true if the engine appears to be locked up, false if not.
1633 */
1634bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1635{
1636 u32 reset_mask = r600_gpu_check_soft_reset(rdev);
1637
1638 if (!(reset_mask & RADEON_RESET_DMA)) {
1639 radeon_ring_lockup_update(ring);
1640 return false;
1641 }
1642 /* force ring activities */
1643 radeon_ring_force_activity(rdev, ring);
1644 return radeon_ring_test_lockup(rdev, ring);
1645}
1646
1647u32 r6xx_remap_render_backend(struct radeon_device *rdev, 1625u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1648 u32 tiling_pipe_num, 1626 u32 tiling_pipe_num,
1649 u32 max_rb_num, 1627 u32 max_rb_num,
@@ -2299,9 +2277,13 @@ int r600_init_microcode(struct radeon_device *rdev)
2299 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) { 2277 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
2300 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name); 2278 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
2301 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); 2279 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
2302 if (err) 2280 if (err) {
2303 goto out; 2281 printk(KERN_ERR
2304 if (rdev->smc_fw->size != smc_req_size) { 2282 "smc: error loading firmware \"%s\"\n",
2283 fw_name);
2284 release_firmware(rdev->smc_fw);
2285 rdev->smc_fw = NULL;
2286 } else if (rdev->smc_fw->size != smc_req_size) {
2305 printk(KERN_ERR 2287 printk(KERN_ERR
2306 "smc: Bogus length %zu in firmware \"%s\"\n", 2288 "smc: Bogus length %zu in firmware \"%s\"\n",
2307 rdev->smc_fw->size, fw_name); 2289 rdev->smc_fw->size, fw_name);
@@ -2413,8 +2395,8 @@ int r600_cp_resume(struct radeon_device *rdev)
2413 WREG32(GRBM_SOFT_RESET, 0); 2395 WREG32(GRBM_SOFT_RESET, 0);
2414 2396
2415 /* Set ring buffer size */ 2397 /* Set ring buffer size */
2416 rb_bufsz = drm_order(ring->ring_size / 8); 2398 rb_bufsz = order_base_2(ring->ring_size / 8);
2417 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 2399 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2418#ifdef __BIG_ENDIAN 2400#ifdef __BIG_ENDIAN
2419 tmp |= BUF_SWAP_32BIT; 2401 tmp |= BUF_SWAP_32BIT;
2420#endif 2402#endif
@@ -2467,7 +2449,7 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsign
2467 int r; 2449 int r;
2468 2450
2469 /* Align ring size */ 2451 /* Align ring size */
2470 rb_bufsz = drm_order(ring_size / 8); 2452 rb_bufsz = order_base_2(ring_size / 8);
2471 ring_size = (1 << (rb_bufsz + 1)) * 4; 2453 ring_size = (1 << (rb_bufsz + 1)) * 4;
2472 ring->ring_size = ring_size; 2454 ring->ring_size = ring_size;
2473 ring->align_mask = 16 - 1; 2455 ring->align_mask = 16 - 1;
@@ -2490,327 +2472,6 @@ void r600_cp_fini(struct radeon_device *rdev)
2490} 2472}
2491 2473
2492/* 2474/*
2493 * DMA
2494 * Starting with R600, the GPU has an asynchronous
2495 * DMA engine. The programming model is very similar
2496 * to the 3D engine (ring buffer, IBs, etc.), but the
2497 * DMA controller has its own packet format that is
2498 * different from the PM4 format used by the 3D engine.
2499 * It supports copying data, writing embedded data,
2500 * solid fills, and a number of other things. It also
2501 * has support for tiling/detiling of buffers.
2502 */
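/*
 * Illustrative sketch, not part of the driver: a packet builder in the
 * spirit of the DMA_PACKET() macro used throughout this block. The
 * authoritative field layout lives in r600d.h; the positions below are
 * given for orientation only and the macro name is hypothetical.
 */
#define EXAMPLE_DMA_PACKET(cmd, tiled, sub, count)			\
	((((cmd) & 0xF) << 28) |	/* opcode: WRITE, COPY, FENCE, ... */ \
	 (((tiled) & 0x1) << 23) |	/* tiling select */		\
	 (((sub) & 0x1) << 22) |	/* sub-opcode/size select */	\
	 ((count) & 0xFFFF))		/* payload size in dwords */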
2503/**
2504 * r600_dma_stop - stop the async dma engine
2505 *
2506 * @rdev: radeon_device pointer
2507 *
2508 * Stop the async dma engine (r6xx-evergreen).
2509 */
2510void r600_dma_stop(struct radeon_device *rdev)
2511{
2512 u32 rb_cntl = RREG32(DMA_RB_CNTL);
2513
2514 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
2515
2516 rb_cntl &= ~DMA_RB_ENABLE;
2517 WREG32(DMA_RB_CNTL, rb_cntl);
2518
2519 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
2520}
2521
2522/**
2523 * r600_dma_resume - setup and start the async dma engine
2524 *
2525 * @rdev: radeon_device pointer
2526 *
2527 * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
2528 * Returns 0 for success, error for failure.
2529 */
2530int r600_dma_resume(struct radeon_device *rdev)
2531{
2532 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
2533 u32 rb_cntl, dma_cntl, ib_cntl;
2534 u32 rb_bufsz;
2535 int r;
2536
2537 /* Reset dma */
2538 if (rdev->family >= CHIP_RV770)
2539 WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
2540 else
2541 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
2542 RREG32(SRBM_SOFT_RESET);
2543 udelay(50);
2544 WREG32(SRBM_SOFT_RESET, 0);
2545
2546 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
2547 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
2548
2549 /* Set ring buffer size in dwords */
2550 rb_bufsz = drm_order(ring->ring_size / 4);
2551 rb_cntl = rb_bufsz << 1;
2552#ifdef __BIG_ENDIAN
2553 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
2554#endif
2555 WREG32(DMA_RB_CNTL, rb_cntl);
2556
2557 /* Initialize the ring buffer's read and write pointers */
2558 WREG32(DMA_RB_RPTR, 0);
2559 WREG32(DMA_RB_WPTR, 0);
2560
2561 /* set the wb address whether it's enabled or not */
2562 WREG32(DMA_RB_RPTR_ADDR_HI,
2563 upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
2564 WREG32(DMA_RB_RPTR_ADDR_LO,
2565 ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
2566
2567 if (rdev->wb.enabled)
2568 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
2569
2570 WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
2571
2572 /* enable DMA IBs */
2573 ib_cntl = DMA_IB_ENABLE;
2574#ifdef __BIG_ENDIAN
2575 ib_cntl |= DMA_IB_SWAP_ENABLE;
2576#endif
2577 WREG32(DMA_IB_CNTL, ib_cntl);
2578
2579 dma_cntl = RREG32(DMA_CNTL);
2580 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
2581 WREG32(DMA_CNTL, dma_cntl);
2582
2583 if (rdev->family >= CHIP_RV770)
2584 WREG32(DMA_MODE, 1);
2585
2586 ring->wptr = 0;
2587 WREG32(DMA_RB_WPTR, ring->wptr << 2);
2588
2589 ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
2590
2591 WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
2592
2593 ring->ready = true;
2594
2595 r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
2596 if (r) {
2597 ring->ready = false;
2598 return r;
2599 }
2600
2601 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
2602
2603 return 0;
2604}
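/*
 * Worked example of the size encoding above (illustrative): a 64 KiB DMA
 * ring is 16384 dwords, drm_order(16384) == 14, so rb_cntl starts out as
 * 14 << 1 == 0x1c before the swap and writeback bits are OR'ed in.
 */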
2605
2606/**
2607 * r600_dma_fini - tear down the async dma engine
2608 *
2609 * @rdev: radeon_device pointer
2610 *
2611 * Stop the async dma engine and free the ring (r6xx-evergreen).
2612 */
2613void r600_dma_fini(struct radeon_device *rdev)
2614{
2615 r600_dma_stop(rdev);
2616 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
2617}
2618
2619/*
2620 * UVD
2621 */
2622int r600_uvd_rbc_start(struct radeon_device *rdev)
2623{
2624 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2625 uint64_t rptr_addr;
2626 uint32_t rb_bufsz, tmp;
2627 int r;
2628
2629 rptr_addr = rdev->wb.gpu_addr + R600_WB_UVD_RPTR_OFFSET;
2630
2631 if (upper_32_bits(rptr_addr) != upper_32_bits(ring->gpu_addr)) {
2632 DRM_ERROR("UVD ring and rptr not in the same 4GB segment!\n");
2633 return -EINVAL;
2634 }
2635
2636 /* force RBC into idle state */
2637 WREG32(UVD_RBC_RB_CNTL, 0x11010101);
2638
2639 /* Set the write pointer delay */
2640 WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
2641
2642 /* set the wb address */
2643 WREG32(UVD_RBC_RB_RPTR_ADDR, rptr_addr >> 2);
2644
2645 /* program the 4GB memory segment for rptr and ring buffer */
2646 WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(rptr_addr) |
2647 (0x7 << 16) | (0x1 << 31));
2648
2649 /* Initialize the ring buffer's read and write pointers */
2650 WREG32(UVD_RBC_RB_RPTR, 0x0);
2651
2652 ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
2653 WREG32(UVD_RBC_RB_WPTR, ring->wptr);
2654
2655 /* set the ring address */
2656 WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
2657
2658 /* Set ring buffer size */
2659 rb_bufsz = drm_order(ring->ring_size);
2660 rb_bufsz = (0x1 << 8) | rb_bufsz;
2661 WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
2662
2663 ring->ready = true;
2664 r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
2665 if (r) {
2666 ring->ready = false;
2667 return r;
2668 }
2669
2670 r = radeon_ring_lock(rdev, ring, 10);
2671 if (r) {
2672 DRM_ERROR("radeon: failed to lock UVD ring (%d).\n", r);
2673 return r;
2674 }
2675
2676 tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
2677 radeon_ring_write(ring, tmp);
2678 radeon_ring_write(ring, 0xFFFFF);
2679
2680 tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
2681 radeon_ring_write(ring, tmp);
2682 radeon_ring_write(ring, 0xFFFFF);
2683
2684 tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
2685 radeon_ring_write(ring, tmp);
2686 radeon_ring_write(ring, 0xFFFFF);
2687
2688 /* Clear timeout status bits */
2689 radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
2690 radeon_ring_write(ring, 0x8);
2691
2692 radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
2693 radeon_ring_write(ring, 3);
2694
2695 radeon_ring_unlock_commit(rdev, ring);
2696
2697 return 0;
2698}
2699
2700void r600_uvd_rbc_stop(struct radeon_device *rdev)
2701{
2702 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2703
2704 /* force RBC into idle state */
2705 WREG32(UVD_RBC_RB_CNTL, 0x11010101);
2706 ring->ready = false;
2707}
2708
2709int r600_uvd_init(struct radeon_device *rdev)
2710{
2711 int i, j, r;
2712 /* disable byte swapping */
2713 u32 lmi_swap_cntl = 0;
2714 u32 mp_swap_cntl = 0;
2715
2716 /* raise clocks while booting up the VCPU */
2717 radeon_set_uvd_clocks(rdev, 53300, 40000);
2718
2719 /* disable clock gating */
2720 WREG32(UVD_CGC_GATE, 0);
2721
2722 /* disable interrupts */
2723 WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
2724
2725 /* put LMI, VCPU, RBC etc... into reset */
2726 WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
2727 LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
2728 CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
2729 mdelay(5);
2730
2731 /* take UVD block out of reset */
2732 WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
2733 mdelay(5);
2734
2735 /* initialize UVD memory controller */
2736 WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
2737 (1 << 21) | (1 << 9) | (1 << 20));
2738
2739#ifdef __BIG_ENDIAN
2740 /* swap (8 in 32) RB and IB */
2741 lmi_swap_cntl = 0xa;
2742 mp_swap_cntl = 0;
2743#endif
2744 WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
2745 WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
2746
2747 WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
2748 WREG32(UVD_MPC_SET_MUXA1, 0x0);
2749 WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
2750 WREG32(UVD_MPC_SET_MUXB1, 0x0);
2751 WREG32(UVD_MPC_SET_ALU, 0);
2752 WREG32(UVD_MPC_SET_MUX, 0x88);
2753
2754 /* Stall UMC */
2755 WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
2756 WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
2757
2758 /* take all subblocks out of reset, except VCPU */
2759 WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
2760 mdelay(5);
2761
2762 /* enable VCPU clock */
2763 WREG32(UVD_VCPU_CNTL, 1 << 9);
2764
2765 /* enable UMC */
2766 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
2767
2768 /* boot up the VCPU */
2769 WREG32(UVD_SOFT_RESET, 0);
2770 mdelay(10);
2771
2772 WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
2773
2774 for (i = 0; i < 10; ++i) {
2775 uint32_t status;
2776 for (j = 0; j < 100; ++j) {
2777 status = RREG32(UVD_STATUS);
2778 if (status & 2)
2779 break;
2780 mdelay(10);
2781 }
2782 r = 0;
2783 if (status & 2)
2784 break;
2785
2786 DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
2787 WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
2788 mdelay(10);
2789 WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
2790 mdelay(10);
2791 r = -1;
2792 }
2793
2794 if (r) {
2795 DRM_ERROR("UVD not responding, giving up!!!\n");
2796 radeon_set_uvd_clocks(rdev, 0, 0);
2797 return r;
2798 }
2799
2800 /* enable interrupts */
2801 WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
2802
2803 r = r600_uvd_rbc_start(rdev);
2804 if (!r)
2805 DRM_INFO("UVD initialized successfully.\n");
2806
2807 /* lower clocks again */
2808 radeon_set_uvd_clocks(rdev, 0, 0);
2809
2810 return r;
2811}
2812
2813/*
2814 * GPU scratch register helper functions. 2475 * GPU scratch register helper functions.
2815 */ 2476 */
2816void r600_scratch_init(struct radeon_device *rdev) 2477void r600_scratch_init(struct radeon_device *rdev)
@@ -2865,94 +2526,6 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2865 return r; 2526 return r;
2866} 2527}
2867 2528
2868/**
2869 * r600_dma_ring_test - simple async dma engine test
2870 *
2871 * @rdev: radeon_device pointer
2872 * @ring: radeon_ring structure holding ring information
2873 *
2874 * Test the DMA engine by using it to write a value
2875 * to memory (r6xx-SI).
2876 * Returns 0 for success, error for failure.
2877 */
2878int r600_dma_ring_test(struct radeon_device *rdev,
2879 struct radeon_ring *ring)
2880{
2881 unsigned i;
2882 int r;
2883 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
2884 u32 tmp;
2885
2886 if (!ptr) {
2887 DRM_ERROR("invalid vram scratch pointer\n");
2888 return -EINVAL;
2889 }
2890
2891 tmp = 0xCAFEDEAD;
2892 writel(tmp, ptr);
2893
2894 r = radeon_ring_lock(rdev, ring, 4);
2895 if (r) {
2896 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
2897 return r;
2898 }
2899 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
2900 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
2901 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
2902 radeon_ring_write(ring, 0xDEADBEEF);
2903 radeon_ring_unlock_commit(rdev, ring);
2904
2905 for (i = 0; i < rdev->usec_timeout; i++) {
2906 tmp = readl(ptr);
2907 if (tmp == 0xDEADBEEF)
2908 break;
2909 DRM_UDELAY(1);
2910 }
2911
2912 if (i < rdev->usec_timeout) {
2913 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2914 } else {
2915 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
2916 ring->idx, tmp);
2917 r = -EINVAL;
2918 }
2919 return r;
2920}
2921
2922int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2923{
2924 uint32_t tmp = 0;
2925 unsigned i;
2926 int r;
2927
2928 WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
2929 r = radeon_ring_lock(rdev, ring, 3);
2930 if (r) {
2931 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
2932 ring->idx, r);
2933 return r;
2934 }
2935 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
2936 radeon_ring_write(ring, 0xDEADBEEF);
2937 radeon_ring_unlock_commit(rdev, ring);
2938 for (i = 0; i < rdev->usec_timeout; i++) {
2939 tmp = RREG32(UVD_CONTEXT_ID);
2940 if (tmp == 0xDEADBEEF)
2941 break;
2942 DRM_UDELAY(1);
2943 }
2944
2945 if (i < rdev->usec_timeout) {
2946 DRM_INFO("ring test on %d succeeded in %d usecs\n",
2947 ring->idx, i);
2948 } else {
2949 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
2950 ring->idx, tmp);
2951 r = -EINVAL;
2952 }
2953 return r;
2954}
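/*
 * Both ring tests above follow the same pattern: seed a scratch location
 * (UVD_CONTEXT_ID here, a VRAM scratch dword for the DMA test) with
 * 0xCAFEDEAD, ask the ring to overwrite it with 0xDEADBEEF, then poll
 * for up to usec_timeout microseconds until the new value lands.
 */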
2955
2956/* 2529/*
2957 * CP fences/semaphores 2530 * CP fences/semaphores
2958 */ 2531 */
@@ -3004,30 +2577,6 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
3004 } 2577 }
3005} 2578}
3006 2579
3007void r600_uvd_fence_emit(struct radeon_device *rdev,
3008 struct radeon_fence *fence)
3009{
3010 struct radeon_ring *ring = &rdev->ring[fence->ring];
3011 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
3012
3013 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
3014 radeon_ring_write(ring, fence->seq);
3015 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
3016 radeon_ring_write(ring, addr & 0xffffffff);
3017 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
3018 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
3019 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
3020 radeon_ring_write(ring, 0);
3021
3022 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
3023 radeon_ring_write(ring, 0);
3024 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
3025 radeon_ring_write(ring, 0);
3026 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
3027 radeon_ring_write(ring, 2);
3028 return;
3029}
3030
3031void r600_semaphore_ring_emit(struct radeon_device *rdev, 2580void r600_semaphore_ring_emit(struct radeon_device *rdev,
3032 struct radeon_ring *ring, 2581 struct radeon_ring *ring,
3033 struct radeon_semaphore *semaphore, 2582 struct radeon_semaphore *semaphore,
@@ -3044,95 +2593,6 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
3044 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); 2593 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
3045} 2594}
3046 2595
3047/*
3048 * DMA fences/semaphores
3049 */
3050
3051/**
3052 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
3053 *
3054 * @rdev: radeon_device pointer
3055 * @fence: radeon fence object
3056 *
3057 * Add a DMA fence packet to the ring to write
3058 * the fence seq number and a DMA trap packet to generate
3059 * an interrupt if needed (r6xx-r7xx).
3060 */
3061void r600_dma_fence_ring_emit(struct radeon_device *rdev,
3062 struct radeon_fence *fence)
3063{
3064 struct radeon_ring *ring = &rdev->ring[fence->ring];
3065 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3066
3067 /* write the fence */
3068 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
3069 radeon_ring_write(ring, addr & 0xfffffffc);
3070 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
3071 radeon_ring_write(ring, lower_32_bits(fence->seq));
3072 /* generate an interrupt */
3073 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
3074}
3075
3076/**
3077 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
3078 *
3079 * @rdev: radeon_device pointer
3080 * @ring: radeon_ring structure holding ring information
3081 * @semaphore: radeon semaphore object
3082 * @emit_wait: wait or signal semaphore
3083 *
3084 * Add a DMA semaphore packet to the ring to wait on or signal
3085 * other rings (r6xx-SI).
3086 */
3087void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
3088 struct radeon_ring *ring,
3089 struct radeon_semaphore *semaphore,
3090 bool emit_wait)
3091{
3092 u64 addr = semaphore->gpu_addr;
3093 u32 s = emit_wait ? 0 : 1;
3094
3095 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
3096 radeon_ring_write(ring, addr & 0xfffffffc);
3097 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
3098}
3099
3100void r600_uvd_semaphore_emit(struct radeon_device *rdev,
3101 struct radeon_ring *ring,
3102 struct radeon_semaphore *semaphore,
3103 bool emit_wait)
3104{
3105 uint64_t addr = semaphore->gpu_addr;
3106
3107 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
3108 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
3109
3110 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
3111 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
3112
3113 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
3114 radeon_ring_write(ring, emit_wait ? 1 : 0);
3115}
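/*
 * Address split in the packets above: the semaphore address is 8-byte
 * aligned, so bits [22:3] go into UVD_SEMA_ADDR_LOW and bits [42:23]
 * into UVD_SEMA_ADDR_HIGH, each as a 20-bit field.
 */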
3116
3117int r600_copy_blit(struct radeon_device *rdev,
3118 uint64_t src_offset,
3119 uint64_t dst_offset,
3120 unsigned num_gpu_pages,
3121 struct radeon_fence **fence)
3122{
3123 struct radeon_semaphore *sem = NULL;
3124 struct radeon_sa_bo *vb = NULL;
3125 int r;
3126
3127 r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
3128 if (r) {
3129 return r;
3130 }
3131 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
3132 r600_blit_done_copy(rdev, fence, vb, sem);
3133 return 0;
3134}
3135
3136/** 2596/**
3137 * r600_copy_cpdma - copy pages using the CP DMA engine 2597 * r600_copy_cpdma - copy pages using the CP DMA engine
3138 * 2598 *
@@ -3166,7 +2626,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
3166 2626
3167 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); 2627 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
3168 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); 2628 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
3169 r = radeon_ring_lock(rdev, ring, num_loops * 6 + 21); 2629 r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
3170 if (r) { 2630 if (r) {
3171 DRM_ERROR("radeon: moving bo (%d).\n", r); 2631 DRM_ERROR("radeon: moving bo (%d).\n", r);
3172 radeon_semaphore_free(rdev, &sem, NULL); 2632 radeon_semaphore_free(rdev, &sem, NULL);
@@ -3181,6 +2641,9 @@ int r600_copy_cpdma(struct radeon_device *rdev,
3181 radeon_semaphore_free(rdev, &sem, NULL); 2641 radeon_semaphore_free(rdev, &sem, NULL);
3182 } 2642 }
3183 2643
2644 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2645 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2646 radeon_ring_write(ring, WAIT_3D_IDLE_bit);
3184 for (i = 0; i < num_loops; i++) { 2647 for (i = 0; i < num_loops; i++) {
3185 cur_size_in_bytes = size_in_bytes; 2648 cur_size_in_bytes = size_in_bytes;
3186 if (cur_size_in_bytes > 0x1fffff) 2649 if (cur_size_in_bytes > 0x1fffff)
@@ -3214,80 +2677,6 @@ int r600_copy_cpdma(struct radeon_device *rdev,
3214 return r; 2677 return r;
3215} 2678}
3216 2679
3217/**
3218 * r600_copy_dma - copy pages using the DMA engine
3219 *
3220 * @rdev: radeon_device pointer
3221 * @src_offset: src GPU address
3222 * @dst_offset: dst GPU address
3223 * @num_gpu_pages: number of GPU pages to xfer
3224 * @fence: radeon fence object
3225 *
3226 * Copy GPU pages using the DMA engine (r6xx).
3227 * Used by the radeon ttm implementation to move pages if
3228 * registered as the asic copy callback.
3229 */
3230int r600_copy_dma(struct radeon_device *rdev,
3231 uint64_t src_offset, uint64_t dst_offset,
3232 unsigned num_gpu_pages,
3233 struct radeon_fence **fence)
3234{
3235 struct radeon_semaphore *sem = NULL;
3236 int ring_index = rdev->asic->copy.dma_ring_index;
3237 struct radeon_ring *ring = &rdev->ring[ring_index];
3238 u32 size_in_dw, cur_size_in_dw;
3239 int i, num_loops;
3240 int r = 0;
3241
3242 r = radeon_semaphore_create(rdev, &sem);
3243 if (r) {
3244 DRM_ERROR("radeon: moving bo (%d).\n", r);
3245 return r;
3246 }
3247
3248 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
3249 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
3250 r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
3251 if (r) {
3252 DRM_ERROR("radeon: moving bo (%d).\n", r);
3253 radeon_semaphore_free(rdev, &sem, NULL);
3254 return r;
3255 }
3256
3257 if (radeon_fence_need_sync(*fence, ring->idx)) {
3258 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3259 ring->idx);
3260 radeon_fence_note_sync(*fence, ring->idx);
3261 } else {
3262 radeon_semaphore_free(rdev, &sem, NULL);
3263 }
3264
3265 for (i = 0; i < num_loops; i++) {
3266 cur_size_in_dw = size_in_dw;
3267 if (cur_size_in_dw > 0xFFFE)
3268 cur_size_in_dw = 0xFFFE;
3269 size_in_dw -= cur_size_in_dw;
3270 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
3271 radeon_ring_write(ring, dst_offset & 0xfffffffc);
3272 radeon_ring_write(ring, src_offset & 0xfffffffc);
3273 radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
3274 (upper_32_bits(src_offset) & 0xff)));
3275 src_offset += cur_size_in_dw * 4;
3276 dst_offset += cur_size_in_dw * 4;
3277 }
3278
3279 r = radeon_fence_emit(rdev, fence, ring->idx);
3280 if (r) {
3281 radeon_ring_unlock_undo(rdev, ring);
3282 return r;
3283 }
3284
3285 radeon_ring_unlock_commit(rdev, ring);
3286 radeon_semaphore_free(rdev, &sem, *fence);
3287
3288 return r;
3289}
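/*
 * Sizing example for the copy loop above: one 4 KiB GPU page is 1024
 * dwords, so moving 256 pages (1 MiB) is 262144 dwords, which
 * DIV_ROUND_UP(262144, 0xFFFE) splits into 5 COPY packets
 * (4 x 65534 dwords plus an 8-dword remainder).
 */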
3290
3291int r600_set_surface_reg(struct radeon_device *rdev, int reg, 2680int r600_set_surface_reg(struct radeon_device *rdev, int reg,
3292 uint32_t tiling_flags, uint32_t pitch, 2681 uint32_t tiling_flags, uint32_t pitch,
3293 uint32_t offset, uint32_t obj_size) 2682 uint32_t offset, uint32_t obj_size)
@@ -3309,6 +2698,13 @@ static int r600_startup(struct radeon_device *rdev)
3309 /* enable pcie gen2 link */ 2698 /* enable pcie gen2 link */
3310 r600_pcie_gen2_enable(rdev); 2699 r600_pcie_gen2_enable(rdev);
3311 2700
2701 /* scratch needs to be initialized before MC */
2702 r = r600_vram_scratch_init(rdev);
2703 if (r)
2704 return r;
2705
2706 r600_mc_program(rdev);
2707
3312 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 2708 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
3313 r = r600_init_microcode(rdev); 2709 r = r600_init_microcode(rdev);
3314 if (r) { 2710 if (r) {
@@ -3317,11 +2713,6 @@ static int r600_startup(struct radeon_device *rdev)
3317 } 2713 }
3318 } 2714 }
3319 2715
3320 r = r600_vram_scratch_init(rdev);
3321 if (r)
3322 return r;
3323
3324 r600_mc_program(rdev);
3325 if (rdev->flags & RADEON_IS_AGP) { 2716 if (rdev->flags & RADEON_IS_AGP) {
3326 r600_agp_enable(rdev); 2717 r600_agp_enable(rdev);
3327 } else { 2718 } else {
@@ -3330,12 +2721,6 @@ static int r600_startup(struct radeon_device *rdev)
3330 return r; 2721 return r;
3331 } 2722 }
3332 r600_gpu_init(rdev); 2723 r600_gpu_init(rdev);
3333 r = r600_blit_init(rdev);
3334 if (r) {
3335 r600_blit_fini(rdev);
3336 rdev->asic->copy.copy = NULL;
3337 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
3338 }
3339 2724
3340 /* allocate wb buffer */ 2725 /* allocate wb buffer */
3341 r = radeon_wb_init(rdev); 2726 r = radeon_wb_init(rdev);
@@ -3372,14 +2757,14 @@ static int r600_startup(struct radeon_device *rdev)
3372 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2757 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3373 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 2758 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
3374 R600_CP_RB_RPTR, R600_CP_RB_WPTR, 2759 R600_CP_RB_RPTR, R600_CP_RB_WPTR,
3375 0, 0xfffff, RADEON_CP_PACKET2); 2760 RADEON_CP_PACKET2);
3376 if (r) 2761 if (r)
3377 return r; 2762 return r;
3378 2763
3379 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; 2764 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
3380 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, 2765 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
3381 DMA_RB_RPTR, DMA_RB_WPTR, 2766 DMA_RB_RPTR, DMA_RB_WPTR,
3382 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); 2767 DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
3383 if (r) 2768 if (r)
3384 return r; 2769 return r;
3385 2770
@@ -3548,7 +2933,6 @@ int r600_init(struct radeon_device *rdev)
3548void r600_fini(struct radeon_device *rdev) 2933void r600_fini(struct radeon_device *rdev)
3549{ 2934{
3550 r600_audio_fini(rdev); 2935 r600_audio_fini(rdev);
3551 r600_blit_fini(rdev);
3552 r600_cp_fini(rdev); 2936 r600_cp_fini(rdev);
3553 r600_dma_fini(rdev); 2937 r600_dma_fini(rdev);
3554 r600_irq_fini(rdev); 2938 r600_irq_fini(rdev);
@@ -3600,16 +2984,6 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3600 radeon_ring_write(ring, ib->length_dw); 2984 radeon_ring_write(ring, ib->length_dw);
3601} 2985}
3602 2986
3603void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3604{
3605 struct radeon_ring *ring = &rdev->ring[ib->ring];
3606
3607 radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
3608 radeon_ring_write(ring, ib->gpu_addr);
3609 radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
3610 radeon_ring_write(ring, ib->length_dw);
3611}
3612
3613int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) 2987int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3614{ 2988{
3615 struct radeon_ib ib; 2989 struct radeon_ib ib;
@@ -3663,139 +3037,6 @@ free_scratch:
3663 return r; 3037 return r;
3664} 3038}
3665 3039
3666/**
3667 * r600_dma_ib_test - test an IB on the DMA engine
3668 *
3669 * @rdev: radeon_device pointer
3670 * @ring: radeon_ring structure holding ring information
3671 *
3672 * Test a simple IB in the DMA ring (r6xx-SI).
3673 * Returns 0 on success, error on failure.
3674 */
3675int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3676{
3677 struct radeon_ib ib;
3678 unsigned i;
3679 int r;
3680 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3681 u32 tmp = 0;
3682
3683 if (!ptr) {
3684 DRM_ERROR("invalid vram scratch pointer\n");
3685 return -EINVAL;
3686 }
3687
3688 tmp = 0xCAFEDEAD;
3689 writel(tmp, ptr);
3690
3691 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3692 if (r) {
3693 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3694 return r;
3695 }
3696
3697 ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
3698 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
3699 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
3700 ib.ptr[3] = 0xDEADBEEF;
3701 ib.length_dw = 4;
3702
3703 r = radeon_ib_schedule(rdev, &ib, NULL);
3704 if (r) {
3705 radeon_ib_free(rdev, &ib);
3706 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3707 return r;
3708 }
3709 r = radeon_fence_wait(ib.fence, false);
3710 if (r) {
3711 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3712 return r;
3713 }
3714 for (i = 0; i < rdev->usec_timeout; i++) {
3715 tmp = readl(ptr);
3716 if (tmp == 0xDEADBEEF)
3717 break;
3718 DRM_UDELAY(1);
3719 }
3720 if (i < rdev->usec_timeout) {
3721 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3722 } else {
3723 DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
3724 r = -EINVAL;
3725 }
3726 radeon_ib_free(rdev, &ib);
3727 return r;
3728}
3729
3730int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3731{
3732 struct radeon_fence *fence = NULL;
3733 int r;
3734
3735 r = radeon_set_uvd_clocks(rdev, 53300, 40000);
3736 if (r) {
3737 DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
3738 return r;
3739 }
3740
3741 r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
3742 if (r) {
3743 DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
3744 goto error;
3745 }
3746
3747 r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
3748 if (r) {
3749 DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
3750 goto error;
3751 }
3752
3753 r = radeon_fence_wait(fence, false);
3754 if (r) {
3755 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3756 goto error;
3757 }
3758 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
3759error:
3760 radeon_fence_unref(&fence);
3761 radeon_set_uvd_clocks(rdev, 0, 0);
3762 return r;
3763}
3764
3765/**
3766 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
3767 *
3768 * @rdev: radeon_device pointer
3769 * @ib: IB object to schedule
3770 *
3771 * Schedule an IB in the DMA ring (r6xx-r7xx).
3772 */
3773void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3774{
3775 struct radeon_ring *ring = &rdev->ring[ib->ring];
3776
3777 if (rdev->wb.enabled) {
3778 u32 next_rptr = ring->wptr + 4;
3779 while ((next_rptr & 7) != 5)
3780 next_rptr++;
3781 next_rptr += 3;
3782 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
3783 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3784 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
3785 radeon_ring_write(ring, next_rptr);
3786 }
3787
3788 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
3789 * Pad as necessary with NOPs.
3790 */
3791 while ((ring->wptr & 7) != 5)
3792 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
3793 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
3794 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
3795 radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
3796
3797}
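/*
 * Why the "!= 5" tests above: the INDIRECT_BUFFER packet emitted at the
 * end of this function is 3 dwords long, so starting it at
 * wptr % 8 == 5 makes it end exactly on the required 8-dword boundary
 * (5 + 3 == 8).
 */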
3798
3799/* 3040/*
3800 * Interrupts 3041 * Interrupts
3801 * 3042 *
@@ -3812,7 +3053,7 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
3812 u32 rb_bufsz; 3053 u32 rb_bufsz;
3813 3054
3814 /* Align ring size */ 3055 /* Align ring size */
3815 rb_bufsz = drm_order(ring_size / 4); 3056 rb_bufsz = order_base_2(ring_size / 4);
3816 ring_size = (1 << rb_bufsz) * 4; 3057 ring_size = (1 << rb_bufsz) * 4;
3817 rdev->ih.ring_size = ring_size; 3058 rdev->ih.ring_size = ring_size;
3818 rdev->ih.ptr_mask = rdev->ih.ring_size - 1; 3059 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
@@ -4049,7 +3290,7 @@ int r600_irq_init(struct radeon_device *rdev)
4049 WREG32(INTERRUPT_CNTL, interrupt_cntl); 3290 WREG32(INTERRUPT_CNTL, interrupt_cntl);
4050 3291
4051 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); 3292 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
4052 rb_bufsz = drm_order(rdev->ih.ring_size / 4); 3293 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
4053 3294
4054 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | 3295 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
4055 IH_WPTR_OVERFLOW_CLEAR | 3296 IH_WPTR_OVERFLOW_CLEAR |
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index c92eb86a8e55..47fc2b886979 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -57,12 +57,12 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder)
57 */ 57 */
58static int r600_audio_chipset_supported(struct radeon_device *rdev) 58static int r600_audio_chipset_supported(struct radeon_device *rdev)
59{ 59{
60 return ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE6(rdev); 60 return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev);
61} 61}
62 62
63struct r600_audio r600_audio_status(struct radeon_device *rdev) 63struct r600_audio_pin r600_audio_status(struct radeon_device *rdev)
64{ 64{
65 struct r600_audio status; 65 struct r600_audio_pin status;
66 uint32_t value; 66 uint32_t value;
67 67
68 value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL); 68 value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
@@ -120,16 +120,16 @@ void r600_audio_update_hdmi(struct work_struct *work)
120 struct radeon_device *rdev = container_of(work, struct radeon_device, 120 struct radeon_device *rdev = container_of(work, struct radeon_device,
121 audio_work); 121 audio_work);
122 struct drm_device *dev = rdev->ddev; 122 struct drm_device *dev = rdev->ddev;
123 struct r600_audio audio_status = r600_audio_status(rdev); 123 struct r600_audio_pin audio_status = r600_audio_status(rdev);
124 struct drm_encoder *encoder; 124 struct drm_encoder *encoder;
125 bool changed = false; 125 bool changed = false;
126 126
127 if (rdev->audio_status.channels != audio_status.channels || 127 if (rdev->audio.pin[0].channels != audio_status.channels ||
128 rdev->audio_status.rate != audio_status.rate || 128 rdev->audio.pin[0].rate != audio_status.rate ||
129 rdev->audio_status.bits_per_sample != audio_status.bits_per_sample || 129 rdev->audio.pin[0].bits_per_sample != audio_status.bits_per_sample ||
130 rdev->audio_status.status_bits != audio_status.status_bits || 130 rdev->audio.pin[0].status_bits != audio_status.status_bits ||
131 rdev->audio_status.category_code != audio_status.category_code) { 131 rdev->audio.pin[0].category_code != audio_status.category_code) {
132 rdev->audio_status = audio_status; 132 rdev->audio.pin[0] = audio_status;
133 changed = true; 133 changed = true;
134 } 134 }
135 135
@@ -141,13 +141,13 @@ void r600_audio_update_hdmi(struct work_struct *work)
141 } 141 }
142} 142}
143 143
144/* 144/* enable the audio stream */
145 * turn on/off audio engine 145static void r600_audio_enable(struct radeon_device *rdev,
146 */ 146 struct r600_audio_pin *pin,
147static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable) 147 bool enable)
148{ 148{
149 u32 value = 0; 149 u32 value = 0;
150 DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling"); 150
151 if (ASIC_IS_DCE4(rdev)) { 151 if (ASIC_IS_DCE4(rdev)) {
152 if (enable) { 152 if (enable) {
153 value |= 0x81000000; /* Required to enable audio */ 153 value |= 0x81000000; /* Required to enable audio */
@@ -158,7 +158,7 @@ static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
158 WREG32_P(R600_AUDIO_ENABLE, 158 WREG32_P(R600_AUDIO_ENABLE,
159 enable ? 0x81000000 : 0x0, ~0x81000000); 159 enable ? 0x81000000 : 0x0, ~0x81000000);
160 } 160 }
161 rdev->audio_enabled = enable; 161 DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
162} 162}
163 163
164/* 164/*
@@ -169,13 +169,17 @@ int r600_audio_init(struct radeon_device *rdev)
169 if (!radeon_audio || !r600_audio_chipset_supported(rdev)) 169 if (!radeon_audio || !r600_audio_chipset_supported(rdev))
170 return 0; 170 return 0;
171 171
172 r600_audio_engine_enable(rdev, true); 172 rdev->audio.enabled = true;
173
174 rdev->audio.num_pins = 1;
175 rdev->audio.pin[0].channels = -1;
176 rdev->audio.pin[0].rate = -1;
177 rdev->audio.pin[0].bits_per_sample = -1;
178 rdev->audio.pin[0].status_bits = 0;
179 rdev->audio.pin[0].category_code = 0;
180 rdev->audio.pin[0].id = 0;
173 181
174 rdev->audio_status.channels = -1; 182 r600_audio_enable(rdev, &rdev->audio.pin[0], true);
175 rdev->audio_status.rate = -1;
176 rdev->audio_status.bits_per_sample = -1;
177 rdev->audio_status.status_bits = 0;
178 rdev->audio_status.category_code = 0;
179 183
180 return 0; 184 return 0;
181} 185}
@@ -186,8 +190,16 @@ int r600_audio_init(struct radeon_device *rdev)
186 */ 190 */
187void r600_audio_fini(struct radeon_device *rdev) 191void r600_audio_fini(struct radeon_device *rdev)
188{ 192{
189 if (!rdev->audio_enabled) 193 if (!rdev->audio.enabled)
190 return; 194 return;
191 195
192 r600_audio_engine_enable(rdev, false); 196 r600_audio_enable(rdev, &rdev->audio.pin[0], false);
197
198 rdev->audio.enabled = false;
199}
200
201struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev)
202{
203 /* only one pin on 6xx-NI */
204 return &rdev->audio.pin[0];
193} 205}
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index f651881eb0ae..daf7572be976 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -31,6 +31,37 @@
31 31
32#include "r600_blit_shaders.h" 32#include "r600_blit_shaders.h"
33 33
34/* 23 bits of float fractional data */
35#define I2F_FRAC_BITS 23
36#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
37
38/*
39 * Converts unsigned integer into 32-bit IEEE floating point representation.
40 * Will be exact from 0 to 2^24. Above that, we round towards zero
41 * as the fractional bits will not fit in a float. (It would be better to
42 * round towards even as the fpu does, but that is slower.)
43 */
44static __pure uint32_t int2float(uint32_t x)
45{
46 uint32_t msb, exponent, fraction;
47
48 /* Zero is special */
49 if (!x) return 0;
50
51 /* Get location of the most significant bit */
52 msb = __fls(x);
53
54 /*
55 * Use a rotate instead of a shift because that works both leftwards
56 * and rightwards due to the mod(32) behaviour. This means we don't
57 * need to check to see if we are above 2^24 or not.
58 */
59 fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
60 exponent = (127 + msb) << I2F_FRAC_BITS;
61
62 return fraction + exponent;
63}
64
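/*
 * Worked example of the rotate trick above: int2float(1) has msb == 0,
 * so ror32(1, (0 - 23) & 0x1f) == ror32(1, 9) == 1 << 23. Masking with
 * I2F_MASK leaves a zero fraction, the exponent is (127 + 0) << 23, and
 * the result is 0x3f800000, i.e. 1.0f.
 */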
34#define DI_PT_RECTLIST 0x11 65#define DI_PT_RECTLIST 0x11
35#define DI_INDEX_SIZE_16_BIT 0x0 66#define DI_INDEX_SIZE_16_BIT 0x0
36#define DI_SRC_SEL_AUTO_INDEX 0x2 67#define DI_SRC_SEL_AUTO_INDEX 0x2
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
deleted file mode 100644
index 9fb5780a552f..000000000000
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ /dev/null
@@ -1,785 +0,0 @@
1/*
2 * Copyright 2009 Advanced Micro Devices, Inc.
3 * Copyright 2009 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26#include <drm/drmP.h>
27#include <drm/radeon_drm.h>
28#include "radeon.h"
29
30#include "r600d.h"
31#include "r600_blit_shaders.h"
32#include "radeon_blit_common.h"
33
34/* 23 bits of float fractional data */
35#define I2F_FRAC_BITS 23
36#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
37
38/*
39 * Converts unsigned integer into 32-bit IEEE floating point representation.
40 * Will be exact from 0 to 2^24. Above that, we round towards zero
41 * as the fractional bits will not fit in a float. (It would be better to
42 * round towards even as the fpu does, but that is slower.)
43 */
44__pure uint32_t int2float(uint32_t x)
45{
46 uint32_t msb, exponent, fraction;
47
48 /* Zero is special */
49 if (!x) return 0;
50
51 /* Get location of the most significant bit */
52 msb = __fls(x);
53
54 /*
55 * Use a rotate instead of a shift because that works both leftwards
56 * and rightwards due to the mod(32) behaviour. This means we don't
57 * need to check to see if we are above 2^24 or not.
58 */
59 fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
60 exponent = (127 + msb) << I2F_FRAC_BITS;
61
62 return fraction + exponent;
63}
64
65/* emits 21 on rv770+, 23 on r600 */
66static void
67set_render_target(struct radeon_device *rdev, int format,
68 int w, int h, u64 gpu_addr)
69{
70 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
71 u32 cb_color_info;
72 int pitch, slice;
73
74 h = ALIGN(h, 8);
75 if (h < 8)
76 h = 8;
77
78 cb_color_info = CB_FORMAT(format) |
79 CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
80 CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
81 pitch = (w / 8) - 1;
82 slice = ((w * h) / 64) - 1;
83
84 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
85 radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
86 radeon_ring_write(ring, gpu_addr >> 8);
87
88 if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
89 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
90 radeon_ring_write(ring, 2 << 0);
91 }
92
93 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
94 radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
95 radeon_ring_write(ring, (pitch << 0) | (slice << 10));
96
97 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
98 radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
99 radeon_ring_write(ring, 0);
100
101 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
102 radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
103 radeon_ring_write(ring, cb_color_info);
104
105 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
106 radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
107 radeon_ring_write(ring, 0);
108
109 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
110 radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
111 radeon_ring_write(ring, 0);
112
113 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
114 radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
115 radeon_ring_write(ring, 0);
116}
117
118/* emits 5dw */
119static void
120cp_set_surface_sync(struct radeon_device *rdev,
121 u32 sync_type, u32 size,
122 u64 mc_addr)
123{
124 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
125 u32 cp_coher_size;
126
127 if (size == 0xffffffff)
128 cp_coher_size = 0xffffffff;
129 else
130 cp_coher_size = ((size + 255) >> 8);
131
132 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
133 radeon_ring_write(ring, sync_type);
134 radeon_ring_write(ring, cp_coher_size);
135 radeon_ring_write(ring, mc_addr >> 8);
136 radeon_ring_write(ring, 10); /* poll interval */
137}
138
139/* emits 21dw + 1 surface sync = 26dw */
140static void
141set_shaders(struct radeon_device *rdev)
142{
143 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
144 u64 gpu_addr;
145 u32 sq_pgm_resources;
146
147 /* setup shader regs */
148 sq_pgm_resources = (1 << 0);
149
150 /* VS */
151 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
152 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
153 radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
154 radeon_ring_write(ring, gpu_addr >> 8);
155
156 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
157 radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
158 radeon_ring_write(ring, sq_pgm_resources);
159
160 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
161 radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
162 radeon_ring_write(ring, 0);
163
164 /* PS */
165 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
166 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
167 radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
168 radeon_ring_write(ring, gpu_addr >> 8);
169
170 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
171 radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
172 radeon_ring_write(ring, sq_pgm_resources | (1 << 28));
173
174 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
175 radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
176 radeon_ring_write(ring, 2);
177
178 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
179 radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
180 radeon_ring_write(ring, 0);
181
182 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
183 cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
184}
185
186/* emits 9 + 1 sync (5) = 14*/
187static void
188set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
189{
190 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
191 u32 sq_vtx_constant_word2;
192
193 sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
194 SQ_VTXC_STRIDE(16);
195#ifdef __BIG_ENDIAN
196 sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
197#endif
198
199 radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
200 radeon_ring_write(ring, 0x460);
201 radeon_ring_write(ring, gpu_addr & 0xffffffff);
202 radeon_ring_write(ring, 48 - 1);
203 radeon_ring_write(ring, sq_vtx_constant_word2);
204 radeon_ring_write(ring, 1 << 0);
205 radeon_ring_write(ring, 0);
206 radeon_ring_write(ring, 0);
207 radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);
208
209 if ((rdev->family == CHIP_RV610) ||
210 (rdev->family == CHIP_RV620) ||
211 (rdev->family == CHIP_RS780) ||
212 (rdev->family == CHIP_RS880) ||
213 (rdev->family == CHIP_RV710))
214 cp_set_surface_sync(rdev,
215 PACKET3_TC_ACTION_ENA, 48, gpu_addr);
216 else
217 cp_set_surface_sync(rdev,
218 PACKET3_VC_ACTION_ENA, 48, gpu_addr);
219}
220
221/* emits 9 */
222static void
223set_tex_resource(struct radeon_device *rdev,
224 int format, int w, int h, int pitch,
225 u64 gpu_addr, u32 size)
226{
227 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
228 uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
229
230 if (h < 1)
231 h = 1;
232
233 sq_tex_resource_word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) |
234 S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
235 sq_tex_resource_word0 |= S_038000_PITCH((pitch >> 3) - 1) |
236 S_038000_TEX_WIDTH(w - 1);
237
238 sq_tex_resource_word1 = S_038004_DATA_FORMAT(format);
239 sq_tex_resource_word1 |= S_038004_TEX_HEIGHT(h - 1);
240
241 sq_tex_resource_word4 = S_038010_REQUEST_SIZE(1) |
242 S_038010_DST_SEL_X(SQ_SEL_X) |
243 S_038010_DST_SEL_Y(SQ_SEL_Y) |
244 S_038010_DST_SEL_Z(SQ_SEL_Z) |
245 S_038010_DST_SEL_W(SQ_SEL_W);
246
247 cp_set_surface_sync(rdev,
248 PACKET3_TC_ACTION_ENA, size, gpu_addr);
249
250 radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
251 radeon_ring_write(ring, 0);
252 radeon_ring_write(ring, sq_tex_resource_word0);
253 radeon_ring_write(ring, sq_tex_resource_word1);
254 radeon_ring_write(ring, gpu_addr >> 8);
255 radeon_ring_write(ring, gpu_addr >> 8);
256 radeon_ring_write(ring, sq_tex_resource_word4);
257 radeon_ring_write(ring, 0);
258 radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
259}
260
261/* emits 12 */
262static void
263set_scissors(struct radeon_device *rdev, int x1, int y1,
264 int x2, int y2)
265{
266 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
267 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
268 radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
269 radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
270 radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
271
272 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
273 radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
274 radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
275 radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
276
277 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
278 radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
279 radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
280 radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
281}
282
283/* emits 10 */
284static void
285draw_auto(struct radeon_device *rdev)
286{
287 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
288 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
289 radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
290 radeon_ring_write(ring, DI_PT_RECTLIST);
291
292 radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
293 radeon_ring_write(ring,
294#ifdef __BIG_ENDIAN
295 (2 << 2) |
296#endif
297 DI_INDEX_SIZE_16_BIT);
298
299 radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
300 radeon_ring_write(ring, 1);
301
302 radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
303 radeon_ring_write(ring, 3);
304 radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
305
306}
307
308/* emits 14 */
309static void
310set_default_state(struct radeon_device *rdev)
311{
312 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
313 u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
314 u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
315 int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
316 int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
317 int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
318 u64 gpu_addr;
319 int dwords;
320
321 switch (rdev->family) {
322 case CHIP_R600:
323 num_ps_gprs = 192;
324 num_vs_gprs = 56;
325 num_temp_gprs = 4;
326 num_gs_gprs = 0;
327 num_es_gprs = 0;
328 num_ps_threads = 136;
329 num_vs_threads = 48;
330 num_gs_threads = 4;
331 num_es_threads = 4;
332 num_ps_stack_entries = 128;
333 num_vs_stack_entries = 128;
334 num_gs_stack_entries = 0;
335 num_es_stack_entries = 0;
336 break;
337 case CHIP_RV630:
338 case CHIP_RV635:
339 num_ps_gprs = 84;
340 num_vs_gprs = 36;
341 num_temp_gprs = 4;
342 num_gs_gprs = 0;
343 num_es_gprs = 0;
344 num_ps_threads = 144;
345 num_vs_threads = 40;
346 num_gs_threads = 4;
347 num_es_threads = 4;
348 num_ps_stack_entries = 40;
349 num_vs_stack_entries = 40;
350 num_gs_stack_entries = 32;
351 num_es_stack_entries = 16;
352 break;
353 case CHIP_RV610:
354 case CHIP_RV620:
355 case CHIP_RS780:
356 case CHIP_RS880:
357 default:
358 num_ps_gprs = 84;
359 num_vs_gprs = 36;
360 num_temp_gprs = 4;
361 num_gs_gprs = 0;
362 num_es_gprs = 0;
363 num_ps_threads = 136;
364 num_vs_threads = 48;
365 num_gs_threads = 4;
366 num_es_threads = 4;
367 num_ps_stack_entries = 40;
368 num_vs_stack_entries = 40;
369 num_gs_stack_entries = 32;
370 num_es_stack_entries = 16;
371 break;
372 case CHIP_RV670:
373 num_ps_gprs = 144;
374 num_vs_gprs = 40;
375 num_temp_gprs = 4;
376 num_gs_gprs = 0;
377 num_es_gprs = 0;
378 num_ps_threads = 136;
379 num_vs_threads = 48;
380 num_gs_threads = 4;
381 num_es_threads = 4;
382 num_ps_stack_entries = 40;
383 num_vs_stack_entries = 40;
384 num_gs_stack_entries = 32;
385 num_es_stack_entries = 16;
386 break;
387 case CHIP_RV770:
388 num_ps_gprs = 192;
389 num_vs_gprs = 56;
390 num_temp_gprs = 4;
391 num_gs_gprs = 0;
392 num_es_gprs = 0;
393 num_ps_threads = 188;
394 num_vs_threads = 60;
395 num_gs_threads = 0;
396 num_es_threads = 0;
397 num_ps_stack_entries = 256;
398 num_vs_stack_entries = 256;
399 num_gs_stack_entries = 0;
400 num_es_stack_entries = 0;
401 break;
402 case CHIP_RV730:
403 case CHIP_RV740:
404 num_ps_gprs = 84;
405 num_vs_gprs = 36;
406 num_temp_gprs = 4;
407 num_gs_gprs = 0;
408 num_es_gprs = 0;
409 num_ps_threads = 188;
410 num_vs_threads = 60;
411 num_gs_threads = 0;
412 num_es_threads = 0;
413 num_ps_stack_entries = 128;
414 num_vs_stack_entries = 128;
415 num_gs_stack_entries = 0;
416 num_es_stack_entries = 0;
417 break;
418 case CHIP_RV710:
419 num_ps_gprs = 192;
420 num_vs_gprs = 56;
421 num_temp_gprs = 4;
422 num_gs_gprs = 0;
423 num_es_gprs = 0;
424 num_ps_threads = 144;
425 num_vs_threads = 48;
426 num_gs_threads = 0;
427 num_es_threads = 0;
428 num_ps_stack_entries = 128;
429 num_vs_stack_entries = 128;
430 num_gs_stack_entries = 0;
431 num_es_stack_entries = 0;
432 break;
433 }
434
435 if ((rdev->family == CHIP_RV610) ||
436 (rdev->family == CHIP_RV620) ||
437 (rdev->family == CHIP_RS780) ||
438 (rdev->family == CHIP_RS880) ||
439 (rdev->family == CHIP_RV710))
440 sq_config = 0;
441 else
442 sq_config = VC_ENABLE;
443
444 sq_config |= (DX9_CONSTS |
445 ALU_INST_PREFER_VECTOR |
446 PS_PRIO(0) |
447 VS_PRIO(1) |
448 GS_PRIO(2) |
449 ES_PRIO(3));
450
451 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
452 NUM_VS_GPRS(num_vs_gprs) |
453 NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
454 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
455 NUM_ES_GPRS(num_es_gprs));
456 sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
457 NUM_VS_THREADS(num_vs_threads) |
458 NUM_GS_THREADS(num_gs_threads) |
459 NUM_ES_THREADS(num_es_threads));
460 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
461 NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
462 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
463 NUM_ES_STACK_ENTRIES(num_es_stack_entries));
464
465 /* emit an IB pointing at default state */
466 dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
467 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
468 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
469 radeon_ring_write(ring,
470#ifdef __BIG_ENDIAN
471 (2 << 0) |
472#endif
473 (gpu_addr & 0xFFFFFFFC));
474 radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
475 radeon_ring_write(ring, dwords);
476
477 /* SQ config */
478 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
479 radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
480 radeon_ring_write(ring, sq_config);
481 radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
482 radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
483 radeon_ring_write(ring, sq_thread_resource_mgmt);
484 radeon_ring_write(ring, sq_stack_resource_mgmt_1);
485 radeon_ring_write(ring, sq_stack_resource_mgmt_2);
486}
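/* Added note, not in the original source: the per-family budgets chosen in
 * the switch above are packed into the SQ registers by the r600d.h field
 * macros exactly as shown below it; e.g. the RV770 case resolves to
 *
 *   sq_gpr_resource_mgmt_1 = NUM_PS_GPRS(192) |
 *                            NUM_VS_GPRS(56) |
 *                            NUM_CLAUSE_TEMP_GPRS(4);
 *
 * with the thread and stack registers assembled the same way. */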
487
488int r600_blit_init(struct radeon_device *rdev)
489{
490 u32 obj_size;
491 int i, r, dwords;
492 void *ptr;
493 u32 packet2s[16];
494 int num_packet2s = 0;
495
496 rdev->r600_blit.primitives.set_render_target = set_render_target;
497 rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
498 rdev->r600_blit.primitives.set_shaders = set_shaders;
499 rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
500 rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
501 rdev->r600_blit.primitives.set_scissors = set_scissors;
502 rdev->r600_blit.primitives.draw_auto = draw_auto;
503 rdev->r600_blit.primitives.set_default_state = set_default_state;
504
505 rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
506 rdev->r600_blit.ring_size_common += 40; /* shaders + def state */
507 rdev->r600_blit.ring_size_common += 5; /* done copy */
508 rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
509
510 rdev->r600_blit.ring_size_per_loop = 76;
511 /* set_render_target emits 2 extra dwords on rv6xx */
512 if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
513 rdev->r600_blit.ring_size_per_loop += 2;
514
515 rdev->r600_blit.max_dim = 8192;
516
517 rdev->r600_blit.state_offset = 0;
518
519 if (rdev->family >= CHIP_RV770)
520 rdev->r600_blit.state_len = r7xx_default_size;
521 else
522 rdev->r600_blit.state_len = r6xx_default_size;
523
524 dwords = rdev->r600_blit.state_len;
525 while (dwords & 0xf) {
526 packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
527 dwords++;
528 }
529
530 obj_size = dwords * 4;
531 obj_size = ALIGN(obj_size, 256);
532
533 rdev->r600_blit.vs_offset = obj_size;
534 obj_size += r6xx_vs_size * 4;
535 obj_size = ALIGN(obj_size, 256);
536
537 rdev->r600_blit.ps_offset = obj_size;
538 obj_size += r6xx_ps_size * 4;
539 obj_size = ALIGN(obj_size, 256);
540
541 /* pin copy shader into vram if not already initialized */
542 if (rdev->r600_blit.shader_obj == NULL) {
543 r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
544 RADEON_GEM_DOMAIN_VRAM,
545 NULL, &rdev->r600_blit.shader_obj);
546 if (r) {
547 DRM_ERROR("r600 failed to allocate shader\n");
548 return r;
549 }
550
551 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
552 if (unlikely(r != 0))
553 return r;
554 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
555 &rdev->r600_blit.shader_gpu_addr);
556 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
557 if (r) {
558 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
559 return r;
560 }
561 }
562
563 DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
564 obj_size,
565 rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
566
567 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
568 if (unlikely(r != 0))
569 return r;
570 r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
571 if (r) {
572 DRM_ERROR("failed to map blit object %d\n", r);
573 return r;
574 }
575 if (rdev->family >= CHIP_RV770)
576 memcpy_toio(ptr + rdev->r600_blit.state_offset,
577 r7xx_default_state, rdev->r600_blit.state_len * 4);
578 else
579 memcpy_toio(ptr + rdev->r600_blit.state_offset,
580 r6xx_default_state, rdev->r600_blit.state_len * 4);
581 if (num_packet2s)
582 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
583 packet2s, num_packet2s * 4);
584 for (i = 0; i < r6xx_vs_size; i++)
585 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
586 for (i = 0; i < r6xx_ps_size; i++)
587 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
588 radeon_bo_kunmap(rdev->r600_blit.shader_obj);
589 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
590
591 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
592 return 0;
593}
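/* Added note: the buffer object built by r600_blit_init() above ends up
 * with this layout (each region rounded up to a 256-byte boundary):
 *
 *   state_offset (0)  default state, NOP-padded to a 16-dword multiple
 *   vs_offset         vertex shader (r6xx_vs_size dwords)
 *   ps_offset         pixel shader  (r6xx_ps_size dwords)
 */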
594
595void r600_blit_fini(struct radeon_device *rdev)
596{
597 int r;
598
599 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
600 if (rdev->r600_blit.shader_obj == NULL)
601 return;
602 /* If we can't reserve the bo, unref should be enough to destroy
603 * it when it becomes idle.
604 */
605 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
606 if (!r) {
607 radeon_bo_unpin(rdev->r600_blit.shader_obj);
608 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
609 }
610 radeon_bo_unref(&rdev->r600_blit.shader_obj);
611}
612
613static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
614 int *width, int *height, int max_dim)
615{
616 unsigned max_pages;
617 unsigned pages = num_gpu_pages;
618 int w, h;
619
620 if (num_gpu_pages == 0) {
621 /* not supposed to be called with no pages, but just in case */
622 h = 0;
623 w = 0;
624 pages = 0;
625 WARN_ON(1);
626 } else {
627 int rect_order = 2;
628 h = RECT_UNIT_H;
629 while (num_gpu_pages / rect_order) {
630 h *= 2;
631 rect_order *= 4;
632 if (h >= max_dim) {
633 h = max_dim;
634 break;
635 }
636 }
637 max_pages = (max_dim * h) / (RECT_UNIT_W * RECT_UNIT_H);
638 if (pages > max_pages)
639 pages = max_pages;
640 w = (pages * RECT_UNIT_W * RECT_UNIT_H) / h;
641 w = (w / RECT_UNIT_W) * RECT_UNIT_W;
642 pages = (w * h) / (RECT_UNIT_W * RECT_UNIT_H);
643 BUG_ON(pages == 0);
644 }
645
646
647 DRM_DEBUG("blit_rectangle: h=%d, w=%d, pages=%d\n", h, w, pages);
648
649	/* return width and height only if the caller wants them */
650 if (height)
651 *height = h;
652 if (width)
653 *width = w;
654
655 return pages;
656}
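/* Added worked example (assuming RECT_UNIT_W = RECT_UNIT_H = 32, i.e. one
 * 4 KiB GPU page per 32x32 unit, which is what the arithmetic implies):
 * for num_gpu_pages = 32 and max_dim = 8192,
 *   h doubles while rect_order quadruples: 32 -> 64 -> 128 -> 256
 *   w = (32 * 32 * 32) / 256 = 128 (already a RECT_UNIT_W multiple)
 *   pages = (128 * 256) / (32 * 32) = 32
 * so the whole transfer fits in a single 128x256 rectangle. */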
657
658
659int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
660 struct radeon_fence **fence, struct radeon_sa_bo **vb,
661 struct radeon_semaphore **sem)
662{
663 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
664 int r;
665 int ring_size;
666 int num_loops = 0;
667 int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
668
669 /* num loops */
670 while (num_gpu_pages) {
671 num_gpu_pages -=
672 r600_blit_create_rect(num_gpu_pages, NULL, NULL,
673 rdev->r600_blit.max_dim);
674 num_loops++;
675 }
676
677 /* 48 bytes for vertex per loop */
678 r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, vb,
679 (num_loops*48)+256, 256, true);
680 if (r) {
681 return r;
682 }
683
684 r = radeon_semaphore_create(rdev, sem);
685 if (r) {
686 radeon_sa_bo_free(rdev, vb, NULL);
687 return r;
688 }
689
690 /* calculate number of loops correctly */
691 ring_size = num_loops * dwords_per_loop;
692 ring_size += rdev->r600_blit.ring_size_common;
693 r = radeon_ring_lock(rdev, ring, ring_size);
694 if (r) {
695 radeon_sa_bo_free(rdev, vb, NULL);
696 radeon_semaphore_free(rdev, sem, NULL);
697 return r;
698 }
699
700 if (radeon_fence_need_sync(*fence, RADEON_RING_TYPE_GFX_INDEX)) {
701 radeon_semaphore_sync_rings(rdev, *sem, (*fence)->ring,
702 RADEON_RING_TYPE_GFX_INDEX);
703 radeon_fence_note_sync(*fence, RADEON_RING_TYPE_GFX_INDEX);
704 } else {
705 radeon_semaphore_free(rdev, sem, NULL);
706 }
707
708 rdev->r600_blit.primitives.set_default_state(rdev);
709 rdev->r600_blit.primitives.set_shaders(rdev);
710 return 0;
711}
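/* Added note: the lock size follows from the constants in r600_blit_init().
 * For example, a copy that splits into three rectangles on an RV770 asks for
 *   ring_size = 3 * 76 + (8 + 40 + 5 + 16) = 297 dwords
 * (rv6xx parts between R600 and RV770 use 78 dwords per loop instead). */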
712
713void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
714 struct radeon_sa_bo *vb, struct radeon_semaphore *sem)
715{
716 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
717 int r;
718
719 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
720 if (r) {
721 radeon_ring_unlock_undo(rdev, ring);
722 return;
723 }
724
725 radeon_ring_unlock_commit(rdev, ring);
726 radeon_sa_bo_free(rdev, &vb, *fence);
727 radeon_semaphore_free(rdev, &sem, *fence);
728}
729
730void r600_kms_blit_copy(struct radeon_device *rdev,
731 u64 src_gpu_addr, u64 dst_gpu_addr,
732 unsigned num_gpu_pages,
733 struct radeon_sa_bo *vb)
734{
735 u64 vb_gpu_addr;
736 u32 *vb_cpu_addr;
737
738 DRM_DEBUG("emitting copy %16llx %16llx %d\n",
739 src_gpu_addr, dst_gpu_addr, num_gpu_pages);
740 vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb);
741 vb_gpu_addr = radeon_sa_bo_gpu_addr(vb);
742
743 while (num_gpu_pages) {
744 int w, h;
745 unsigned size_in_bytes;
746 unsigned pages_per_loop =
747 r600_blit_create_rect(num_gpu_pages, &w, &h,
748 rdev->r600_blit.max_dim);
749
750 size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
751 DRM_DEBUG("rectangle w=%d h=%d\n", w, h);
752
753 vb_cpu_addr[0] = 0;
754 vb_cpu_addr[1] = 0;
755 vb_cpu_addr[2] = 0;
756 vb_cpu_addr[3] = 0;
757
758 vb_cpu_addr[4] = 0;
759 vb_cpu_addr[5] = int2float(h);
760 vb_cpu_addr[6] = 0;
761 vb_cpu_addr[7] = int2float(h);
762
763 vb_cpu_addr[8] = int2float(w);
764 vb_cpu_addr[9] = int2float(h);
765 vb_cpu_addr[10] = int2float(w);
766 vb_cpu_addr[11] = int2float(h);
767
768 rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
769 w, h, w, src_gpu_addr, size_in_bytes);
770 rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
771 w, h, dst_gpu_addr);
772 rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
773 rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
774 rdev->r600_blit.primitives.draw_auto(rdev);
775 rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
776 PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
777 size_in_bytes, dst_gpu_addr);
778
779 vb_cpu_addr += 12;
780 vb_gpu_addr += 4*12;
781 src_gpu_addr += size_in_bytes;
782 dst_gpu_addr += size_in_bytes;
783 num_gpu_pages -= pages_per_loop;
784 }
785}
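To make the vertex layout concrete, here is a minimal standalone sketch (not part of the driver) of the per-loop buffer fill. It assumes int2float() is the driver's u32-to-IEEE754 helper and that draw_auto() consumes the three vertices as a rectangle-style primitive, which is how three corners can cover the full w x h destination.

/* sketch: one blit rect = 3 vertices of (x, y, u, v) floats, 12 dwords */
static void fill_blit_vb(u32 *vb, u32 w, u32 h)
{
	const u32 x[3] = { 0, 0, int2float(w) };
	const u32 y[3] = { 0, int2float(h), int2float(h) };
	int i;

	for (i = 0; i < 3; i++) {
		vb[4 * i + 0] = x[i];	/* position x */
		vb[4 * i + 1] = y[i];	/* position y */
		vb[4 * i + 2] = x[i];	/* texcoord u, same as x */
		vb[4 * i + 3] = y[i];	/* texcoord v, same as y */
	}
}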
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
index 2f3ce7a75976..f437d36dd98c 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -35,5 +35,4 @@ extern const u32 r6xx_default_state[];
 extern const u32 r6xx_ps_size, r6xx_vs_size;
 extern const u32 r6xx_default_size, r7xx_default_size;
 
-__pure uint32_t int2float(uint32_t x);
 #endif
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 1c51c08b1fde..d8eb48bff0ed 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2200,13 +2200,13 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
 	dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
 			      + init->ring_size / sizeof(u32));
 	dev_priv->ring.size = init->ring_size;
-	dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+	dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
 
 	dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
-	dev_priv->ring.rptr_update_l2qw = drm_order(/* init->rptr_update */ 4096 / 8);
+	dev_priv->ring.rptr_update_l2qw = order_base_2(/* init->rptr_update */ 4096 / 8);
 
 	dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
-	dev_priv->ring.fetch_size_l2ow = drm_order(/* init->fetch_size */ 32 / 16);
+	dev_priv->ring.fetch_size_l2ow = order_base_2(/* init->fetch_size */ 32 / 16);
 
 	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
 
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
new file mode 100644
index 000000000000..3b317456512a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -0,0 +1,497 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <drm/drmP.h>
25#include "radeon.h"
26#include "radeon_asic.h"
27#include "r600d.h"
28
29u32 r600_gpu_check_soft_reset(struct radeon_device *rdev);
30
31/*
32 * DMA
33 * Starting with R600, the GPU has an asynchronous
34 * DMA engine. The programming model is very similar
35 * to the 3D engine (ring buffer, IBs, etc.), but the
36 * DMA controller has its own packet format that is
37 * different from the PM4 format used by the 3D engine.
38 * It supports copying data, writing embedded data,
39 * solid fills, and a number of other things. It also
40 * has support for tiling/detiling of buffers.
41 */
42
43/**
44 * r600_dma_get_rptr - get the current read pointer
45 *
46 * @rdev: radeon_device pointer
47 * @ring: radeon ring pointer
48 *
49 * Get the current rptr from the hardware (r6xx+).
50 */
51uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
52 struct radeon_ring *ring)
53{
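	/* added note: the hardware rptr is a byte offset; the mask keeps the
	 * dword-aligned bits and >> 2 converts it to ring dword units */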
54 return (radeon_ring_generic_get_rptr(rdev, ring) & 0x3fffc) >> 2;
55}
56
57/**
58 * r600_dma_get_wptr - get the current write pointer
59 *
60 * @rdev: radeon_device pointer
61 * @ring: radeon ring pointer
62 *
63 * Get the current wptr from the hardware (r6xx+).
64 */
65uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
66 struct radeon_ring *ring)
67{
68 return (RREG32(ring->wptr_reg) & 0x3fffc) >> 2;
69}
70
71/**
72 * r600_dma_set_wptr - commit the write pointer
73 *
74 * @rdev: radeon_device pointer
75 * @ring: radeon ring pointer
76 *
77 * Write the wptr back to the hardware (r6xx+).
78 */
79void r600_dma_set_wptr(struct radeon_device *rdev,
80 struct radeon_ring *ring)
81{
82 WREG32(ring->wptr_reg, (ring->wptr << 2) & 0x3fffc);
83}
84
85/**
86 * r600_dma_stop - stop the async dma engine
87 *
88 * @rdev: radeon_device pointer
89 *
90 * Stop the async dma engine (r6xx-evergreen).
91 */
92void r600_dma_stop(struct radeon_device *rdev)
93{
94 u32 rb_cntl = RREG32(DMA_RB_CNTL);
95
96 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
97
98 rb_cntl &= ~DMA_RB_ENABLE;
99 WREG32(DMA_RB_CNTL, rb_cntl);
100
101 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
102}
103
104/**
105 * r600_dma_resume - setup and start the async dma engine
106 *
107 * @rdev: radeon_device pointer
108 *
109 * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
110 * Returns 0 for success, error for failure.
111 */
112int r600_dma_resume(struct radeon_device *rdev)
113{
114 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
115 u32 rb_cntl, dma_cntl, ib_cntl;
116 u32 rb_bufsz;
117 int r;
118
119 /* Reset dma */
120 if (rdev->family >= CHIP_RV770)
121 WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
122 else
123 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
124 RREG32(SRBM_SOFT_RESET);
125 udelay(50);
126 WREG32(SRBM_SOFT_RESET, 0);
127
128 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
129 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
130
131 /* Set ring buffer size in dwords */
132 rb_bufsz = order_base_2(ring->ring_size / 4);
133 rb_cntl = rb_bufsz << 1;
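	/* added note: e.g. a 64 KiB ring is 16384 dwords, so rb_bufsz =
	 * order_base_2(16384) = 14; the << 1 places the log2 size in the
	 * DMA_RB_CNTL size field */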
134#ifdef __BIG_ENDIAN
135 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
136#endif
137 WREG32(DMA_RB_CNTL, rb_cntl);
138
139 /* Initialize the ring buffer's read and write pointers */
140 WREG32(DMA_RB_RPTR, 0);
141 WREG32(DMA_RB_WPTR, 0);
142
143 /* set the wb address whether it's enabled or not */
144 WREG32(DMA_RB_RPTR_ADDR_HI,
145 upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
146 WREG32(DMA_RB_RPTR_ADDR_LO,
147 ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
148
149 if (rdev->wb.enabled)
150 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
151
152 WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
153
154 /* enable DMA IBs */
155 ib_cntl = DMA_IB_ENABLE;
156#ifdef __BIG_ENDIAN
157 ib_cntl |= DMA_IB_SWAP_ENABLE;
158#endif
159 WREG32(DMA_IB_CNTL, ib_cntl);
160
161 dma_cntl = RREG32(DMA_CNTL);
162 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
163 WREG32(DMA_CNTL, dma_cntl);
164
165 if (rdev->family >= CHIP_RV770)
166 WREG32(DMA_MODE, 1);
167
168 ring->wptr = 0;
169 WREG32(DMA_RB_WPTR, ring->wptr << 2);
170
171 ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
172
173 WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
174
175 ring->ready = true;
176
177 r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
178 if (r) {
179 ring->ready = false;
180 return r;
181 }
182
183 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
184
185 return 0;
186}
187
188/**
189 * r600_dma_fini - tear down the async dma engine
190 *
191 * @rdev: radeon_device pointer
192 *
193 * Stop the async dma engine and free the ring (r6xx-evergreen).
194 */
195void r600_dma_fini(struct radeon_device *rdev)
196{
197 r600_dma_stop(rdev);
198 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
199}
200
201/**
202 * r600_dma_is_lockup - Check if the DMA engine is locked up
203 *
204 * @rdev: radeon_device pointer
205 * @ring: radeon_ring structure holding ring information
206 *
207 * Check if the async DMA engine is locked up.
208 * Returns true if the engine appears to be locked up, false if not.
209 */
210bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
211{
212 u32 reset_mask = r600_gpu_check_soft_reset(rdev);
213
214 if (!(reset_mask & RADEON_RESET_DMA)) {
215 radeon_ring_lockup_update(ring);
216 return false;
217 }
218 /* force ring activities */
219 radeon_ring_force_activity(rdev, ring);
220 return radeon_ring_test_lockup(rdev, ring);
221}
222
223
224/**
225 * r600_dma_ring_test - simple async dma engine test
226 *
227 * @rdev: radeon_device pointer
228 * @ring: radeon_ring structure holding ring information
229 *
230 * Test the DMA engine by using it to write a
231 * value to memory (r6xx-SI).
232 * Returns 0 for success, error for failure.
233 */
234int r600_dma_ring_test(struct radeon_device *rdev,
235 struct radeon_ring *ring)
236{
237 unsigned i;
238 int r;
239 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
240 u32 tmp;
241
242 if (!ptr) {
243 DRM_ERROR("invalid vram scratch pointer\n");
244 return -EINVAL;
245 }
246
247 tmp = 0xCAFEDEAD;
248 writel(tmp, ptr);
249
250 r = radeon_ring_lock(rdev, ring, 4);
251 if (r) {
252 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
253 return r;
254 }
255 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
256 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
257 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
258 radeon_ring_write(ring, 0xDEADBEEF);
259 radeon_ring_unlock_commit(rdev, ring);
260
261 for (i = 0; i < rdev->usec_timeout; i++) {
262 tmp = readl(ptr);
263 if (tmp == 0xDEADBEEF)
264 break;
265 DRM_UDELAY(1);
266 }
267
268 if (i < rdev->usec_timeout) {
269 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
270 } else {
271 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
272 ring->idx, tmp);
273 r = -EINVAL;
274 }
275 return r;
276}
277
278/**
279 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
280 *
281 * @rdev: radeon_device pointer
282 * @fence: radeon fence object
283 *
284 * Add a DMA fence packet to the ring to write
285 * the fence seq number and DMA trap packet to generate
286 * an interrupt if needed (r6xx-r7xx).
287 */
288void r600_dma_fence_ring_emit(struct radeon_device *rdev,
289 struct radeon_fence *fence)
290{
291 struct radeon_ring *ring = &rdev->ring[fence->ring];
292 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
293
294 /* write the fence */
295 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
296 radeon_ring_write(ring, addr & 0xfffffffc);
297 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
298 radeon_ring_write(ring, lower_32_bits(fence->seq));
299 /* generate an interrupt */
300 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
301}
302
303/**
304 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
305 *
306 * @rdev: radeon_device pointer
307 * @ring: radeon_ring structure holding ring information
308 * @semaphore: radeon semaphore object
309 * @emit_wait: wait or signal semaphore
310 *
311 * Add a DMA semaphore packet to the ring to wait on or signal
312 * other rings (r6xx-SI).
313 */
314void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
315 struct radeon_ring *ring,
316 struct radeon_semaphore *semaphore,
317 bool emit_wait)
318{
319 u64 addr = semaphore->gpu_addr;
320 u32 s = emit_wait ? 0 : 1;
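	/* added note: 's' selects the packet flavor, 1 = signal, 0 = wait */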
321
322 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
323 radeon_ring_write(ring, addr & 0xfffffffc);
324 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
325}
326
327/**
328 * r600_dma_ib_test - test an IB on the DMA engine
329 *
330 * @rdev: radeon_device pointer
331 * @ring: radeon_ring structure holding ring information
332 *
333 * Test a simple IB in the DMA ring (r6xx-SI).
334 * Returns 0 on success, error on failure.
335 */
336int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
337{
338 struct radeon_ib ib;
339 unsigned i;
340 int r;
341 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
342 u32 tmp = 0;
343
344 if (!ptr) {
345 DRM_ERROR("invalid vram scratch pointer\n");
346 return -EINVAL;
347 }
348
349 tmp = 0xCAFEDEAD;
350 writel(tmp, ptr);
351
352 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
353 if (r) {
354 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
355 return r;
356 }
357
358 ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
359 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
360 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
361 ib.ptr[3] = 0xDEADBEEF;
362 ib.length_dw = 4;
363
364 r = radeon_ib_schedule(rdev, &ib, NULL);
365 if (r) {
366 radeon_ib_free(rdev, &ib);
367 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
368 return r;
369 }
370 r = radeon_fence_wait(ib.fence, false);
371 if (r) {
372 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
373 return r;
374 }
375 for (i = 0; i < rdev->usec_timeout; i++) {
376 tmp = readl(ptr);
377 if (tmp == 0xDEADBEEF)
378 break;
379 DRM_UDELAY(1);
380 }
381 if (i < rdev->usec_timeout) {
382 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
383 } else {
384 DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
385 r = -EINVAL;
386 }
387 radeon_ib_free(rdev, &ib);
388 return r;
389}
390
391/**
392 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
393 *
394 * @rdev: radeon_device pointer
395 * @ib: IB object to schedule
396 *
397 * Schedule an IB in the DMA ring (r6xx-r7xx).
398 */
399void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
400{
401 struct radeon_ring *ring = &rdev->ring[ib->ring];
402
403 if (rdev->wb.enabled) {
404 u32 next_rptr = ring->wptr + 4;
405 while ((next_rptr & 7) != 5)
406 next_rptr++;
407 next_rptr += 3;
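		/* added note: the 4-dword write packet below plus NOP padding
		 * leaves the IB packet starting at (wptr & 7) == 5, so its
		 * 3 dwords end exactly on an 8-dword boundary (5 + 3 = 8) */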
408 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
409 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
410 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
411 radeon_ring_write(ring, next_rptr);
412 }
413
414 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
415 * Pad as necessary with NOPs.
416 */
417 while ((ring->wptr & 7) != 5)
418 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
419 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
420 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
421 radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
422
423}
424
425/**
426 * r600_copy_dma - copy pages using the DMA engine
427 *
428 * @rdev: radeon_device pointer
429 * @src_offset: src GPU address
430 * @dst_offset: dst GPU address
431 * @num_gpu_pages: number of GPU pages to xfer
432 * @fence: radeon fence object
433 *
434 * Copy GPU pages using the DMA engine (r6xx).
435 * Used by the radeon ttm implementation to move pages if
436 * registered as the asic copy callback.
437 */
438int r600_copy_dma(struct radeon_device *rdev,
439 uint64_t src_offset, uint64_t dst_offset,
440 unsigned num_gpu_pages,
441 struct radeon_fence **fence)
442{
443 struct radeon_semaphore *sem = NULL;
444 int ring_index = rdev->asic->copy.dma_ring_index;
445 struct radeon_ring *ring = &rdev->ring[ring_index];
446 u32 size_in_dw, cur_size_in_dw;
447 int i, num_loops;
448 int r = 0;
449
450 r = radeon_semaphore_create(rdev, &sem);
451 if (r) {
452 DRM_ERROR("radeon: moving bo (%d).\n", r);
453 return r;
454 }
455
456 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
457 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
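	/* added note: one DMA_PACKET_COPY moves at most 0xFFFE dwords (just
	 * under 256 KiB); e.g. 1 MiB = 262144 dwords -> 5 loops here */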
458 r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
459 if (r) {
460 DRM_ERROR("radeon: moving bo (%d).\n", r);
461 radeon_semaphore_free(rdev, &sem, NULL);
462 return r;
463 }
464
465 if (radeon_fence_need_sync(*fence, ring->idx)) {
466 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
467 ring->idx);
468 radeon_fence_note_sync(*fence, ring->idx);
469 } else {
470 radeon_semaphore_free(rdev, &sem, NULL);
471 }
472
473 for (i = 0; i < num_loops; i++) {
474 cur_size_in_dw = size_in_dw;
475 if (cur_size_in_dw > 0xFFFE)
476 cur_size_in_dw = 0xFFFE;
477 size_in_dw -= cur_size_in_dw;
478 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
479 radeon_ring_write(ring, dst_offset & 0xfffffffc);
480 radeon_ring_write(ring, src_offset & 0xfffffffc);
481 radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
482 (upper_32_bits(src_offset) & 0xff)));
483 src_offset += cur_size_in_dw * 4;
484 dst_offset += cur_size_in_dw * 4;
485 }
486
487 r = radeon_fence_emit(rdev, fence, ring->idx);
488 if (r) {
489 radeon_ring_unlock_undo(rdev, ring);
490 return r;
491 }
492
493 radeon_ring_unlock_commit(rdev, ring);
494 radeon_semaphore_free(rdev, &sem, *fence);
495
496 return r;
497}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index b88f54b134ab..fa0de46fcc0d 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -174,6 +174,24 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
 	return vblank_time_us;
 }
 
+u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 vrefresh = 0;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+			vrefresh = radeon_crtc->hw_mode.vrefresh;
+			break;
+		}
+	}
+
+	return vrefresh;
+}
+
 void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
 			    u32 *p, u32 *u)
 {
@@ -278,9 +296,9 @@ bool r600_dynamicpm_enabled(struct radeon_device *rdev)
 void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
 {
 	if (enable)
-		WREG32_P(GENERAL_PWRMGT, 0, ~SCLK_PWRMGT_OFF);
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
 	else
-		WREG32_P(GENERAL_PWRMGT, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
 }
 
 void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
@@ -745,6 +763,8 @@ bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
 	case THERMAL_TYPE_SUMO:
 	case THERMAL_TYPE_NI:
 	case THERMAL_TYPE_SI:
+	case THERMAL_TYPE_CI:
+	case THERMAL_TYPE_KV:
 		return true;
 	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
 	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
@@ -779,15 +799,19 @@ static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependen
 	u32 size = atom_table->ucNumEntries *
 		sizeof(struct radeon_clock_voltage_dependency_entry);
 	int i;
+	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
 
 	radeon_table->entries = kzalloc(size, GFP_KERNEL);
 	if (!radeon_table->entries)
 		return -ENOMEM;
 
+	entry = &atom_table->entries[0];
 	for (i = 0; i < atom_table->ucNumEntries; i++) {
-		radeon_table->entries[i].clk = le16_to_cpu(atom_table->entries[i].usClockLow) |
-			(atom_table->entries[i].ucClockHigh << 16);
-		radeon_table->entries[i].v = le16_to_cpu(atom_table->entries[i].usVoltage);
+		radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
+			(entry->ucClockHigh << 16);
+		radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
+		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
+			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
 	}
 	radeon_table->count = atom_table->ucNumEntries;
 
@@ -875,6 +899,19 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
 			return ret;
 		}
 	}
+	if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
+		dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
+		ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+						       dep_table);
+		if (ret) {
+			kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
+			kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
+			kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
+			return ret;
+		}
+	}
 	if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
 		ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
 			(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
@@ -898,27 +935,27 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
 			(ATOM_PPLIB_PhaseSheddingLimits_Table *)
 			(mode_info->atom_context->bios + data_offset +
 			 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
+		ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
 
 		rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
 			kzalloc(psl->ucNumEntries *
 				sizeof(struct radeon_phase_shedding_limits_entry),
 				GFP_KERNEL);
 		if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
-			kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
-			kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
-			kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
+			r600_free_extended_power_table(rdev);
 			return -ENOMEM;
 		}
 
+		entry = &psl->entries[0];
 		for (i = 0; i < psl->ucNumEntries; i++) {
 			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
-				le16_to_cpu(psl->entries[i].usSclkLow) |
-				(psl->entries[i].ucSclkHigh << 16);
+				le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
 			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
-				le16_to_cpu(psl->entries[i].usMclkLow) |
-				(psl->entries[i].ucMclkHigh << 16);
+				le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
 			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
-				le16_to_cpu(psl->entries[i].usVoltage);
+				le16_to_cpu(entry->usVoltage);
+			entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
+				((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
 		}
 		rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
 			psl->ucNumEntries;
@@ -945,30 +982,140 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
 				(ATOM_PPLIB_CAC_Leakage_Table *)
 				(mode_info->atom_context->bios + data_offset +
 				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
+			ATOM_PPLIB_CAC_Leakage_Record *entry;
 			u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
 			rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
 			if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
-				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
-				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
-				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
+				r600_free_extended_power_table(rdev);
 				return -ENOMEM;
 			}
+			entry = &cac_table->entries[0];
 			for (i = 0; i < cac_table->ucNumEntries; i++) {
-				rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
-					le16_to_cpu(cac_table->entries[i].usVddc);
-				rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
-					le32_to_cpu(cac_table->entries[i].ulLeakageValue);
+				if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
+						le16_to_cpu(entry->usVddc1);
+					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
+						le16_to_cpu(entry->usVddc2);
+					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
+						le16_to_cpu(entry->usVddc3);
+				} else {
+					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
+						le16_to_cpu(entry->usVddc);
+					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
+						le32_to_cpu(entry->ulLeakageValue);
+				}
+				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
+					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
 			}
 			rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
 		}
 	}
 
-	/* ppm table */
+	/* ext tables */
 	if (le16_to_cpu(power_info->pplib.usTableSize) >=
 	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
 		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
 			(mode_info->atom_context->bios + data_offset +
 			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
1020 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
1021 ext_hdr->usVCETableOffset) {
1022 VCEClockInfoArray *array = (VCEClockInfoArray *)
1023 (mode_info->atom_context->bios + data_offset +
1024 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
1025 ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
1026 (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
1027 (mode_info->atom_context->bios + data_offset +
1028 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
1029 1 + array->ucNumEntries * sizeof(VCEClockInfo));
1030 ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
1031 u32 size = limits->numEntries *
1032 sizeof(struct radeon_vce_clock_voltage_dependency_entry);
1033 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
1034 kzalloc(size, GFP_KERNEL);
1035 if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
1036 r600_free_extended_power_table(rdev);
1037 return -ENOMEM;
1038 }
1039 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
1040 limits->numEntries;
1041 entry = &limits->entries[0];
1042 for (i = 0; i < limits->numEntries; i++) {
1043 VCEClockInfo *vce_clk = (VCEClockInfo *)
1044 ((u8 *)&array->entries[0] +
1045 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
1046 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
1047 le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
1048 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
1049 le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
1050 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
1051 le16_to_cpu(entry->usVoltage);
1052 entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
1053 ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
1054 }
1055 }
1056 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
1057 ext_hdr->usUVDTableOffset) {
1058 UVDClockInfoArray *array = (UVDClockInfoArray *)
1059 (mode_info->atom_context->bios + data_offset +
1060 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
1061 ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
1062 (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
1063 (mode_info->atom_context->bios + data_offset +
1064 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
1065 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
1066 ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
1067 u32 size = limits->numEntries *
1068 sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
1069 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
1070 kzalloc(size, GFP_KERNEL);
1071 if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
1072 r600_free_extended_power_table(rdev);
1073 return -ENOMEM;
1074 }
1075 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
1076 limits->numEntries;
1077 entry = &limits->entries[0];
1078 for (i = 0; i < limits->numEntries; i++) {
1079 UVDClockInfo *uvd_clk = (UVDClockInfo *)
1080 ((u8 *)&array->entries[0] +
1081 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
1082 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
1083 le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
1084 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
1085 le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
1086 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
1087 le16_to_cpu(limits->entries[i].usVoltage);
1088 entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
1089 ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
1090 }
1091 }
1092 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
1093 ext_hdr->usSAMUTableOffset) {
1094 ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
1095 (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
1096 (mode_info->atom_context->bios + data_offset +
1097 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
1098 ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
1099 u32 size = limits->numEntries *
1100 sizeof(struct radeon_clock_voltage_dependency_entry);
1101 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
1102 kzalloc(size, GFP_KERNEL);
1103 if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
1104 r600_free_extended_power_table(rdev);
1105 return -ENOMEM;
1106 }
1107 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
1108 limits->numEntries;
1109 entry = &limits->entries[0];
1110 for (i = 0; i < limits->numEntries; i++) {
1111 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
1112 le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
1113 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
1114 le16_to_cpu(entry->usVoltage);
1115 entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
1116 ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
1117 }
1118 }
 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
 		    ext_hdr->usPPMTableOffset) {
 			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
@@ -977,10 +1124,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
 			rdev->pm.dpm.dyn_state.ppm_table =
 				kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
 			if (!rdev->pm.dpm.dyn_state.ppm_table) {
-				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
-				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
-				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
-				kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
+				r600_free_extended_power_table(rdev);
 				return -ENOMEM;
 			}
 			rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
@@ -1003,6 +1147,71 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
 			rdev->pm.dpm.dyn_state.ppm_table->tj_max =
 				le32_to_cpu(ppm->ulTjmax);
 		}
1150 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
1151 ext_hdr->usACPTableOffset) {
1152 ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
1153 (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
1154 (mode_info->atom_context->bios + data_offset +
1155 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
1156 ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
1157 u32 size = limits->numEntries *
1158 sizeof(struct radeon_clock_voltage_dependency_entry);
1159 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
1160 kzalloc(size, GFP_KERNEL);
1161 if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
1162 r600_free_extended_power_table(rdev);
1163 return -ENOMEM;
1164 }
1165 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
1166 limits->numEntries;
1167 entry = &limits->entries[0];
1168 for (i = 0; i < limits->numEntries; i++) {
1169 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
1170 le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
1171 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
1172 le16_to_cpu(entry->usVoltage);
1173 entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
1174 ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
1175 }
1176 }
1177 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
1178 ext_hdr->usPowerTuneTableOffset) {
1179 u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
1180 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
1181 ATOM_PowerTune_Table *pt;
1182 rdev->pm.dpm.dyn_state.cac_tdp_table =
1183 kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
1184 if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
1185 r600_free_extended_power_table(rdev);
1186 return -ENOMEM;
1187 }
1188 if (rev > 0) {
1189 ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
1190 (mode_info->atom_context->bios + data_offset +
1191 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
1192 rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
1193 ppt->usMaximumPowerDeliveryLimit;
1194 pt = &ppt->power_tune_table;
1195 } else {
1196 ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
1197 (mode_info->atom_context->bios + data_offset +
1198 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
1199 rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
1200 pt = &ppt->power_tune_table;
1201 }
1202 rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
1203 rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
1204 le16_to_cpu(pt->usConfigurableTDP);
1205 rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
1206 rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
1207 le16_to_cpu(pt->usBatteryPowerLimit);
1208 rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
1209 le16_to_cpu(pt->usSmallPowerLimit);
1210 rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
1211 le16_to_cpu(pt->usLowCACLeakage);
1212 rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
1213 le16_to_cpu(pt->usHighCACLeakage);
1214 }
 	}
 
 	return 0;
@@ -1016,12 +1225,24 @@ void r600_free_extended_power_table(struct radeon_device *rdev)
 		kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
 	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries)
 		kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
+	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries)
+		kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries);
 	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries)
 		kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
 	if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
 		kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries);
 	if (rdev->pm.dpm.dyn_state.ppm_table)
 		kfree(rdev->pm.dpm.dyn_state.ppm_table);
+	if (rdev->pm.dpm.dyn_state.cac_tdp_table)
+		kfree(rdev->pm.dpm.dyn_state.cac_tdp_table);
+	if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
+		kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries);
+	if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
+		kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries);
+	if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
+		kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries);
+	if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
+		kfree(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries);
 }
 
 enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
@@ -1046,3 +1267,36 @@ enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
 	}
 	return RADEON_PCIE_GEN1;
 }
1270
1271u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
1272 u16 asic_lanes,
1273 u16 default_lanes)
1274{
1275 switch (asic_lanes) {
1276 case 0:
1277 default:
1278 return default_lanes;
1279 case 1:
1280 return 1;
1281 case 2:
1282 return 2;
1283 case 4:
1284 return 4;
1285 case 8:
1286 return 8;
1287 case 12:
1288 return 12;
1289 case 16:
1290 return 16;
1291 }
1292}
1293
1294u8 r600_encode_pci_lane_width(u32 lanes)
1295{
1296 u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
1297
1298 if (lanes > 16)
1299 return 0;
1300
1301 return encoded_lanes[lanes];
1302}
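/* Added note: spelled out, encoded_lanes maps 1->1, 2->2, 4->3, 8->4,
 * 12->5, 16->6 and anything else to 0, presumably the lane-width encoding
 * that the ATOM power tables expect. */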
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
index 7c822d9ae53d..1000bf9719f2 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.h
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -130,6 +130,7 @@ void r600_dpm_print_cap_info(u32 caps);
 void r600_dpm_print_ps_status(struct radeon_device *rdev,
 			      struct radeon_ps *rps);
 u32 r600_dpm_get_vblank_time(struct radeon_device *rdev);
+u32 r600_dpm_get_vrefresh(struct radeon_device *rdev);
 bool r600_is_uvd_state(u32 class, u32 class2);
 void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
 			    u32 *p, u32 *u);
@@ -224,4 +225,9 @@ enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
 					       enum radeon_pcie_gen asic_gen,
 					       enum radeon_pcie_gen default_gen);
 
+u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
+			       u16 asic_lanes,
+			       u16 default_lanes);
+u8 r600_encode_pci_lane_width(u32 lanes);
+
 #endif
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f48240bb8c56..f443010ce90b 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -226,10 +226,29 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 	u32 base_rate = 24000;
+	u32 max_ratio = clock / base_rate;
+	u32 dto_phase;
+	u32 dto_modulo = clock;
+	u32 wallclock_ratio;
+	u32 dto_cntl;
 
 	if (!dig || !dig->afmt)
 		return;
 
+	if (max_ratio >= 8) {
+		dto_phase = 192 * 1000;
+		wallclock_ratio = 3;
+	} else if (max_ratio >= 4) {
+		dto_phase = 96 * 1000;
+		wallclock_ratio = 2;
+	} else if (max_ratio >= 2) {
+		dto_phase = 48 * 1000;
+		wallclock_ratio = 1;
+	} else {
+		dto_phase = 24 * 1000;
+		wallclock_ratio = 0;
+	}
+
 	/* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT.
 	 * doesn't matter which one you use. Just use the first one.
 	 */
@@ -242,9 +261,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 		/* according to the reg specs, this should be DCE3.2 only, but in
 		 * practice it seems to cover DCE3.0 as well.
 		 */
-		WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
-		WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
-		WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+		if (dig->dig_encoder == 0) {
+			dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+			dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+			WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
+			WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
+			WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
+			WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+		} else {
+			dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+			dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+			WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl);
+			WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase);
+			WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
+			WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+		}
 	} else {
 		/* according to the reg specs, this should be DCE2.0 and DCE3.0 */
 		WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
@@ -252,6 +283,107 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 	}
 }
 
286static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
287{
288 struct radeon_device *rdev = encoder->dev->dev_private;
289 struct drm_connector *connector;
290 struct radeon_connector *radeon_connector = NULL;
291 u32 tmp;
292 u8 *sadb;
293 int sad_count;
294
295 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
296 if (connector->encoder == encoder)
297 radeon_connector = to_radeon_connector(connector);
298 }
299
300 if (!radeon_connector) {
301 DRM_ERROR("Couldn't find encoder's connector\n");
302 return;
303 }
304
305 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
306 if (sad_count < 0) {
307 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
308 return;
309 }
310
311 /* program the speaker allocation */
312 tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
313 tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
314 /* set HDMI mode */
315 tmp |= HDMI_CONNECTION;
316 if (sad_count)
317 tmp |= SPEAKER_ALLOCATION(sadb[0]);
318 else
319 tmp |= SPEAKER_ALLOCATION(5); /* stereo */
320 WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
321
322 kfree(sadb);
323}
324
325static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
326{
327 struct radeon_device *rdev = encoder->dev->dev_private;
328 struct drm_connector *connector;
329 struct radeon_connector *radeon_connector = NULL;
330 struct cea_sad *sads;
331 int i, sad_count;
332
333 static const u16 eld_reg_to_type[][2] = {
334 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
335 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
336 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
337 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
338 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
339 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
340 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
341 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
342 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
343 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
344 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
345 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
346 };
347
348 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
349 if (connector->encoder == encoder)
350 radeon_connector = to_radeon_connector(connector);
351 }
352
353 if (!radeon_connector) {
354 DRM_ERROR("Couldn't find encoder's connector\n");
355 return;
356 }
357
358 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
359 if (sad_count < 0) {
360 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
361 return;
362 }
363 BUG_ON(!sads);
364
365 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
366 u32 value = 0;
367 int j;
368
369 for (j = 0; j < sad_count; j++) {
370 struct cea_sad *sad = &sads[j];
371
372 if (sad->format == eld_reg_to_type[i][1]) {
373 value = MAX_CHANNELS(sad->channels) |
374 DESCRIPTOR_BYTE_2(sad->byte2) |
375 SUPPORTED_FREQUENCIES(sad->freq);
376 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
377 value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
378 break;
379 }
380 }
381 WREG32(eld_reg_to_type[i][0], value);
382 }
383
384 kfree(sads);
385}
386
255/* 387/*
256 * update the info frames with the data from the current display mode 388 * update the info frames with the data from the current display mode
257 */ 389 */
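
The two helpers added above pull the Speaker Allocation Data Block and the Short Audio Descriptors out of the connector's EDID and pack each SAD into one AZ descriptor register per coding type. Below is a minimal userspace sketch of that packing, assuming the field layout from the r600d.h hunk further down; the struct and sample values are illustrative stand-ins, not the kernel's.

#include <stdint.h>
#include <stdio.h>

/* field packing as defined in the r600d.h hunk below */
#define MAX_CHANNELS(x)                  (((x) & 0x7) << 0)
#define SUPPORTED_FREQUENCIES(x)         (((x) & 0xff) << 8)
#define DESCRIPTOR_BYTE_2(x)             (((x) & 0xff) << 16)
#define SUPPORTED_FREQUENCIES_STEREO(x)  (((x) & 0xff) << 24) /* LPCM only */

struct sad { int format, channels, freq, byte2; };  /* stand-in for struct cea_sad */

int main(void)
{
	/* hypothetical PCM SAD: 8 channels (encoded as 7), 32/44.1/48 kHz */
	struct sad pcm = { .format = 1, .channels = 7, .freq = 0x07, .byte2 = 0x07 };
	uint32_t value = MAX_CHANNELS(pcm.channels) |
			 DESCRIPTOR_BYTE_2(pcm.byte2) |
			 SUPPORTED_FREQUENCIES(pcm.freq);
	if (pcm.format == 1)  /* CEA-861 audio format code 1 = LPCM */
		value |= SUPPORTED_FREQUENCIES_STEREO(pcm.freq);
	printf("descriptor word: 0x%08x\n", (unsigned)value);  /* 0x07070707 */
	return 0;
}
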
@@ -296,6 +428,11 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
 		       HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
 	}
 
+	if (ASIC_IS_DCE32(rdev)) {
+		dce3_2_afmt_write_speaker_allocation(encoder);
+		dce3_2_afmt_write_sad_regs(encoder);
+	}
+
 	WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
 	       HDMI0_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
 	       HDMI0_ACR_SOURCE); /* select SW CTS value */
@@ -351,7 +488,7 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-	struct r600_audio audio = r600_audio_status(rdev);
+	struct r600_audio_pin audio = r600_audio_status(rdev);
 	uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
 	struct hdmi_audio_infoframe frame;
 	uint32_t offset;
@@ -460,6 +597,11 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
 	if (!enable && !dig->afmt->enabled)
 		return;
 
+	if (enable)
+		dig->afmt->pin = r600_audio_get_pin(rdev);
+	else
+		dig->afmt->pin = NULL;
+
 	/* Older chipsets require setting HDMI and routing manually */
 	if (!ASIC_IS_DCE3(rdev)) {
 		if (enable)
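
The r600_hdmi_enable() hunk above pairs pin acquisition with enable and drops the reference on disable, so a pin is only held while an encoder is actually streaming audio. A toy model of that lifecycle follows, with stand-in types; audio_get_pin() here is a trivial placeholder, not the driver's allocator.

#include <stdio.h>
#include <stddef.h>

struct audio_pin { int id; };
struct afmt { struct audio_pin *pin; int enabled; };

static struct audio_pin pins[2] = { { 0 }, { 1 } };

static struct audio_pin *audio_get_pin(void)
{
	return &pins[0];  /* stand-in for r600_audio_get_pin() */
}

/* mirrors the hunk above: borrow a pin on enable, drop it on disable */
static void hdmi_enable(struct afmt *afmt, int enable)
{
	if (enable)
		afmt->pin = audio_get_pin();
	else
		afmt->pin = NULL;
	afmt->enabled = enable;
}

int main(void)
{
	struct afmt a = { 0 };
	hdmi_enable(&a, 1);
	printf("pin %d\n", a.pin ? a.pin->id : -1);
	hdmi_enable(&a, 0);
	printf("pin %s\n", a.pin ? "held" : "released");
	return 0;
}
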
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 8e3fe815edab..454f90a849e4 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -933,6 +933,9 @@
 #define DCCG_AUDIO_DTO0_LOAD 0x051c
 # define DTO_LOAD (1 << 31)
 #define DCCG_AUDIO_DTO0_CNTL 0x0520
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
 
 #define DCCG_AUDIO_DTO1_PHASE 0x0524
 #define DCCG_AUDIO_DTO1_MODULE 0x0528
@@ -957,6 +960,42 @@
 # define DIG_MODE_SDVO 4
 #define DIG1_CNTL 0x79a0
 
+#define AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER 0x71bc
+#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
+#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
+#define SPEAKER_ALLOCATION_SHIFT 0
+#define HDMI_CONNECTION (1 << 16)
+#define DP_CONNECTION (1 << 17)
+
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x71c8 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x71cc /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x71d0 /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x71d4 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x71d8 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x71dc /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x71e0 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x71e4 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x71e8 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x71ec /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x71f0 /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x71f4 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x71f8 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x71fc /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
 /* rs6xx/rs740 and r6xx share the same HDMI blocks, however, rs6xx has only one
  * instance of the blocks while r6xx has 2. DCE 3.0 cards are slightly
  * different due to the new DIG blocks, but also have 2 instances.
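
Per the comment in the hunk above, SUPPORTED_FREQUENCIES and SUPPORTED_FREQUENCIES_STEREO take a one-bit-per-rate mask (bit0 = 32 kHz through bit6 = 192 kHz). A small sketch that builds such a mask from a list of rates; the helper name and the rate table are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* bit positions from the SUPPORTED_FREQUENCIES comment above */
static const int rates_hz[7] = {
	32000, 44100, 48000, 88200, 96000, 176400, 192000
};

static uint8_t freq_mask(const int *rates, int n)  /* illustrative helper */
{
	uint8_t mask = 0;
	for (int i = 0; i < n; i++)
		for (int bit = 0; bit < 7; bit++)
			if (rates[i] == rates_hz[bit])
				mask |= 1 << bit;
	return mask;
}

int main(void)
{
	int supported[] = { 32000, 48000, 96000 };
	printf("mask: 0x%02x\n", freq_mask(supported, 3)); /* 0x15: bits 0, 2, 4 */
	return 0;
}
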
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2f08219c39b6..ff8b564ce2b2 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -152,6 +152,47 @@ extern int radeon_aspm;
 #define RADEON_RESET_MC (1 << 10)
 #define RADEON_RESET_DISPLAY (1 << 11)
 
+/* CG block flags */
+#define RADEON_CG_BLOCK_GFX (1 << 0)
+#define RADEON_CG_BLOCK_MC (1 << 1)
+#define RADEON_CG_BLOCK_SDMA (1 << 2)
+#define RADEON_CG_BLOCK_UVD (1 << 3)
+#define RADEON_CG_BLOCK_VCE (1 << 4)
+#define RADEON_CG_BLOCK_HDP (1 << 5)
+#define RADEON_CG_BLOCK_BIF (1 << 6)
+
+/* CG flags */
+#define RADEON_CG_SUPPORT_GFX_MGCG (1 << 0)
+#define RADEON_CG_SUPPORT_GFX_MGLS (1 << 1)
+#define RADEON_CG_SUPPORT_GFX_CGCG (1 << 2)
+#define RADEON_CG_SUPPORT_GFX_CGLS (1 << 3)
+#define RADEON_CG_SUPPORT_GFX_CGTS (1 << 4)
+#define RADEON_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
+#define RADEON_CG_SUPPORT_GFX_CP_LS (1 << 6)
+#define RADEON_CG_SUPPORT_GFX_RLC_LS (1 << 7)
+#define RADEON_CG_SUPPORT_MC_LS (1 << 8)
+#define RADEON_CG_SUPPORT_MC_MGCG (1 << 9)
+#define RADEON_CG_SUPPORT_SDMA_LS (1 << 10)
+#define RADEON_CG_SUPPORT_SDMA_MGCG (1 << 11)
+#define RADEON_CG_SUPPORT_BIF_LS (1 << 12)
+#define RADEON_CG_SUPPORT_UVD_MGCG (1 << 13)
+#define RADEON_CG_SUPPORT_VCE_MGCG (1 << 14)
+#define RADEON_CG_SUPPORT_HDP_LS (1 << 15)
+#define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16)
+
+/* PG flags */
+#define RADEON_PG_SUPPORT_GFX_CG (1 << 0)
+#define RADEON_PG_SUPPORT_GFX_SMG (1 << 1)
+#define RADEON_PG_SUPPORT_GFX_DMG (1 << 2)
+#define RADEON_PG_SUPPORT_UVD (1 << 3)
+#define RADEON_PG_SUPPORT_VCE (1 << 4)
+#define RADEON_PG_SUPPORT_CP (1 << 5)
+#define RADEON_PG_SUPPORT_GDS (1 << 6)
+#define RADEON_PG_SUPPORT_RLC_SMU_HS (1 << 7)
+#define RADEON_PG_SUPPORT_SDMA (1 << 8)
+#define RADEON_PG_SUPPORT_ACP (1 << 9)
+#define RADEON_PG_SUPPORT_SAMU (1 << 10)
+
 /* max cursor sizes (in pixels) */
 #define CURSOR_WIDTH 64
 #define CURSOR_HEIGHT 64
@@ -238,6 +279,12 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
 int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
						       u16 *voltage,
						       u16 leakage_idx);
+int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
+					  u16 *leakage_id);
+int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
+							 u16 *vddc, u16 *vddci,
+							 u16 virtual_voltage_id,
+							 u16 vbios_voltage_id);
 int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
				       u8 voltage_type,
				       u16 nominal_voltage,
@@ -492,9 +539,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
 int radeon_mode_dumb_mmap(struct drm_file *filp,
			   struct drm_device *dev,
			   uint32_t handle, uint64_t *offset_p);
-int radeon_mode_dumb_destroy(struct drm_file *file_priv,
-			     struct drm_device *dev,
-			     uint32_t handle);
 
 /*
  * Semaphores.
@@ -682,7 +726,7 @@ union radeon_irq_stat_regs {
 
 #define RADEON_MAX_HPD_PINS 6
 #define RADEON_MAX_CRTCS 6
-#define RADEON_MAX_AFMT_BLOCKS 6
+#define RADEON_MAX_AFMT_BLOCKS 7
 
 struct radeon_irq {
	bool installed;
@@ -746,8 +790,6 @@ struct radeon_ring {
	uint32_t align_mask;
	uint32_t ptr_mask;
	bool ready;
-	u32 ptr_reg_shift;
-	u32 ptr_reg_mask;
	u32 nop;
	u32 idx;
	u64 last_semaphore_signal_addr;
@@ -844,35 +886,6 @@ struct r600_ih {
	bool enabled;
 };
 
-struct r600_blit_cp_primitives {
-	void (*set_render_target)(struct radeon_device *rdev, int format,
-				  int w, int h, u64 gpu_addr);
-	void (*cp_set_surface_sync)(struct radeon_device *rdev,
-				    u32 sync_type, u32 size,
-				    u64 mc_addr);
-	void (*set_shaders)(struct radeon_device *rdev);
-	void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
-	void (*set_tex_resource)(struct radeon_device *rdev,
-				 int format, int w, int h, int pitch,
-				 u64 gpu_addr, u32 size);
-	void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
-			     int x2, int y2);
-	void (*draw_auto)(struct radeon_device *rdev);
-	void (*set_default_state)(struct radeon_device *rdev);
-};
-
-struct r600_blit {
-	struct radeon_bo *shader_obj;
-	struct r600_blit_cp_primitives primitives;
-	int max_dim;
-	int ring_size_common;
-	int ring_size_per_loop;
-	u64 shader_gpu_addr;
-	u32 vs_offset, ps_offset;
-	u32 state_offset;
-	u32 state_len;
-};
-
 /*
  * RLC stuff
  */
@@ -883,13 +896,19 @@ struct radeon_rlc {
	struct radeon_bo *save_restore_obj;
	uint64_t save_restore_gpu_addr;
	volatile uint32_t *sr_ptr;
-	u32 *reg_list;
+	const u32 *reg_list;
	u32 reg_list_size;
	/* for clear state */
	struct radeon_bo *clear_state_obj;
	uint64_t clear_state_gpu_addr;
	volatile uint32_t *cs_ptr;
-	struct cs_section_def *cs_data;
+	const struct cs_section_def *cs_data;
+	u32 clear_state_size;
+	/* for cp tables */
+	struct radeon_bo *cp_table_obj;
+	uint64_t cp_table_gpu_addr;
+	volatile uint32_t *cp_table_ptr;
+	u32 cp_table_size;
 };
 
 int radeon_ib_get(struct radeon_device *rdev, int ring,
@@ -921,8 +940,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
 int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
			 unsigned size, uint32_t *data);
 int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
-		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
-		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
+		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop);
 void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
 
 
@@ -1036,7 +1054,6 @@ struct radeon_wb {
 #define R600_WB_DMA_RPTR_OFFSET 1792
 #define R600_WB_IH_WPTR_OFFSET 2048
 #define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
-#define R600_WB_UVD_RPTR_OFFSET 2560
 #define R600_WB_EVENT_OFFSET 3072
 #define CIK_WB_CP1_WPTR_OFFSET 3328
 #define CIK_WB_CP2_WPTR_OFFSET 3584
@@ -1147,6 +1164,7 @@ enum radeon_int_thermal_type {
	THERMAL_TYPE_SI,
	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
	THERMAL_TYPE_CI,
+	THERMAL_TYPE_KV,
 };
 
 struct radeon_voltage {
@@ -1220,6 +1238,9 @@ struct radeon_ps {
	/* UVD clocks */
	u32 vclk;
	u32 dclk;
+	/* VCE clocks */
+	u32 evclk;
+	u32 ecclk;
	/* asic priv */
	void *ps_priv;
 };
@@ -1270,14 +1291,21 @@ struct radeon_clock_voltage_dependency_table {
	struct radeon_clock_voltage_dependency_entry *entries;
 };
 
-struct radeon_cac_leakage_entry {
-	u16 vddc;
-	u32 leakage;
+union radeon_cac_leakage_entry {
+	struct {
+		u16 vddc;
+		u32 leakage;
+	};
+	struct {
+		u16 vddc1;
+		u16 vddc2;
+		u16 vddc3;
+	};
 };
 
 struct radeon_cac_leakage_table {
	u32 count;
-	struct radeon_cac_leakage_entry *entries;
+	union radeon_cac_leakage_entry *entries;
 };
 
 struct radeon_phase_shedding_limits_entry {
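
Turning radeon_cac_leakage_entry into a union lets the same table bytes be read either as the legacy (vddc, leakage) pair or as three packed vddc values, with no second parser. A compilable sketch of the overlay, using C11 anonymous structs as in the definition above; the values are invented.

#include <stdint.h>
#include <stdio.h>

union cac_leakage_entry {            /* mirrors the union in the hunk above */
	struct { uint16_t vddc; uint32_t leakage; };
	struct { uint16_t vddc1, vddc2, vddc3; };
};

int main(void)
{
	union cac_leakage_entry e = { .vddc1 = 900, .vddc2 = 950, .vddc3 = 1000 };
	/* the same leading bytes reinterpreted through the legacy layout */
	printf("legacy view: vddc=%u\n", e.vddc);            /* 900 */
	printf("new view: %u/%u/%u\n", e.vddc1, e.vddc2, e.vddc3);
	return 0;
}
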
@@ -1291,6 +1319,28 @@ struct radeon_phase_shedding_limits_table {
	struct radeon_phase_shedding_limits_entry *entries;
 };
 
+struct radeon_uvd_clock_voltage_dependency_entry {
+	u32 vclk;
+	u32 dclk;
+	u16 v;
+};
+
+struct radeon_uvd_clock_voltage_dependency_table {
+	u8 count;
+	struct radeon_uvd_clock_voltage_dependency_entry *entries;
+};
+
+struct radeon_vce_clock_voltage_dependency_entry {
+	u32 ecclk;
+	u32 evclk;
+	u16 v;
+};
+
+struct radeon_vce_clock_voltage_dependency_table {
+	u8 count;
+	struct radeon_vce_clock_voltage_dependency_entry *entries;
+};
+
 struct radeon_ppm_table {
	u8 ppm_design;
	u16 cpu_core_number;
@@ -1304,11 +1354,27 @@ struct radeon_ppm_table {
	u32 tj_max;
 };
 
+struct radeon_cac_tdp_table {
+	u16 tdp;
+	u16 configurable_tdp;
+	u16 tdc;
+	u16 battery_power_limit;
+	u16 small_power_limit;
+	u16 low_cac_leakage;
+	u16 high_cac_leakage;
+	u16 maximum_power_delivery_limit;
+};
+
 struct radeon_dpm_dynamic_state {
	struct radeon_clock_voltage_dependency_table vddc_dependency_on_sclk;
	struct radeon_clock_voltage_dependency_table vddci_dependency_on_mclk;
	struct radeon_clock_voltage_dependency_table vddc_dependency_on_mclk;
+	struct radeon_clock_voltage_dependency_table mvdd_dependency_on_mclk;
	struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk;
+	struct radeon_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
+	struct radeon_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
+	struct radeon_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
+	struct radeon_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
	struct radeon_clock_array valid_sclk_values;
	struct radeon_clock_array valid_mclk_values;
	struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc;
@@ -1320,6 +1386,7 @@ struct radeon_dpm_dynamic_state {
	struct radeon_cac_leakage_table cac_leakage_table;
	struct radeon_phase_shedding_limits_table phase_shedding_limits_table;
	struct radeon_ppm_table *ppm_table;
+	struct radeon_cac_tdp_table *cac_tdp_table;
 };
 
 struct radeon_dpm_fan {
@@ -1389,11 +1456,12 @@ struct radeon_dpm {
	struct radeon_dpm_thermal thermal;
	/* forced levels */
	enum radeon_dpm_forced_level forced_level;
+	/* track UVD streams */
+	unsigned sd;
+	unsigned hd;
 };
 
-void radeon_dpm_enable_power_state(struct radeon_device *rdev,
-				   enum radeon_pm_state_type dpm_state);
-
+void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable);
 
 struct radeon_pm {
	struct mutex mutex;
@@ -1468,9 +1536,9 @@ struct radeon_uvd {
	void *cpu_addr;
	uint64_t gpu_addr;
	void *saved_bo;
-	unsigned fw_size;
	atomic_t handles[RADEON_MAX_UVD_HANDLES];
	struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
+	unsigned img_size[RADEON_MAX_UVD_HANDLES];
	struct delayed_work idle_work;
 };
 
@@ -1499,12 +1567,21 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
 int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
				 unsigned cg_upll_func_cntl);
 
-struct r600_audio {
+struct r600_audio_pin {
	int channels;
	int rate;
	int bits_per_sample;
	u8 status_bits;
	u8 category_code;
+	u32 offset;
+	bool connected;
+	u32 id;
+};
+
+struct r600_audio {
+	bool enabled;
+	struct r600_audio_pin pin[RADEON_MAX_AFMT_BLOCKS];
+	int num_pins;
 };
 
 /*
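
r600_audio is now a container of r600_audio_pin entries rather than a single global state, and each pin carries an offset so shared code can address its register block. A toy model of that offset-based addressing follows; the register name, window size, and the array standing in for MMIO are all invented.

#include <stdio.h>

/* toy register file; pretend each AFMT/pin block is a 0x20-register window */
static unsigned regs[0x100];

struct audio_pin { unsigned offset; unsigned id; };

#define AFMT_AUDIO_PACKET_CONTROL 0x04  /* hypothetical per-block register */

static void pin_write(struct audio_pin *pin, unsigned reg, unsigned v)
{
	regs[pin->offset + reg] = v;  /* one code path serves every block */
}

int main(void)
{
	struct audio_pin pin[2] = {
		{ .offset = 0x00, .id = 0 },
		{ .offset = 0x20, .id = 1 },
	};
	for (int i = 0; i < 2; i++) {
		pin_write(&pin[i], AFMT_AUDIO_PACKET_CONTROL, pin[i].id + 1);
		printf("pin %u: reg[0x%02x] = %u\n", pin[i].id,
		       pin[i].offset + AFMT_AUDIO_PACKET_CONTROL,
		       regs[pin[i].offset + AFMT_AUDIO_PACKET_CONTROL]);
	}
	return 0;
}
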
@@ -1536,6 +1613,34 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
			      unsigned nfiles);
 int radeon_debugfs_fence_init(struct radeon_device *rdev);
 
+/*
+ * ASIC ring specific functions.
+ */
+struct radeon_asic_ring {
+	/* ring read/write ptr handling */
+	u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
+	u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
+	void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
+
+	/* validating and patching of IBs */
+	int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
+	int (*cs_parse)(struct radeon_cs_parser *p);
+
+	/* command emit functions */
+	void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+	void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+	void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+			       struct radeon_semaphore *semaphore, bool emit_wait);
+	void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+
+	/* testing functions */
+	int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+	int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+	bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
+
+	/* deprecated */
+	void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
+};
 
 /*
  * ASIC specific functions.
@@ -1579,23 +1684,7 @@ struct radeon_asic {
			 uint32_t incr, uint32_t flags);
	} vm;
	/* ring specific callbacks */
-	struct {
-		void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
-		int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
-		void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
-		void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
-				       struct radeon_semaphore *semaphore, bool emit_wait);
-		int (*cs_parse)(struct radeon_cs_parser *p);
-		void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
-		int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
-		int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
-		bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
-		void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-
-		u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
-		u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
-		void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
-	} ring[RADEON_NUM_RINGS];
+	struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
	/* irqs */
	struct {
		int (*set)(struct radeon_device *rdev);
@@ -1688,6 +1777,7 @@ struct radeon_asic {
		void (*debugfs_print_current_performance_level)(struct radeon_device *rdev, struct seq_file *m);
		int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
		bool (*vblank_too_short)(struct radeon_device *rdev);
+		void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
	} dpm;
	/* pageflipping */
	struct {
@@ -2066,7 +2156,7 @@ struct radeon_device {
	const struct firmware *mec_fw;	/* CIK MEC firmware */
	const struct firmware *sdma_fw;	/* CIK SDMA firmware */
	const struct firmware *smc_fw;	/* SMC firmware */
-	struct r600_blit r600_blit;
+	const struct firmware *uvd_fw;	/* UVD firmware */
	struct r600_vram_scratch vram_scratch;
	int msi_enabled;	/* msi enabled */
	struct r600_ih ih;	/* r6/700 interrupt ring */
@@ -2077,9 +2167,8 @@ struct radeon_device {
	struct work_struct reset_work;
	int num_crtc;	/* number of crtcs */
	struct mutex dc_hw_i2c_mutex;	/* display controller hw i2c mutex */
-	bool audio_enabled;
	bool has_uvd;
-	struct r600_audio audio_status; /* audio stuff */
+	struct r600_audio audio;	/* audio stuff */
	struct notifier_block acpi_nb;
	/* only one userspace can use Hyperz features or CMASK at a time */
	struct drm_file *hyperz_filp;
@@ -2095,6 +2184,11 @@ struct radeon_device {
	/* ACPI interface */
	struct radeon_atif atif;
	struct radeon_atcs atcs;
+	/* srbm instance registers */
+	struct mutex srbm_mutex;
+	/* clock, powergating flags */
+	u32 cg_flags;
+	u32 pg_flags;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -2153,6 +2247,8 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
 #define WREG32_PIF_PHY1(reg, v) eg_pif_phy1_wreg(rdev, (reg), (v))
 #define RREG32_UVD_CTX(reg) r600_uvd_ctx_rreg(rdev, (reg))
 #define WREG32_UVD_CTX(reg, v) r600_uvd_ctx_wreg(rdev, (reg), (v))
+#define RREG32_DIDT(reg) cik_didt_rreg(rdev, (reg))
+#define WREG32_DIDT(reg, v) cik_didt_wreg(rdev, (reg), (v))
 #define WREG32_P(reg, val, mask) \
	do { \
		uint32_t tmp_ = RREG32(reg); \
@@ -2161,7 +2257,7 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
		WREG32(reg, tmp_); \
	} while (0)
 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
-#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
 #define WREG32_PLL_P(reg, val, mask) \
	do { \
		uint32_t tmp_ = RREG32_PLL(reg); \
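
The WREG32_OR change above is a real fix, not churn: the macro argument can be a compound expression, and without parentheses `~or` binds only to its first operand. Here is a self-contained model of WREG32_P over a plain variable (RREG32/WREG32 are mocked, not the driver's MMIO accessors) showing the bit the old expansion drops.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;          /* stands in for the MMIO register */
#define RREG32(r)     (fake_reg)
#define WREG32(r, v)  (fake_reg = (v))

#define WREG32_P(reg, val, mask) do {            \
	uint32_t tmp_ = RREG32(reg);             \
	tmp_ &= (mask);                          \
	tmp_ |= ((val) & ~(mask));               \
	WREG32(reg, tmp_);                       \
} while (0)

#define WREG32_OR_OLD(reg, or) WREG32_P(reg, or, ~or)    /* buggy */
#define WREG32_OR_NEW(reg, or) WREG32_P(reg, or, ~(or))  /* fixed */

int main(void)
{
	fake_reg = 0x0f0;
	WREG32_OR_NEW(0, 0x100 | 0x002);   /* mask = ~0x102: both bits set */
	printf("fixed: 0x%03x\n", (unsigned)fake_reg);  /* 0x1f2 */

	fake_reg = 0x0f0;
	WREG32_OR_OLD(0, 0x100 | 0x002);   /* expands to ~0x100 | 0x002 */
	printf("buggy: 0x%03x\n", (unsigned)fake_reg);  /* 0x1f0: 0x002 lost */
	return 0;
}
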
@@ -2284,6 +2380,22 @@ static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
	WREG32(R600_UVD_CTX_DATA, (v));
 }
 
+
+static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
+{
+	u32 r;
+
+	WREG32(CIK_DIDT_IND_INDEX, (reg));
+	r = RREG32(CIK_DIDT_IND_DATA);
+	return r;
+}
+
+static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	WREG32(CIK_DIDT_IND_INDEX, (reg));
+	WREG32(CIK_DIDT_IND_DATA, (v));
+}
+
 void r100_pll_errata_after_index(struct radeon_device *rdev);
 
 
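
cik_didt_rreg()/cik_didt_wreg() use the classic index/data window: write the target offset to CIK_DIDT_IND_INDEX, then read or write CIK_DIDT_IND_DATA. A userspace model of the pattern, with an array standing in for the hardware; note that in a real driver the index/data pair is not atomic and must be serialized against other users.

#include <stdint.h>
#include <stdio.h>

/* toy model of an index/data window over a 256-entry register file */
static uint32_t didt_regs[256];
static uint32_t didt_index;

static void mmio_write(int which, uint32_t v)
{
	if (which == 0)                 /* INDEX register */
		didt_index = v & 0xff;
	else                            /* DATA register */
		didt_regs[didt_index] = v;
}

static uint32_t mmio_read(void)
{
	return didt_regs[didt_index];   /* DATA register read */
}

static void didt_wreg(uint32_t reg, uint32_t v)
{
	mmio_write(0, reg);  /* select */
	mmio_write(1, v);    /* store */
}

static uint32_t didt_rreg(uint32_t reg)
{
	mmio_write(0, reg);  /* select */
	return mmio_read();  /* fetch */
}

int main(void)
{
	didt_wreg(0x42, 0xdeadbeef);
	printf("0x%08x\n", (unsigned)didt_rreg(0x42));
	return 0;
}
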
@@ -2379,7 +2491,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_fini(rdev) (rdev)->asic->fini((rdev))
 #define radeon_resume(rdev) (rdev)->asic->resume((rdev))
 #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
-#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
+#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p))
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
@@ -2387,16 +2499,16 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
 #define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
-#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
-#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
-#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
-#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
-#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
-#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
-#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
-#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx].get_rptr((rdev), (r))
-#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx].get_wptr((rdev), (r))
-#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx].set_wptr((rdev), (r))
+#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
+#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
+#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
+#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
+#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
+#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
+#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)]->vm_flush((rdev), (r), (vm))
+#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
+#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
+#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
 #define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
 #define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
 #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
@@ -2404,8 +2516,8 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
 #define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
 #define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
-#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
-#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
+#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
+#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
 #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
 #define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
 #define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
@@ -2456,6 +2568,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_dpm_debugfs_print_current_performance_level(rdev, m) rdev->asic->dpm.debugfs_print_current_performance_level((rdev), (m))
 #define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
 #define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
+#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
 
 /* Common functions */
 /* AGP */
@@ -2522,6 +2635,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
 
 /* audio */
 void r600_audio_update_hdmi(struct work_struct *work);
+struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
+struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
 
 /*
  * R600 vram scratch functions
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 78bec1a58ed1..630853b96841 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -172,6 +172,21 @@ void radeon_agp_disable(struct radeon_device *rdev)
 /*
  * ASIC
  */
+
+static struct radeon_asic_ring r100_gfx_ring = {
+	.ib_execute = &r100_ring_ib_execute,
+	.emit_fence = &r100_fence_ring_emit,
+	.emit_semaphore = &r100_semaphore_ring_emit,
+	.cs_parse = &r100_cs_parse,
+	.ring_start = &r100_ring_start,
+	.ring_test = &r100_ring_test,
+	.ib_test = &r100_ib_test,
+	.is_lockup = &r100_gpu_is_lockup,
+	.get_rptr = &radeon_ring_generic_get_rptr,
+	.get_wptr = &radeon_ring_generic_get_wptr,
+	.set_wptr = &radeon_ring_generic_set_wptr,
+};
+
 static struct radeon_asic r100_asic = {
	.init = &r100_init,
	.fini = &r100_fini,
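
This is the shape of the whole radeon_asic.c change: the per-ring callbacks move out of every ASIC struct into one static radeon_asic_ring that many ASICs reference by pointer, which is also why the radeon.h macros above switched from `.ring[(r)].` to `->ring[(r)]->`. A cut-down, compilable model of the refactor:

#include <stdio.h>

/* one static ops struct, shared by many ASIC tables */
struct ring_ops { const char *name; int (*ring_test)(void); };

static int gfx_test(void) { return 0; }

static struct ring_ops r100_gfx_ring = { "r100 gfx", gfx_test };

struct asic { struct ring_ops *ring[1]; };  /* pointer, not embedded copy */

static struct asic r100 = { .ring = { &r100_gfx_ring } };
static struct asic r200 = { .ring = { &r100_gfx_ring } };  /* shared */

int main(void)
{
	/* callers now dereference through the pointer */
	printf("%s: %d\n", r100.ring[0]->name, r100.ring[0]->ring_test());
	printf("same table: %d\n", r100.ring[0] == r200.ring[0]);
	return 0;
}
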
@@ -187,19 +202,7 @@ static struct radeon_asic r100_asic = {
187 .set_page = &r100_pci_gart_set_page, 202 .set_page = &r100_pci_gart_set_page,
188 }, 203 },
189 .ring = { 204 .ring = {
190 [RADEON_RING_TYPE_GFX_INDEX] = { 205 [RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring
191 .ib_execute = &r100_ring_ib_execute,
192 .emit_fence = &r100_fence_ring_emit,
193 .emit_semaphore = &r100_semaphore_ring_emit,
194 .cs_parse = &r100_cs_parse,
195 .ring_start = &r100_ring_start,
196 .ring_test = &r100_ring_test,
197 .ib_test = &r100_ib_test,
198 .is_lockup = &r100_gpu_is_lockup,
199 .get_rptr = &radeon_ring_generic_get_rptr,
200 .get_wptr = &radeon_ring_generic_get_wptr,
201 .set_wptr = &radeon_ring_generic_set_wptr,
202 }
203 }, 206 },
204 .irq = { 207 .irq = {
205 .set = &r100_irq_set, 208 .set = &r100_irq_set,
@@ -266,19 +269,7 @@ static struct radeon_asic r200_asic = {
266 .set_page = &r100_pci_gart_set_page, 269 .set_page = &r100_pci_gart_set_page,
267 }, 270 },
268 .ring = { 271 .ring = {
269 [RADEON_RING_TYPE_GFX_INDEX] = { 272 [RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring
270 .ib_execute = &r100_ring_ib_execute,
271 .emit_fence = &r100_fence_ring_emit,
272 .emit_semaphore = &r100_semaphore_ring_emit,
273 .cs_parse = &r100_cs_parse,
274 .ring_start = &r100_ring_start,
275 .ring_test = &r100_ring_test,
276 .ib_test = &r100_ib_test,
277 .is_lockup = &r100_gpu_is_lockup,
278 .get_rptr = &radeon_ring_generic_get_rptr,
279 .get_wptr = &radeon_ring_generic_get_wptr,
280 .set_wptr = &radeon_ring_generic_set_wptr,
281 }
282 }, 273 },
283 .irq = { 274 .irq = {
284 .set = &r100_irq_set, 275 .set = &r100_irq_set,
@@ -330,6 +321,20 @@ static struct radeon_asic r200_asic = {
330 }, 321 },
331}; 322};
332 323
324static struct radeon_asic_ring r300_gfx_ring = {
325 .ib_execute = &r100_ring_ib_execute,
326 .emit_fence = &r300_fence_ring_emit,
327 .emit_semaphore = &r100_semaphore_ring_emit,
328 .cs_parse = &r300_cs_parse,
329 .ring_start = &r300_ring_start,
330 .ring_test = &r100_ring_test,
331 .ib_test = &r100_ib_test,
332 .is_lockup = &r100_gpu_is_lockup,
333 .get_rptr = &radeon_ring_generic_get_rptr,
334 .get_wptr = &radeon_ring_generic_get_wptr,
335 .set_wptr = &radeon_ring_generic_set_wptr,
336};
337
333static struct radeon_asic r300_asic = { 338static struct radeon_asic r300_asic = {
334 .init = &r300_init, 339 .init = &r300_init,
335 .fini = &r300_fini, 340 .fini = &r300_fini,
@@ -345,19 +350,7 @@ static struct radeon_asic r300_asic = {
345 .set_page = &r100_pci_gart_set_page, 350 .set_page = &r100_pci_gart_set_page,
346 }, 351 },
347 .ring = { 352 .ring = {
348 [RADEON_RING_TYPE_GFX_INDEX] = { 353 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
349 .ib_execute = &r100_ring_ib_execute,
350 .emit_fence = &r300_fence_ring_emit,
351 .emit_semaphore = &r100_semaphore_ring_emit,
352 .cs_parse = &r300_cs_parse,
353 .ring_start = &r300_ring_start,
354 .ring_test = &r100_ring_test,
355 .ib_test = &r100_ib_test,
356 .is_lockup = &r100_gpu_is_lockup,
357 .get_rptr = &radeon_ring_generic_get_rptr,
358 .get_wptr = &radeon_ring_generic_get_wptr,
359 .set_wptr = &radeon_ring_generic_set_wptr,
360 }
361 }, 354 },
362 .irq = { 355 .irq = {
363 .set = &r100_irq_set, 356 .set = &r100_irq_set,
@@ -424,19 +417,7 @@ static struct radeon_asic r300_asic_pcie = {
424 .set_page = &rv370_pcie_gart_set_page, 417 .set_page = &rv370_pcie_gart_set_page,
425 }, 418 },
426 .ring = { 419 .ring = {
427 [RADEON_RING_TYPE_GFX_INDEX] = { 420 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
428 .ib_execute = &r100_ring_ib_execute,
429 .emit_fence = &r300_fence_ring_emit,
430 .emit_semaphore = &r100_semaphore_ring_emit,
431 .cs_parse = &r300_cs_parse,
432 .ring_start = &r300_ring_start,
433 .ring_test = &r100_ring_test,
434 .ib_test = &r100_ib_test,
435 .is_lockup = &r100_gpu_is_lockup,
436 .get_rptr = &radeon_ring_generic_get_rptr,
437 .get_wptr = &radeon_ring_generic_get_wptr,
438 .set_wptr = &radeon_ring_generic_set_wptr,
439 }
440 }, 421 },
441 .irq = { 422 .irq = {
442 .set = &r100_irq_set, 423 .set = &r100_irq_set,
@@ -503,19 +484,7 @@ static struct radeon_asic r420_asic = {
503 .set_page = &rv370_pcie_gart_set_page, 484 .set_page = &rv370_pcie_gart_set_page,
504 }, 485 },
505 .ring = { 486 .ring = {
506 [RADEON_RING_TYPE_GFX_INDEX] = { 487 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
507 .ib_execute = &r100_ring_ib_execute,
508 .emit_fence = &r300_fence_ring_emit,
509 .emit_semaphore = &r100_semaphore_ring_emit,
510 .cs_parse = &r300_cs_parse,
511 .ring_start = &r300_ring_start,
512 .ring_test = &r100_ring_test,
513 .ib_test = &r100_ib_test,
514 .is_lockup = &r100_gpu_is_lockup,
515 .get_rptr = &radeon_ring_generic_get_rptr,
516 .get_wptr = &radeon_ring_generic_get_wptr,
517 .set_wptr = &radeon_ring_generic_set_wptr,
518 }
519 }, 488 },
520 .irq = { 489 .irq = {
521 .set = &r100_irq_set, 490 .set = &r100_irq_set,
@@ -582,19 +551,7 @@ static struct radeon_asic rs400_asic = {
582 .set_page = &rs400_gart_set_page, 551 .set_page = &rs400_gart_set_page,
583 }, 552 },
584 .ring = { 553 .ring = {
585 [RADEON_RING_TYPE_GFX_INDEX] = { 554 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
586 .ib_execute = &r100_ring_ib_execute,
587 .emit_fence = &r300_fence_ring_emit,
588 .emit_semaphore = &r100_semaphore_ring_emit,
589 .cs_parse = &r300_cs_parse,
590 .ring_start = &r300_ring_start,
591 .ring_test = &r100_ring_test,
592 .ib_test = &r100_ib_test,
593 .is_lockup = &r100_gpu_is_lockup,
594 .get_rptr = &radeon_ring_generic_get_rptr,
595 .get_wptr = &radeon_ring_generic_get_wptr,
596 .set_wptr = &radeon_ring_generic_set_wptr,
597 }
598 }, 555 },
599 .irq = { 556 .irq = {
600 .set = &r100_irq_set, 557 .set = &r100_irq_set,
@@ -661,19 +618,7 @@ static struct radeon_asic rs600_asic = {
661 .set_page = &rs600_gart_set_page, 618 .set_page = &rs600_gart_set_page,
662 }, 619 },
663 .ring = { 620 .ring = {
664 [RADEON_RING_TYPE_GFX_INDEX] = { 621 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
665 .ib_execute = &r100_ring_ib_execute,
666 .emit_fence = &r300_fence_ring_emit,
667 .emit_semaphore = &r100_semaphore_ring_emit,
668 .cs_parse = &r300_cs_parse,
669 .ring_start = &r300_ring_start,
670 .ring_test = &r100_ring_test,
671 .ib_test = &r100_ib_test,
672 .is_lockup = &r100_gpu_is_lockup,
673 .get_rptr = &radeon_ring_generic_get_rptr,
674 .get_wptr = &radeon_ring_generic_get_wptr,
675 .set_wptr = &radeon_ring_generic_set_wptr,
676 }
677 }, 622 },
678 .irq = { 623 .irq = {
679 .set = &rs600_irq_set, 624 .set = &rs600_irq_set,
@@ -742,19 +687,7 @@ static struct radeon_asic rs690_asic = {
742 .set_page = &rs400_gart_set_page, 687 .set_page = &rs400_gart_set_page,
743 }, 688 },
744 .ring = { 689 .ring = {
745 [RADEON_RING_TYPE_GFX_INDEX] = { 690 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
746 .ib_execute = &r100_ring_ib_execute,
747 .emit_fence = &r300_fence_ring_emit,
748 .emit_semaphore = &r100_semaphore_ring_emit,
749 .cs_parse = &r300_cs_parse,
750 .ring_start = &r300_ring_start,
751 .ring_test = &r100_ring_test,
752 .ib_test = &r100_ib_test,
753 .is_lockup = &r100_gpu_is_lockup,
754 .get_rptr = &radeon_ring_generic_get_rptr,
755 .get_wptr = &radeon_ring_generic_get_wptr,
756 .set_wptr = &radeon_ring_generic_set_wptr,
757 }
758 }, 691 },
759 .irq = { 692 .irq = {
760 .set = &rs600_irq_set, 693 .set = &rs600_irq_set,
@@ -823,19 +756,7 @@ static struct radeon_asic rv515_asic = {
823 .set_page = &rv370_pcie_gart_set_page, 756 .set_page = &rv370_pcie_gart_set_page,
824 }, 757 },
825 .ring = { 758 .ring = {
826 [RADEON_RING_TYPE_GFX_INDEX] = { 759 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
827 .ib_execute = &r100_ring_ib_execute,
828 .emit_fence = &r300_fence_ring_emit,
829 .emit_semaphore = &r100_semaphore_ring_emit,
830 .cs_parse = &r300_cs_parse,
831 .ring_start = &rv515_ring_start,
832 .ring_test = &r100_ring_test,
833 .ib_test = &r100_ib_test,
834 .is_lockup = &r100_gpu_is_lockup,
835 .get_rptr = &radeon_ring_generic_get_rptr,
836 .get_wptr = &radeon_ring_generic_get_wptr,
837 .set_wptr = &radeon_ring_generic_set_wptr,
838 }
839 }, 760 },
840 .irq = { 761 .irq = {
841 .set = &rs600_irq_set, 762 .set = &rs600_irq_set,
@@ -902,19 +823,7 @@ static struct radeon_asic r520_asic = {
902 .set_page = &rv370_pcie_gart_set_page, 823 .set_page = &rv370_pcie_gart_set_page,
903 }, 824 },
904 .ring = { 825 .ring = {
905 [RADEON_RING_TYPE_GFX_INDEX] = { 826 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
906 .ib_execute = &r100_ring_ib_execute,
907 .emit_fence = &r300_fence_ring_emit,
908 .emit_semaphore = &r100_semaphore_ring_emit,
909 .cs_parse = &r300_cs_parse,
910 .ring_start = &rv515_ring_start,
911 .ring_test = &r100_ring_test,
912 .ib_test = &r100_ib_test,
913 .is_lockup = &r100_gpu_is_lockup,
914 .get_rptr = &radeon_ring_generic_get_rptr,
915 .get_wptr = &radeon_ring_generic_get_wptr,
916 .set_wptr = &radeon_ring_generic_set_wptr,
917 }
918 }, 827 },
919 .irq = { 828 .irq = {
920 .set = &rs600_irq_set, 829 .set = &rs600_irq_set,
@@ -966,6 +875,32 @@ static struct radeon_asic r520_asic = {
966 }, 875 },
967}; 876};
968 877
878static struct radeon_asic_ring r600_gfx_ring = {
879 .ib_execute = &r600_ring_ib_execute,
880 .emit_fence = &r600_fence_ring_emit,
881 .emit_semaphore = &r600_semaphore_ring_emit,
882 .cs_parse = &r600_cs_parse,
883 .ring_test = &r600_ring_test,
884 .ib_test = &r600_ib_test,
885 .is_lockup = &r600_gfx_is_lockup,
886 .get_rptr = &radeon_ring_generic_get_rptr,
887 .get_wptr = &radeon_ring_generic_get_wptr,
888 .set_wptr = &radeon_ring_generic_set_wptr,
889};
890
891static struct radeon_asic_ring r600_dma_ring = {
892 .ib_execute = &r600_dma_ring_ib_execute,
893 .emit_fence = &r600_dma_fence_ring_emit,
894 .emit_semaphore = &r600_dma_semaphore_ring_emit,
895 .cs_parse = &r600_dma_cs_parse,
896 .ring_test = &r600_dma_ring_test,
897 .ib_test = &r600_dma_ib_test,
898 .is_lockup = &r600_dma_is_lockup,
899 .get_rptr = &r600_dma_get_rptr,
900 .get_wptr = &r600_dma_get_wptr,
901 .set_wptr = &r600_dma_set_wptr,
902};
903
969static struct radeon_asic r600_asic = { 904static struct radeon_asic r600_asic = {
970 .init = &r600_init, 905 .init = &r600_init,
971 .fini = &r600_fini, 906 .fini = &r600_fini,
@@ -983,30 +918,8 @@ static struct radeon_asic r600_asic = {
983 .set_page = &rs600_gart_set_page, 918 .set_page = &rs600_gart_set_page,
984 }, 919 },
985 .ring = { 920 .ring = {
986 [RADEON_RING_TYPE_GFX_INDEX] = { 921 [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
987 .ib_execute = &r600_ring_ib_execute, 922 [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
988 .emit_fence = &r600_fence_ring_emit,
989 .emit_semaphore = &r600_semaphore_ring_emit,
990 .cs_parse = &r600_cs_parse,
991 .ring_test = &r600_ring_test,
992 .ib_test = &r600_ib_test,
993 .is_lockup = &r600_gfx_is_lockup,
994 .get_rptr = &radeon_ring_generic_get_rptr,
995 .get_wptr = &radeon_ring_generic_get_wptr,
996 .set_wptr = &radeon_ring_generic_set_wptr,
997 },
998 [R600_RING_TYPE_DMA_INDEX] = {
999 .ib_execute = &r600_dma_ring_ib_execute,
1000 .emit_fence = &r600_dma_fence_ring_emit,
1001 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1002 .cs_parse = &r600_dma_cs_parse,
1003 .ring_test = &r600_dma_ring_test,
1004 .ib_test = &r600_dma_ib_test,
1005 .is_lockup = &r600_dma_is_lockup,
1006 .get_rptr = &radeon_ring_generic_get_rptr,
1007 .get_wptr = &radeon_ring_generic_get_wptr,
1008 .set_wptr = &radeon_ring_generic_set_wptr,
1009 }
1010 }, 923 },
1011 .irq = { 924 .irq = {
1012 .set = &r600_irq_set, 925 .set = &r600_irq_set,
@@ -1022,7 +935,7 @@ static struct radeon_asic r600_asic = {
1022 .hdmi_setmode = &r600_hdmi_setmode, 935 .hdmi_setmode = &r600_hdmi_setmode,
1023 }, 936 },
1024 .copy = { 937 .copy = {
1025 .blit = &r600_copy_blit, 938 .blit = &r600_copy_cpdma,
1026 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 939 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1027 .dma = &r600_copy_dma, 940 .dma = &r600_copy_dma,
1028 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 941 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1078,30 +991,8 @@ static struct radeon_asic rv6xx_asic = {
1078 .set_page = &rs600_gart_set_page, 991 .set_page = &rs600_gart_set_page,
1079 }, 992 },
1080 .ring = { 993 .ring = {
1081 [RADEON_RING_TYPE_GFX_INDEX] = { 994 [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
1082 .ib_execute = &r600_ring_ib_execute, 995 [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
1083 .emit_fence = &r600_fence_ring_emit,
1084 .emit_semaphore = &r600_semaphore_ring_emit,
1085 .cs_parse = &r600_cs_parse,
1086 .ring_test = &r600_ring_test,
1087 .ib_test = &r600_ib_test,
1088 .is_lockup = &r600_gfx_is_lockup,
1089 .get_rptr = &radeon_ring_generic_get_rptr,
1090 .get_wptr = &radeon_ring_generic_get_wptr,
1091 .set_wptr = &radeon_ring_generic_set_wptr,
1092 },
1093 [R600_RING_TYPE_DMA_INDEX] = {
1094 .ib_execute = &r600_dma_ring_ib_execute,
1095 .emit_fence = &r600_dma_fence_ring_emit,
1096 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1097 .cs_parse = &r600_dma_cs_parse,
1098 .ring_test = &r600_dma_ring_test,
1099 .ib_test = &r600_dma_ib_test,
1100 .is_lockup = &r600_dma_is_lockup,
1101 .get_rptr = &radeon_ring_generic_get_rptr,
1102 .get_wptr = &radeon_ring_generic_get_wptr,
1103 .set_wptr = &radeon_ring_generic_set_wptr,
1104 }
1105 }, 996 },
1106 .irq = { 997 .irq = {
1107 .set = &r600_irq_set, 998 .set = &r600_irq_set,
@@ -1115,7 +1006,7 @@ static struct radeon_asic rv6xx_asic = {
1115 .get_backlight_level = &atombios_get_backlight_level, 1006 .get_backlight_level = &atombios_get_backlight_level,
1116 }, 1007 },
1117 .copy = { 1008 .copy = {
1118 .blit = &r600_copy_blit, 1009 .blit = &r600_copy_cpdma,
1119 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1010 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1120 .dma = &r600_copy_dma, 1011 .dma = &r600_copy_dma,
1121 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1012 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1161,6 +1052,7 @@ static struct radeon_asic rv6xx_asic = {
1161 .get_mclk = &rv6xx_dpm_get_mclk, 1052 .get_mclk = &rv6xx_dpm_get_mclk,
1162 .print_power_state = &rv6xx_dpm_print_power_state, 1053 .print_power_state = &rv6xx_dpm_print_power_state,
1163 .debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level, 1054 .debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level,
1055 .force_performance_level = &rv6xx_dpm_force_performance_level,
1164 }, 1056 },
1165 .pflip = { 1057 .pflip = {
1166 .pre_page_flip = &rs600_pre_page_flip, 1058 .pre_page_flip = &rs600_pre_page_flip,
@@ -1186,30 +1078,8 @@ static struct radeon_asic rs780_asic = {
1186 .set_page = &rs600_gart_set_page, 1078 .set_page = &rs600_gart_set_page,
1187 }, 1079 },
1188 .ring = { 1080 .ring = {
1189 [RADEON_RING_TYPE_GFX_INDEX] = { 1081 [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
1190 .ib_execute = &r600_ring_ib_execute, 1082 [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
1191 .emit_fence = &r600_fence_ring_emit,
1192 .emit_semaphore = &r600_semaphore_ring_emit,
1193 .cs_parse = &r600_cs_parse,
1194 .ring_test = &r600_ring_test,
1195 .ib_test = &r600_ib_test,
1196 .is_lockup = &r600_gfx_is_lockup,
1197 .get_rptr = &radeon_ring_generic_get_rptr,
1198 .get_wptr = &radeon_ring_generic_get_wptr,
1199 .set_wptr = &radeon_ring_generic_set_wptr,
1200 },
1201 [R600_RING_TYPE_DMA_INDEX] = {
1202 .ib_execute = &r600_dma_ring_ib_execute,
1203 .emit_fence = &r600_dma_fence_ring_emit,
1204 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1205 .cs_parse = &r600_dma_cs_parse,
1206 .ring_test = &r600_dma_ring_test,
1207 .ib_test = &r600_dma_ib_test,
1208 .is_lockup = &r600_dma_is_lockup,
1209 .get_rptr = &radeon_ring_generic_get_rptr,
1210 .get_wptr = &radeon_ring_generic_get_wptr,
1211 .set_wptr = &radeon_ring_generic_set_wptr,
1212 }
1213 }, 1083 },
1214 .irq = { 1084 .irq = {
1215 .set = &r600_irq_set, 1085 .set = &r600_irq_set,
@@ -1225,7 +1095,7 @@ static struct radeon_asic rs780_asic = {
1225 .hdmi_setmode = &r600_hdmi_setmode, 1095 .hdmi_setmode = &r600_hdmi_setmode,
1226 }, 1096 },
1227 .copy = { 1097 .copy = {
1228 .blit = &r600_copy_blit, 1098 .blit = &r600_copy_cpdma,
1229 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1099 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1230 .dma = &r600_copy_dma, 1100 .dma = &r600_copy_dma,
1231 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1101 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1279,6 +1149,19 @@ static struct radeon_asic rs780_asic = {
1279 }, 1149 },
1280}; 1150};
1281 1151
1152static struct radeon_asic_ring rv770_uvd_ring = {
1153 .ib_execute = &uvd_v1_0_ib_execute,
1154 .emit_fence = &uvd_v2_2_fence_emit,
1155 .emit_semaphore = &uvd_v1_0_semaphore_emit,
1156 .cs_parse = &radeon_uvd_cs_parse,
1157 .ring_test = &uvd_v1_0_ring_test,
1158 .ib_test = &uvd_v1_0_ib_test,
1159 .is_lockup = &radeon_ring_test_lockup,
1160 .get_rptr = &uvd_v1_0_get_rptr,
1161 .get_wptr = &uvd_v1_0_get_wptr,
1162 .set_wptr = &uvd_v1_0_set_wptr,
1163};
1164
1282static struct radeon_asic rv770_asic = { 1165static struct radeon_asic rv770_asic = {
1283 .init = &rv770_init, 1166 .init = &rv770_init,
1284 .fini = &rv770_fini, 1167 .fini = &rv770_fini,
@@ -1296,42 +1179,9 @@ static struct radeon_asic rv770_asic = {
1296 .set_page = &rs600_gart_set_page, 1179 .set_page = &rs600_gart_set_page,
1297 }, 1180 },
1298 .ring = { 1181 .ring = {
1299 [RADEON_RING_TYPE_GFX_INDEX] = { 1182 [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
1300 .ib_execute = &r600_ring_ib_execute, 1183 [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
1301 .emit_fence = &r600_fence_ring_emit, 1184 [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
1302 .emit_semaphore = &r600_semaphore_ring_emit,
1303 .cs_parse = &r600_cs_parse,
1304 .ring_test = &r600_ring_test,
1305 .ib_test = &r600_ib_test,
1306 .is_lockup = &r600_gfx_is_lockup,
1307 .get_rptr = &radeon_ring_generic_get_rptr,
1308 .get_wptr = &radeon_ring_generic_get_wptr,
1309 .set_wptr = &radeon_ring_generic_set_wptr,
1310 },
1311 [R600_RING_TYPE_DMA_INDEX] = {
1312 .ib_execute = &r600_dma_ring_ib_execute,
1313 .emit_fence = &r600_dma_fence_ring_emit,
1314 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1315 .cs_parse = &r600_dma_cs_parse,
1316 .ring_test = &r600_dma_ring_test,
1317 .ib_test = &r600_dma_ib_test,
1318 .is_lockup = &r600_dma_is_lockup,
1319 .get_rptr = &radeon_ring_generic_get_rptr,
1320 .get_wptr = &radeon_ring_generic_get_wptr,
1321 .set_wptr = &radeon_ring_generic_set_wptr,
1322 },
1323 [R600_RING_TYPE_UVD_INDEX] = {
1324 .ib_execute = &r600_uvd_ib_execute,
1325 .emit_fence = &r600_uvd_fence_emit,
1326 .emit_semaphore = &r600_uvd_semaphore_emit,
1327 .cs_parse = &radeon_uvd_cs_parse,
1328 .ring_test = &r600_uvd_ring_test,
1329 .ib_test = &r600_uvd_ib_test,
1330 .is_lockup = &radeon_ring_test_lockup,
1331 .get_rptr = &radeon_ring_generic_get_rptr,
1332 .get_wptr = &radeon_ring_generic_get_wptr,
1333 .set_wptr = &radeon_ring_generic_set_wptr,
1334 }
1335 }, 1185 },
1336 .irq = { 1186 .irq = {
1337 .set = &r600_irq_set, 1187 .set = &r600_irq_set,
@@ -1347,7 +1197,7 @@ static struct radeon_asic rv770_asic = {
1347 .hdmi_setmode = &r600_hdmi_setmode, 1197 .hdmi_setmode = &r600_hdmi_setmode,
1348 }, 1198 },
1349 .copy = { 1199 .copy = {
1350 .blit = &r600_copy_blit, 1200 .blit = &r600_copy_cpdma,
1351 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1201 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1352 .dma = &rv770_copy_dma, 1202 .dma = &rv770_copy_dma,
1353 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1203 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1404,6 +1254,32 @@ static struct radeon_asic rv770_asic = {
 	},
 };
 
+static struct radeon_asic_ring evergreen_gfx_ring = {
+	.ib_execute = &evergreen_ring_ib_execute,
+	.emit_fence = &r600_fence_ring_emit,
+	.emit_semaphore = &r600_semaphore_ring_emit,
+	.cs_parse = &evergreen_cs_parse,
+	.ring_test = &r600_ring_test,
+	.ib_test = &r600_ib_test,
+	.is_lockup = &evergreen_gfx_is_lockup,
+	.get_rptr = &radeon_ring_generic_get_rptr,
+	.get_wptr = &radeon_ring_generic_get_wptr,
+	.set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring evergreen_dma_ring = {
+	.ib_execute = &evergreen_dma_ring_ib_execute,
+	.emit_fence = &evergreen_dma_fence_ring_emit,
+	.emit_semaphore = &r600_dma_semaphore_ring_emit,
+	.cs_parse = &evergreen_dma_cs_parse,
+	.ring_test = &r600_dma_ring_test,
+	.ib_test = &r600_dma_ib_test,
+	.is_lockup = &evergreen_dma_is_lockup,
+	.get_rptr = &r600_dma_get_rptr,
+	.get_wptr = &r600_dma_get_wptr,
+	.set_wptr = &r600_dma_set_wptr,
+};
+
 static struct radeon_asic evergreen_asic = {
 	.init = &evergreen_init,
 	.fini = &evergreen_fini,
@@ -1421,42 +1297,9 @@ static struct radeon_asic evergreen_asic = {
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
-		[RADEON_RING_TYPE_GFX_INDEX] = {
-			.ib_execute = &evergreen_ring_ib_execute,
-			.emit_fence = &r600_fence_ring_emit,
-			.emit_semaphore = &r600_semaphore_ring_emit,
-			.cs_parse = &evergreen_cs_parse,
-			.ring_test = &r600_ring_test,
-			.ib_test = &r600_ib_test,
-			.is_lockup = &evergreen_gfx_is_lockup,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[R600_RING_TYPE_DMA_INDEX] = {
-			.ib_execute = &evergreen_dma_ring_ib_execute,
-			.emit_fence = &evergreen_dma_fence_ring_emit,
-			.emit_semaphore = &r600_dma_semaphore_ring_emit,
-			.cs_parse = &evergreen_dma_cs_parse,
-			.ring_test = &r600_dma_ring_test,
-			.ib_test = &r600_dma_ib_test,
-			.is_lockup = &evergreen_dma_is_lockup,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[R600_RING_TYPE_UVD_INDEX] = {
-			.ib_execute = &r600_uvd_ib_execute,
-			.emit_fence = &r600_uvd_fence_emit,
-			.emit_semaphore = &r600_uvd_semaphore_emit,
-			.cs_parse = &radeon_uvd_cs_parse,
-			.ring_test = &r600_uvd_ring_test,
-			.ib_test = &r600_uvd_ib_test,
-			.is_lockup = &radeon_ring_test_lockup,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		}
+		[RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
 	},
 	.irq = {
 		.set = &evergreen_irq_set,
@@ -1472,7 +1315,7 @@ static struct radeon_asic evergreen_asic = {
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
-		.blit = &r600_copy_blit,
+		.blit = &r600_copy_cpdma,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &evergreen_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1546,42 +1389,9 @@ static struct radeon_asic sumo_asic = {
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
-		[RADEON_RING_TYPE_GFX_INDEX] = {
-			.ib_execute = &evergreen_ring_ib_execute,
-			.emit_fence = &r600_fence_ring_emit,
-			.emit_semaphore = &r600_semaphore_ring_emit,
-			.cs_parse = &evergreen_cs_parse,
-			.ring_test = &r600_ring_test,
-			.ib_test = &r600_ib_test,
-			.is_lockup = &evergreen_gfx_is_lockup,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[R600_RING_TYPE_DMA_INDEX] = {
-			.ib_execute = &evergreen_dma_ring_ib_execute,
-			.emit_fence = &evergreen_dma_fence_ring_emit,
-			.emit_semaphore = &r600_dma_semaphore_ring_emit,
-			.cs_parse = &evergreen_dma_cs_parse,
-			.ring_test = &r600_dma_ring_test,
-			.ib_test = &r600_dma_ib_test,
-			.is_lockup = &evergreen_dma_is_lockup,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[R600_RING_TYPE_UVD_INDEX] = {
-			.ib_execute = &r600_uvd_ib_execute,
-			.emit_fence = &r600_uvd_fence_emit,
-			.emit_semaphore = &r600_uvd_semaphore_emit,
-			.cs_parse = &radeon_uvd_cs_parse,
-			.ring_test = &r600_uvd_ring_test,
-			.ib_test = &r600_uvd_ib_test,
-			.is_lockup = &radeon_ring_test_lockup,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		}
+		[RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
 	},
 	.irq = {
 		.set = &evergreen_irq_set,
@@ -1597,7 +1407,7 @@ static struct radeon_asic sumo_asic = {
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
-		.blit = &r600_copy_blit,
+		.blit = &r600_copy_cpdma,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &evergreen_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1670,42 +1480,9 @@ static struct radeon_asic btc_asic = {
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
-		[RADEON_RING_TYPE_GFX_INDEX] = {
-			.ib_execute = &evergreen_ring_ib_execute,
-			.emit_fence = &r600_fence_ring_emit,
-			.emit_semaphore = &r600_semaphore_ring_emit,
-			.cs_parse = &evergreen_cs_parse,
-			.ring_test = &r600_ring_test,
-			.ib_test = &r600_ib_test,
-			.is_lockup = &evergreen_gfx_is_lockup,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[R600_RING_TYPE_DMA_INDEX] = {
-			.ib_execute = &evergreen_dma_ring_ib_execute,
-			.emit_fence = &evergreen_dma_fence_ring_emit,
-			.emit_semaphore = &r600_dma_semaphore_ring_emit,
-			.cs_parse = &evergreen_dma_cs_parse,
-			.ring_test = &r600_dma_ring_test,
-			.ib_test = &r600_dma_ib_test,
-			.is_lockup = &evergreen_dma_is_lockup,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[R600_RING_TYPE_UVD_INDEX] = {
-			.ib_execute = &r600_uvd_ib_execute,
-			.emit_fence = &r600_uvd_fence_emit,
-			.emit_semaphore = &r600_uvd_semaphore_emit,
-			.cs_parse = &radeon_uvd_cs_parse,
-			.ring_test = &r600_uvd_ring_test,
-			.ib_test = &r600_uvd_ib_test,
-			.is_lockup = &radeon_ring_test_lockup,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		}
+		[RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
 	},
 	.irq = {
 		.set = &evergreen_irq_set,
@@ -1721,7 +1498,7 @@ static struct radeon_asic btc_asic = {
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
-		.blit = &r600_copy_blit,
+		.blit = &r600_copy_cpdma,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &evergreen_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1778,6 +1555,49 @@ static struct radeon_asic btc_asic = {
 	},
 };
 
+static struct radeon_asic_ring cayman_gfx_ring = {
+	.ib_execute = &cayman_ring_ib_execute,
+	.ib_parse = &evergreen_ib_parse,
+	.emit_fence = &cayman_fence_ring_emit,
+	.emit_semaphore = &r600_semaphore_ring_emit,
+	.cs_parse = &evergreen_cs_parse,
+	.ring_test = &r600_ring_test,
+	.ib_test = &r600_ib_test,
+	.is_lockup = &cayman_gfx_is_lockup,
+	.vm_flush = &cayman_vm_flush,
+	.get_rptr = &radeon_ring_generic_get_rptr,
+	.get_wptr = &radeon_ring_generic_get_wptr,
+	.set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring cayman_dma_ring = {
+	.ib_execute = &cayman_dma_ring_ib_execute,
+	.ib_parse = &evergreen_dma_ib_parse,
+	.emit_fence = &evergreen_dma_fence_ring_emit,
+	.emit_semaphore = &r600_dma_semaphore_ring_emit,
+	.cs_parse = &evergreen_dma_cs_parse,
+	.ring_test = &r600_dma_ring_test,
+	.ib_test = &r600_dma_ib_test,
+	.is_lockup = &cayman_dma_is_lockup,
+	.vm_flush = &cayman_dma_vm_flush,
+	.get_rptr = &r600_dma_get_rptr,
+	.get_wptr = &r600_dma_get_wptr,
+	.set_wptr = &r600_dma_set_wptr
+};
+
+static struct radeon_asic_ring cayman_uvd_ring = {
+	.ib_execute = &uvd_v1_0_ib_execute,
+	.emit_fence = &uvd_v2_2_fence_emit,
+	.emit_semaphore = &uvd_v3_1_semaphore_emit,
+	.cs_parse = &radeon_uvd_cs_parse,
+	.ring_test = &uvd_v1_0_ring_test,
+	.ib_test = &uvd_v1_0_ib_test,
+	.is_lockup = &radeon_ring_test_lockup,
+	.get_rptr = &uvd_v1_0_get_rptr,
+	.get_wptr = &uvd_v1_0_get_wptr,
+	.set_wptr = &uvd_v1_0_set_wptr,
+};
+
 static struct radeon_asic cayman_asic = {
 	.init = &cayman_init,
 	.fini = &cayman_fini,
@@ -1801,88 +1621,12 @@ static struct radeon_asic cayman_asic = {
 		.set_page = &cayman_vm_set_page,
 	},
 	.ring = {
-		[RADEON_RING_TYPE_GFX_INDEX] = {
-			.ib_execute = &cayman_ring_ib_execute,
-			.ib_parse = &evergreen_ib_parse,
-			.emit_fence = &cayman_fence_ring_emit,
-			.emit_semaphore = &r600_semaphore_ring_emit,
-			.cs_parse = &evergreen_cs_parse,
-			.ring_test = &r600_ring_test,
-			.ib_test = &r600_ib_test,
-			.is_lockup = &cayman_gfx_is_lockup,
-			.vm_flush = &cayman_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[CAYMAN_RING_TYPE_CP1_INDEX] = {
-			.ib_execute = &cayman_ring_ib_execute,
-			.ib_parse = &evergreen_ib_parse,
-			.emit_fence = &cayman_fence_ring_emit,
-			.emit_semaphore = &r600_semaphore_ring_emit,
-			.cs_parse = &evergreen_cs_parse,
-			.ring_test = &r600_ring_test,
-			.ib_test = &r600_ib_test,
-			.is_lockup = &cayman_gfx_is_lockup,
-			.vm_flush = &cayman_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[CAYMAN_RING_TYPE_CP2_INDEX] = {
-			.ib_execute = &cayman_ring_ib_execute,
-			.ib_parse = &evergreen_ib_parse,
-			.emit_fence = &cayman_fence_ring_emit,
-			.emit_semaphore = &r600_semaphore_ring_emit,
-			.cs_parse = &evergreen_cs_parse,
-			.ring_test = &r600_ring_test,
-			.ib_test = &r600_ib_test,
-			.is_lockup = &cayman_gfx_is_lockup,
-			.vm_flush = &cayman_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[R600_RING_TYPE_DMA_INDEX] = {
-			.ib_execute = &cayman_dma_ring_ib_execute,
-			.ib_parse = &evergreen_dma_ib_parse,
-			.emit_fence = &evergreen_dma_fence_ring_emit,
-			.emit_semaphore = &r600_dma_semaphore_ring_emit,
-			.cs_parse = &evergreen_dma_cs_parse,
-			.ring_test = &r600_dma_ring_test,
-			.ib_test = &r600_dma_ib_test,
-			.is_lockup = &cayman_dma_is_lockup,
-			.vm_flush = &cayman_dma_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[CAYMAN_RING_TYPE_DMA1_INDEX] = {
-			.ib_execute = &cayman_dma_ring_ib_execute,
-			.ib_parse = &evergreen_dma_ib_parse,
-			.emit_fence = &evergreen_dma_fence_ring_emit,
-			.emit_semaphore = &r600_dma_semaphore_ring_emit,
-			.cs_parse = &evergreen_dma_cs_parse,
-			.ring_test = &r600_dma_ring_test,
-			.ib_test = &r600_dma_ib_test,
-			.is_lockup = &cayman_dma_is_lockup,
-			.vm_flush = &cayman_dma_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[R600_RING_TYPE_UVD_INDEX] = {
-			.ib_execute = &r600_uvd_ib_execute,
-			.emit_fence = &r600_uvd_fence_emit,
-			.emit_semaphore = &cayman_uvd_semaphore_emit,
-			.cs_parse = &radeon_uvd_cs_parse,
-			.ring_test = &r600_uvd_ring_test,
-			.ib_test = &r600_uvd_ib_test,
-			.is_lockup = &radeon_ring_test_lockup,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		}
+		[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
+		[CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring,
+		[CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring,
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
 	},
 	.irq = {
 		.set = &evergreen_irq_set,
@@ -1898,7 +1642,7 @@ static struct radeon_asic cayman_asic = {
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
-		.blit = &r600_copy_blit,
+		.blit = &r600_copy_cpdma,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &evergreen_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
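
Worth noting in the cayman table above: GFX, CP1 and CP2 all resolve to the single cayman_gfx_ring, and both DMA slots resolve to cayman_dma_ring, so a fix to one shared table covers every ring that uses it. A tiny sketch (simplified types) of what that aliasing means in practice:

/* Sketch: aliased ring slots are pointer-equal. */
struct ring_ops { int dummy; };

static struct ring_ops gfx_ops;	/* stands in for cayman_gfx_ring */

static struct ring_ops *ring[3] = { &gfx_ops, &gfx_ops, &gfx_ops };

/* ring[1] == ring[0] && ring[2] == ring[0]: one definition, three users. */
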
@@ -1978,88 +1722,12 @@ static struct radeon_asic trinity_asic = {
 		.set_page = &cayman_vm_set_page,
 	},
 	.ring = {
-		[RADEON_RING_TYPE_GFX_INDEX] = {
-			.ib_execute = &cayman_ring_ib_execute,
-			.ib_parse = &evergreen_ib_parse,
-			.emit_fence = &cayman_fence_ring_emit,
-			.emit_semaphore = &r600_semaphore_ring_emit,
-			.cs_parse = &evergreen_cs_parse,
-			.ring_test = &r600_ring_test,
-			.ib_test = &r600_ib_test,
-			.is_lockup = &cayman_gfx_is_lockup,
-			.vm_flush = &cayman_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[CAYMAN_RING_TYPE_CP1_INDEX] = {
-			.ib_execute = &cayman_ring_ib_execute,
-			.ib_parse = &evergreen_ib_parse,
-			.emit_fence = &cayman_fence_ring_emit,
-			.emit_semaphore = &r600_semaphore_ring_emit,
-			.cs_parse = &evergreen_cs_parse,
-			.ring_test = &r600_ring_test,
-			.ib_test = &r600_ib_test,
-			.is_lockup = &cayman_gfx_is_lockup,
-			.vm_flush = &cayman_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[CAYMAN_RING_TYPE_CP2_INDEX] = {
-			.ib_execute = &cayman_ring_ib_execute,
-			.ib_parse = &evergreen_ib_parse,
-			.emit_fence = &cayman_fence_ring_emit,
-			.emit_semaphore = &r600_semaphore_ring_emit,
-			.cs_parse = &evergreen_cs_parse,
-			.ring_test = &r600_ring_test,
-			.ib_test = &r600_ib_test,
-			.is_lockup = &cayman_gfx_is_lockup,
-			.vm_flush = &cayman_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[R600_RING_TYPE_DMA_INDEX] = {
-			.ib_execute = &cayman_dma_ring_ib_execute,
-			.ib_parse = &evergreen_dma_ib_parse,
-			.emit_fence = &evergreen_dma_fence_ring_emit,
-			.emit_semaphore = &r600_dma_semaphore_ring_emit,
-			.cs_parse = &evergreen_dma_cs_parse,
-			.ring_test = &r600_dma_ring_test,
-			.ib_test = &r600_dma_ib_test,
-			.is_lockup = &cayman_dma_is_lockup,
-			.vm_flush = &cayman_dma_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[CAYMAN_RING_TYPE_DMA1_INDEX] = {
-			.ib_execute = &cayman_dma_ring_ib_execute,
-			.ib_parse = &evergreen_dma_ib_parse,
-			.emit_fence = &evergreen_dma_fence_ring_emit,
-			.emit_semaphore = &r600_dma_semaphore_ring_emit,
-			.cs_parse = &evergreen_dma_cs_parse,
-			.ring_test = &r600_dma_ring_test,
-			.ib_test = &r600_dma_ib_test,
-			.is_lockup = &cayman_dma_is_lockup,
-			.vm_flush = &cayman_dma_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[R600_RING_TYPE_UVD_INDEX] = {
-			.ib_execute = &r600_uvd_ib_execute,
-			.emit_fence = &r600_uvd_fence_emit,
-			.emit_semaphore = &cayman_uvd_semaphore_emit,
-			.cs_parse = &radeon_uvd_cs_parse,
-			.ring_test = &r600_uvd_ring_test,
-			.ib_test = &r600_uvd_ib_test,
-			.is_lockup = &radeon_ring_test_lockup,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		}
+		[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
+		[CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring,
+		[CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring,
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
 	},
 	.irq = {
 		.set = &evergreen_irq_set,
@@ -2071,9 +1739,11 @@ static struct radeon_asic trinity_asic = {
 		.wait_for_vblank = &dce4_wait_for_vblank,
 		.set_backlight_level = &atombios_set_backlight_level,
 		.get_backlight_level = &atombios_get_backlight_level,
+		.hdmi_enable = &evergreen_hdmi_enable,
+		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
-		.blit = &r600_copy_blit,
+		.blit = &r600_copy_cpdma,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &evergreen_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2129,6 +1799,36 @@ static struct radeon_asic trinity_asic = {
 	},
 };
 
+static struct radeon_asic_ring si_gfx_ring = {
+	.ib_execute = &si_ring_ib_execute,
+	.ib_parse = &si_ib_parse,
+	.emit_fence = &si_fence_ring_emit,
+	.emit_semaphore = &r600_semaphore_ring_emit,
+	.cs_parse = NULL,
+	.ring_test = &r600_ring_test,
+	.ib_test = &r600_ib_test,
+	.is_lockup = &si_gfx_is_lockup,
+	.vm_flush = &si_vm_flush,
+	.get_rptr = &radeon_ring_generic_get_rptr,
+	.get_wptr = &radeon_ring_generic_get_wptr,
+	.set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring si_dma_ring = {
+	.ib_execute = &cayman_dma_ring_ib_execute,
+	.ib_parse = &evergreen_dma_ib_parse,
+	.emit_fence = &evergreen_dma_fence_ring_emit,
+	.emit_semaphore = &r600_dma_semaphore_ring_emit,
+	.cs_parse = NULL,
+	.ring_test = &r600_dma_ring_test,
+	.ib_test = &r600_dma_ib_test,
+	.is_lockup = &si_dma_is_lockup,
+	.vm_flush = &si_dma_vm_flush,
+	.get_rptr = &r600_dma_get_rptr,
+	.get_wptr = &r600_dma_get_wptr,
+	.set_wptr = &r600_dma_set_wptr,
+};
+
 static struct radeon_asic si_asic = {
 	.init = &si_init,
 	.fini = &si_fini,
@@ -2152,88 +1852,12 @@ static struct radeon_asic si_asic = {
 		.set_page = &si_vm_set_page,
 	},
 	.ring = {
-		[RADEON_RING_TYPE_GFX_INDEX] = {
-			.ib_execute = &si_ring_ib_execute,
-			.ib_parse = &si_ib_parse,
-			.emit_fence = &si_fence_ring_emit,
-			.emit_semaphore = &r600_semaphore_ring_emit,
-			.cs_parse = NULL,
-			.ring_test = &r600_ring_test,
-			.ib_test = &r600_ib_test,
-			.is_lockup = &si_gfx_is_lockup,
-			.vm_flush = &si_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[CAYMAN_RING_TYPE_CP1_INDEX] = {
-			.ib_execute = &si_ring_ib_execute,
-			.ib_parse = &si_ib_parse,
-			.emit_fence = &si_fence_ring_emit,
-			.emit_semaphore = &r600_semaphore_ring_emit,
-			.cs_parse = NULL,
-			.ring_test = &r600_ring_test,
-			.ib_test = &r600_ib_test,
-			.is_lockup = &si_gfx_is_lockup,
-			.vm_flush = &si_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[CAYMAN_RING_TYPE_CP2_INDEX] = {
-			.ib_execute = &si_ring_ib_execute,
-			.ib_parse = &si_ib_parse,
-			.emit_fence = &si_fence_ring_emit,
-			.emit_semaphore = &r600_semaphore_ring_emit,
-			.cs_parse = NULL,
-			.ring_test = &r600_ring_test,
-			.ib_test = &r600_ib_test,
-			.is_lockup = &si_gfx_is_lockup,
-			.vm_flush = &si_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[R600_RING_TYPE_DMA_INDEX] = {
-			.ib_execute = &cayman_dma_ring_ib_execute,
-			.ib_parse = &evergreen_dma_ib_parse,
-			.emit_fence = &evergreen_dma_fence_ring_emit,
-			.emit_semaphore = &r600_dma_semaphore_ring_emit,
-			.cs_parse = NULL,
-			.ring_test = &r600_dma_ring_test,
-			.ib_test = &r600_dma_ib_test,
-			.is_lockup = &si_dma_is_lockup,
-			.vm_flush = &si_dma_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[CAYMAN_RING_TYPE_DMA1_INDEX] = {
-			.ib_execute = &cayman_dma_ring_ib_execute,
-			.ib_parse = &evergreen_dma_ib_parse,
-			.emit_fence = &evergreen_dma_fence_ring_emit,
-			.emit_semaphore = &r600_dma_semaphore_ring_emit,
-			.cs_parse = NULL,
-			.ring_test = &r600_dma_ring_test,
-			.ib_test = &r600_dma_ib_test,
-			.is_lockup = &si_dma_is_lockup,
-			.vm_flush = &si_dma_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[R600_RING_TYPE_UVD_INDEX] = {
-			.ib_execute = &r600_uvd_ib_execute,
-			.emit_fence = &r600_uvd_fence_emit,
-			.emit_semaphore = &cayman_uvd_semaphore_emit,
-			.cs_parse = &radeon_uvd_cs_parse,
-			.ring_test = &r600_uvd_ring_test,
-			.ib_test = &r600_uvd_ib_test,
-			.is_lockup = &radeon_ring_test_lockup,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		}
+		[RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
+		[CAYMAN_RING_TYPE_CP1_INDEX] = &si_gfx_ring,
+		[CAYMAN_RING_TYPE_CP2_INDEX] = &si_gfx_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &si_dma_ring,
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = &si_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
 	},
 	.irq = {
 		.set = &si_irq_set,
@@ -2245,6 +1869,8 @@ static struct radeon_asic si_asic = {
 		.wait_for_vblank = &dce4_wait_for_vblank,
 		.set_backlight_level = &atombios_set_backlight_level,
 		.get_backlight_level = &atombios_get_backlight_level,
+		.hdmi_enable = &evergreen_hdmi_enable,
+		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
 		.blit = NULL,
@@ -2304,6 +1930,51 @@ static struct radeon_asic si_asic = {
 	},
 };
 
+static struct radeon_asic_ring ci_gfx_ring = {
+	.ib_execute = &cik_ring_ib_execute,
+	.ib_parse = &cik_ib_parse,
+	.emit_fence = &cik_fence_gfx_ring_emit,
+	.emit_semaphore = &cik_semaphore_ring_emit,
+	.cs_parse = NULL,
+	.ring_test = &cik_ring_test,
+	.ib_test = &cik_ib_test,
+	.is_lockup = &cik_gfx_is_lockup,
+	.vm_flush = &cik_vm_flush,
+	.get_rptr = &radeon_ring_generic_get_rptr,
+	.get_wptr = &radeon_ring_generic_get_wptr,
+	.set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring ci_cp_ring = {
+	.ib_execute = &cik_ring_ib_execute,
+	.ib_parse = &cik_ib_parse,
+	.emit_fence = &cik_fence_compute_ring_emit,
+	.emit_semaphore = &cik_semaphore_ring_emit,
+	.cs_parse = NULL,
+	.ring_test = &cik_ring_test,
+	.ib_test = &cik_ib_test,
+	.is_lockup = &cik_gfx_is_lockup,
+	.vm_flush = &cik_vm_flush,
+	.get_rptr = &cik_compute_ring_get_rptr,
+	.get_wptr = &cik_compute_ring_get_wptr,
+	.set_wptr = &cik_compute_ring_set_wptr,
+};
+
+static struct radeon_asic_ring ci_dma_ring = {
+	.ib_execute = &cik_sdma_ring_ib_execute,
+	.ib_parse = &cik_ib_parse,
+	.emit_fence = &cik_sdma_fence_ring_emit,
+	.emit_semaphore = &cik_sdma_semaphore_ring_emit,
+	.cs_parse = NULL,
+	.ring_test = &cik_sdma_ring_test,
+	.ib_test = &cik_sdma_ib_test,
+	.is_lockup = &cik_sdma_is_lockup,
+	.vm_flush = &cik_dma_vm_flush,
+	.get_rptr = &r600_dma_get_rptr,
+	.get_wptr = &r600_dma_get_wptr,
+	.set_wptr = &r600_dma_set_wptr,
+};
+
 static struct radeon_asic ci_asic = {
 	.init = &cik_init,
 	.fini = &cik_fini,
@@ -2327,88 +1998,12 @@ static struct radeon_asic ci_asic = {
 		.set_page = &cik_vm_set_page,
 	},
 	.ring = {
-		[RADEON_RING_TYPE_GFX_INDEX] = {
-			.ib_execute = &cik_ring_ib_execute,
-			.ib_parse = &cik_ib_parse,
-			.emit_fence = &cik_fence_gfx_ring_emit,
-			.emit_semaphore = &cik_semaphore_ring_emit,
-			.cs_parse = NULL,
-			.ring_test = &cik_ring_test,
-			.ib_test = &cik_ib_test,
-			.is_lockup = &cik_gfx_is_lockup,
-			.vm_flush = &cik_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[CAYMAN_RING_TYPE_CP1_INDEX] = {
-			.ib_execute = &cik_ring_ib_execute,
-			.ib_parse = &cik_ib_parse,
-			.emit_fence = &cik_fence_compute_ring_emit,
-			.emit_semaphore = &cik_semaphore_ring_emit,
-			.cs_parse = NULL,
-			.ring_test = &cik_ring_test,
-			.ib_test = &cik_ib_test,
-			.is_lockup = &cik_gfx_is_lockup,
-			.vm_flush = &cik_vm_flush,
-			.get_rptr = &cik_compute_ring_get_rptr,
-			.get_wptr = &cik_compute_ring_get_wptr,
-			.set_wptr = &cik_compute_ring_set_wptr,
-		},
-		[CAYMAN_RING_TYPE_CP2_INDEX] = {
-			.ib_execute = &cik_ring_ib_execute,
-			.ib_parse = &cik_ib_parse,
-			.emit_fence = &cik_fence_compute_ring_emit,
-			.emit_semaphore = &cik_semaphore_ring_emit,
-			.cs_parse = NULL,
-			.ring_test = &cik_ring_test,
-			.ib_test = &cik_ib_test,
-			.is_lockup = &cik_gfx_is_lockup,
-			.vm_flush = &cik_vm_flush,
-			.get_rptr = &cik_compute_ring_get_rptr,
-			.get_wptr = &cik_compute_ring_get_wptr,
-			.set_wptr = &cik_compute_ring_set_wptr,
-		},
-		[R600_RING_TYPE_DMA_INDEX] = {
-			.ib_execute = &cik_sdma_ring_ib_execute,
-			.ib_parse = &cik_ib_parse,
-			.emit_fence = &cik_sdma_fence_ring_emit,
-			.emit_semaphore = &cik_sdma_semaphore_ring_emit,
-			.cs_parse = NULL,
-			.ring_test = &cik_sdma_ring_test,
-			.ib_test = &cik_sdma_ib_test,
-			.is_lockup = &cik_sdma_is_lockup,
-			.vm_flush = &cik_dma_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[CAYMAN_RING_TYPE_DMA1_INDEX] = {
-			.ib_execute = &cik_sdma_ring_ib_execute,
-			.ib_parse = &cik_ib_parse,
-			.emit_fence = &cik_sdma_fence_ring_emit,
-			.emit_semaphore = &cik_sdma_semaphore_ring_emit,
-			.cs_parse = NULL,
-			.ring_test = &cik_sdma_ring_test,
-			.ib_test = &cik_sdma_ib_test,
-			.is_lockup = &cik_sdma_is_lockup,
-			.vm_flush = &cik_dma_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[R600_RING_TYPE_UVD_INDEX] = {
-			.ib_execute = &r600_uvd_ib_execute,
-			.emit_fence = &r600_uvd_fence_emit,
-			.emit_semaphore = &cayman_uvd_semaphore_emit,
-			.cs_parse = &radeon_uvd_cs_parse,
-			.ring_test = &r600_uvd_ring_test,
-			.ib_test = &r600_uvd_ib_test,
-			.is_lockup = &radeon_ring_test_lockup,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		}
+		[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
+		[CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring,
+		[CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
 	},
 	.irq = {
 		.set = &cik_irq_set,
@@ -2418,6 +2013,8 @@ static struct radeon_asic ci_asic = {
 		.bandwidth_update = &dce8_bandwidth_update,
 		.get_vblank_counter = &evergreen_get_vblank_counter,
 		.wait_for_vblank = &dce4_wait_for_vblank,
+		.hdmi_enable = &evergreen_hdmi_enable,
+		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
 		.blit = NULL,
@@ -2451,6 +2048,25 @@ static struct radeon_asic ci_asic = {
 		.set_pcie_lanes = NULL,
 		.set_clock_gating = NULL,
 		.set_uvd_clocks = &cik_set_uvd_clocks,
+		.get_temperature = &ci_get_temp,
+	},
+	.dpm = {
+		.init = &ci_dpm_init,
+		.setup_asic = &ci_dpm_setup_asic,
+		.enable = &ci_dpm_enable,
+		.disable = &ci_dpm_disable,
+		.pre_set_power_state = &ci_dpm_pre_set_power_state,
+		.set_power_state = &ci_dpm_set_power_state,
+		.post_set_power_state = &ci_dpm_post_set_power_state,
+		.display_configuration_changed = &ci_dpm_display_configuration_changed,
+		.fini = &ci_dpm_fini,
+		.get_sclk = &ci_dpm_get_sclk,
+		.get_mclk = &ci_dpm_get_mclk,
+		.print_power_state = &ci_dpm_print_power_state,
+		.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
+		.force_performance_level = &ci_dpm_force_performance_level,
+		.vblank_too_short = &ci_dpm_vblank_too_short,
+		.powergate_uvd = &ci_dpm_powergate_uvd,
 	},
 	.pflip = {
 		.pre_page_flip = &evergreen_pre_page_flip,
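
The .dpm block added above is another vtable: per-family power-management entry points the core calls without knowing which chip it drives. A hedged sketch of that dispatch style follows; the wrapper name and struct layout are illustrative, not the driver's actual radeon_dpm_* helpers:

/* Illustrative vtable dispatch; names here are hypothetical. */
struct dpm_ops {
	int (*enable)(void *rdev);
	void (*disable)(void *rdev);
};

static int dpm_enable(const struct dpm_ops *ops, void *rdev)
{
	if (!ops->enable)	/* family does not implement the hook */
		return -1;
	return ops->enable(rdev);
}
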
@@ -2482,88 +2098,12 @@ static struct radeon_asic kv_asic = {
 		.set_page = &cik_vm_set_page,
 	},
 	.ring = {
-		[RADEON_RING_TYPE_GFX_INDEX] = {
-			.ib_execute = &cik_ring_ib_execute,
-			.ib_parse = &cik_ib_parse,
-			.emit_fence = &cik_fence_gfx_ring_emit,
-			.emit_semaphore = &cik_semaphore_ring_emit,
-			.cs_parse = NULL,
-			.ring_test = &cik_ring_test,
-			.ib_test = &cik_ib_test,
-			.is_lockup = &cik_gfx_is_lockup,
-			.vm_flush = &cik_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[CAYMAN_RING_TYPE_CP1_INDEX] = {
-			.ib_execute = &cik_ring_ib_execute,
-			.ib_parse = &cik_ib_parse,
-			.emit_fence = &cik_fence_compute_ring_emit,
-			.emit_semaphore = &cik_semaphore_ring_emit,
-			.cs_parse = NULL,
-			.ring_test = &cik_ring_test,
-			.ib_test = &cik_ib_test,
-			.is_lockup = &cik_gfx_is_lockup,
-			.vm_flush = &cik_vm_flush,
-			.get_rptr = &cik_compute_ring_get_rptr,
-			.get_wptr = &cik_compute_ring_get_wptr,
-			.set_wptr = &cik_compute_ring_set_wptr,
-		},
-		[CAYMAN_RING_TYPE_CP2_INDEX] = {
-			.ib_execute = &cik_ring_ib_execute,
-			.ib_parse = &cik_ib_parse,
-			.emit_fence = &cik_fence_compute_ring_emit,
-			.emit_semaphore = &cik_semaphore_ring_emit,
-			.cs_parse = NULL,
-			.ring_test = &cik_ring_test,
-			.ib_test = &cik_ib_test,
-			.is_lockup = &cik_gfx_is_lockup,
-			.vm_flush = &cik_vm_flush,
-			.get_rptr = &cik_compute_ring_get_rptr,
-			.get_wptr = &cik_compute_ring_get_wptr,
-			.set_wptr = &cik_compute_ring_set_wptr,
-		},
-		[R600_RING_TYPE_DMA_INDEX] = {
-			.ib_execute = &cik_sdma_ring_ib_execute,
-			.ib_parse = &cik_ib_parse,
-			.emit_fence = &cik_sdma_fence_ring_emit,
-			.emit_semaphore = &cik_sdma_semaphore_ring_emit,
-			.cs_parse = NULL,
-			.ring_test = &cik_sdma_ring_test,
-			.ib_test = &cik_sdma_ib_test,
-			.is_lockup = &cik_sdma_is_lockup,
-			.vm_flush = &cik_dma_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[CAYMAN_RING_TYPE_DMA1_INDEX] = {
-			.ib_execute = &cik_sdma_ring_ib_execute,
-			.ib_parse = &cik_ib_parse,
-			.emit_fence = &cik_sdma_fence_ring_emit,
-			.emit_semaphore = &cik_sdma_semaphore_ring_emit,
-			.cs_parse = NULL,
-			.ring_test = &cik_sdma_ring_test,
-			.ib_test = &cik_sdma_ib_test,
-			.is_lockup = &cik_sdma_is_lockup,
-			.vm_flush = &cik_dma_vm_flush,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		},
-		[R600_RING_TYPE_UVD_INDEX] = {
-			.ib_execute = &r600_uvd_ib_execute,
-			.emit_fence = &r600_uvd_fence_emit,
-			.emit_semaphore = &cayman_uvd_semaphore_emit,
-			.cs_parse = &radeon_uvd_cs_parse,
-			.ring_test = &r600_uvd_ring_test,
-			.ib_test = &r600_uvd_ib_test,
-			.is_lockup = &radeon_ring_test_lockup,
-			.get_rptr = &radeon_ring_generic_get_rptr,
-			.get_wptr = &radeon_ring_generic_get_wptr,
-			.set_wptr = &radeon_ring_generic_set_wptr,
-		}
+		[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
+		[CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring,
+		[CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring,
+		[R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
 	},
 	.irq = {
 		.set = &cik_irq_set,
@@ -2573,6 +2113,8 @@ static struct radeon_asic kv_asic = {
 		.bandwidth_update = &dce8_bandwidth_update,
 		.get_vblank_counter = &evergreen_get_vblank_counter,
 		.wait_for_vblank = &dce4_wait_for_vblank,
+		.hdmi_enable = &evergreen_hdmi_enable,
+		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
 		.blit = NULL,
@@ -2606,6 +2148,24 @@ static struct radeon_asic kv_asic = {
 		.set_pcie_lanes = NULL,
 		.set_clock_gating = NULL,
 		.set_uvd_clocks = &cik_set_uvd_clocks,
+		.get_temperature = &kv_get_temp,
+	},
+	.dpm = {
+		.init = &kv_dpm_init,
+		.setup_asic = &kv_dpm_setup_asic,
+		.enable = &kv_dpm_enable,
+		.disable = &kv_dpm_disable,
+		.pre_set_power_state = &kv_dpm_pre_set_power_state,
+		.set_power_state = &kv_dpm_set_power_state,
+		.post_set_power_state = &kv_dpm_post_set_power_state,
+		.display_configuration_changed = &kv_dpm_display_configuration_changed,
+		.fini = &kv_dpm_fini,
+		.get_sclk = &kv_dpm_get_sclk,
+		.get_mclk = &kv_dpm_get_mclk,
+		.print_power_state = &kv_dpm_print_power_state,
+		.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
+		.force_performance_level = &kv_dpm_force_performance_level,
+		.powergate_uvd = &kv_dpm_powergate_uvd,
 	},
 	.pflip = {
 		.pre_page_flip = &evergreen_pre_page_flip,
@@ -2775,19 +2335,188 @@ int radeon_asic_init(struct radeon_device *rdev)
 			rdev->has_uvd = false;
 		else
 			rdev->has_uvd = true;
+		switch (rdev->family) {
+		case CHIP_TAHITI:
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_MC_MGCG |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_VCE_MGCG |
+				RADEON_CG_SUPPORT_UVD_MGCG |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0;
+			break;
+		case CHIP_PITCAIRN:
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_GFX_RLC_LS |
+				RADEON_CG_SUPPORT_MC_LS |
+				RADEON_CG_SUPPORT_MC_MGCG |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_VCE_MGCG |
+				RADEON_CG_SUPPORT_UVD_MGCG |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0;
+			break;
+		case CHIP_VERDE:
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_GFX_RLC_LS |
+				RADEON_CG_SUPPORT_MC_LS |
+				RADEON_CG_SUPPORT_MC_MGCG |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_VCE_MGCG |
+				RADEON_CG_SUPPORT_UVD_MGCG |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0 |
+				/*RADEON_PG_SUPPORT_GFX_CG | */
+				RADEON_PG_SUPPORT_SDMA;
+			break;
+		case CHIP_OLAND:
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_GFX_RLC_LS |
+				RADEON_CG_SUPPORT_MC_LS |
+				RADEON_CG_SUPPORT_MC_MGCG |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_UVD_MGCG |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0;
+			break;
+		case CHIP_HAINAN:
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_GFX_RLC_LS |
+				RADEON_CG_SUPPORT_MC_LS |
+				RADEON_CG_SUPPORT_MC_MGCG |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0;
+			break;
+		default:
+			rdev->cg_flags = 0;
+			rdev->pg_flags = 0;
+			break;
+		}
 		break;
 	case CHIP_BONAIRE:
 		rdev->asic = &ci_asic;
 		rdev->num_crtc = 6;
+		rdev->has_uvd = true;
+		rdev->cg_flags =
+			RADEON_CG_SUPPORT_GFX_MGCG |
+			RADEON_CG_SUPPORT_GFX_MGLS |
+			/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+			RADEON_CG_SUPPORT_GFX_CGLS |
+			RADEON_CG_SUPPORT_GFX_CGTS |
+			RADEON_CG_SUPPORT_GFX_CGTS_LS |
+			RADEON_CG_SUPPORT_GFX_CP_LS |
+			RADEON_CG_SUPPORT_MC_LS |
+			RADEON_CG_SUPPORT_MC_MGCG |
+			RADEON_CG_SUPPORT_SDMA_MGCG |
+			RADEON_CG_SUPPORT_SDMA_LS |
+			RADEON_CG_SUPPORT_BIF_LS |
+			RADEON_CG_SUPPORT_VCE_MGCG |
+			RADEON_CG_SUPPORT_UVD_MGCG |
+			RADEON_CG_SUPPORT_HDP_LS |
+			RADEON_CG_SUPPORT_HDP_MGCG;
+		rdev->pg_flags = 0;
 		break;
 	case CHIP_KAVERI:
 	case CHIP_KABINI:
 		rdev->asic = &kv_asic;
 		/* set num crtcs */
-		if (rdev->family == CHIP_KAVERI)
+		if (rdev->family == CHIP_KAVERI) {
 			rdev->num_crtc = 4;
-		else
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CGTS_LS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_SDMA_LS |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_VCE_MGCG |
+				RADEON_CG_SUPPORT_UVD_MGCG |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0;
+			/*RADEON_PG_SUPPORT_GFX_CG |
+			  RADEON_PG_SUPPORT_GFX_SMG |
+			  RADEON_PG_SUPPORT_GFX_DMG |
+			  RADEON_PG_SUPPORT_UVD |
+			  RADEON_PG_SUPPORT_VCE |
+			  RADEON_PG_SUPPORT_CP |
+			  RADEON_PG_SUPPORT_GDS |
+			  RADEON_PG_SUPPORT_RLC_SMU_HS |
+			  RADEON_PG_SUPPORT_ACP |
+			  RADEON_PG_SUPPORT_SAMU;*/
+		} else {
 			rdev->num_crtc = 2;
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CGTS_LS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_SDMA_LS |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_VCE_MGCG |
+				RADEON_CG_SUPPORT_UVD_MGCG |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0;
+			/*RADEON_PG_SUPPORT_GFX_CG |
+			  RADEON_PG_SUPPORT_GFX_SMG |
+			  RADEON_PG_SUPPORT_UVD |
+			  RADEON_PG_SUPPORT_VCE |
+			  RADEON_PG_SUPPORT_CP |
+			  RADEON_PG_SUPPORT_GDS |
+			  RADEON_PG_SUPPORT_RLC_SMU_HS |
+			  RADEON_PG_SUPPORT_SAMU;*/
+		}
+		rdev->has_uvd = true;
 		break;
 	default:
 		/* FIXME: not supported yet */
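
The radeon_asic_init() hunk above seeds per-family clock-gating (cg_flags) and power-gating (pg_flags) support masks at init time; later code can then test individual bits before enabling the matching hardware feature. A self-contained sketch of the compose-then-test pattern, with made-up flag values:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flag values, for illustration only. */
#define CG_SUPPORT_GFX_MGCG	(1u << 0)
#define CG_SUPPORT_MC_MGCG	(1u << 1)
#define CG_SUPPORT_HDP_LS	(1u << 2)

int main(void)
{
	/* Compose the mask once, per chip family... */
	uint32_t cg_flags = CG_SUPPORT_GFX_MGCG | CG_SUPPORT_HDP_LS;

	/* ...then gate each feature on its advertised bit. */
	if (cg_flags & CG_SUPPORT_GFX_MGCG)
		printf("enable GFX medium-grain clock gating\n");
	if (!(cg_flags & CG_SUPPORT_MC_MGCG))
		printf("skip MC clock gating on this family\n");
	return 0;
}
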
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index ca1895709908..818bbe6b884b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -336,10 +336,6 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
 int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
-int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
-int r600_copy_blit(struct radeon_device *rdev,
-		   uint64_t src_offset, uint64_t dst_offset,
-		   unsigned num_gpu_pages, struct radeon_fence **fence);
 int r600_copy_cpdma(struct radeon_device *rdev,
 		    uint64_t src_offset, uint64_t dst_offset,
 		    unsigned num_gpu_pages, struct radeon_fence **fence);
@@ -371,8 +367,6 @@ int r600_count_pipe_bits(uint32_t val);
 int r600_mc_wait_for_idle(struct radeon_device *rdev);
 int r600_pcie_gart_init(struct radeon_device *rdev);
 void r600_scratch_init(struct radeon_device *rdev);
-int r600_blit_init(struct radeon_device *rdev);
-void r600_blit_fini(struct radeon_device *rdev);
 int r600_init_microcode(struct radeon_device *rdev);
 /* r600 irq */
 int r600_irq_process(struct radeon_device *rdev);
@@ -385,28 +379,25 @@ void r600_disable_interrupts(struct radeon_device *rdev);
 void r600_rlc_stop(struct radeon_device *rdev);
 /* r600 audio */
 int r600_audio_init(struct radeon_device *rdev);
-struct r600_audio r600_audio_status(struct radeon_device *rdev);
+struct r600_audio_pin r600_audio_status(struct radeon_device *rdev);
 void r600_audio_fini(struct radeon_device *rdev);
 int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
 void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
 void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
 void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
-/* r600 blit */
-int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
-			   struct radeon_fence **fence, struct radeon_sa_bo **vb,
-			   struct radeon_semaphore **sem);
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
-			 struct radeon_sa_bo *vb, struct radeon_semaphore *sem);
-void r600_kms_blit_copy(struct radeon_device *rdev,
-			u64 src_gpu_addr, u64 dst_gpu_addr,
-			unsigned num_gpu_pages,
-			struct radeon_sa_bo *vb);
 int r600_mc_wait_for_idle(struct radeon_device *rdev);
 u32 r600_get_xclk(struct radeon_device *rdev);
 uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
 int rv6xx_get_temp(struct radeon_device *rdev);
 int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
 void r600_dpm_post_set_power_state(struct radeon_device *rdev);
+/* r600 dma */
+uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring);
+uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring);
+void r600_dma_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring);
 /* rv6xx dpm */
 int rv6xx_dpm_init(struct radeon_device *rdev);
 int rv6xx_dpm_enable(struct radeon_device *rdev);
@@ -421,6 +412,8 @@ void rv6xx_dpm_print_power_state(struct radeon_device *rdev,
 				 struct radeon_ps *ps);
 void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
 						       struct seq_file *m);
+int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
+				      enum radeon_dpm_forced_level level);
 /* rs780 dpm */
 int rs780_dpm_init(struct radeon_device *rdev);
 int rs780_dpm_enable(struct radeon_device *rdev);
@@ -436,19 +429,6 @@ void rs780_dpm_print_power_state(struct radeon_device *rdev,
 void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
 						       struct seq_file *m);
 
-/* uvd */
-int r600_uvd_init(struct radeon_device *rdev);
-int r600_uvd_rbc_start(struct radeon_device *rdev);
-void r600_uvd_rbc_stop(struct radeon_device *rdev);
-int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
-void r600_uvd_fence_emit(struct radeon_device *rdev,
-			 struct radeon_fence *fence);
-void r600_uvd_semaphore_emit(struct radeon_device *rdev,
-			     struct radeon_ring *ring,
-			     struct radeon_semaphore *semaphore,
-			     bool emit_wait);
-void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-
 /*
  * rv770,rv730,rv710,rv740
  */
@@ -466,7 +446,6 @@ int rv770_copy_dma(struct radeon_device *rdev,
 		   unsigned num_gpu_pages,
 		   struct radeon_fence **fence);
 u32 rv770_get_xclk(struct radeon_device *rdev);
-int rv770_uvd_resume(struct radeon_device *rdev);
 int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
 int rv770_get_temp(struct radeon_device *rdev);
 /* rv7xx pm */
@@ -528,7 +507,6 @@ extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_ba
 extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
 extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
 void evergreen_disable_interrupt_state(struct radeon_device *rdev);
-int evergreen_blit_init(struct radeon_device *rdev);
 int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
 void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
 				   struct radeon_fence *fence);
@@ -650,6 +628,8 @@ int trinity_dpm_force_performance_level(struct radeon_device *rdev,
 
 /* DCE6 - SI */
 void dce6_bandwidth_update(struct radeon_device *rdev);
+int dce6_audio_init(struct radeon_device *rdev);
+void dce6_audio_fini(struct radeon_device *rdev);
 
 /*
  * si
@@ -710,7 +690,6 @@ u32 cik_get_xclk(struct radeon_device *rdev);
 uint32_t cik_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
 void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
-int cik_uvd_resume(struct radeon_device *rdev);
 void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
 			      struct radeon_fence *fence);
 void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
@@ -761,5 +740,81 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
 			      struct radeon_ring *ring);
 void cik_compute_ring_set_wptr(struct radeon_device *rdev,
 			       struct radeon_ring *ring);
+int ci_get_temp(struct radeon_device *rdev);
+int kv_get_temp(struct radeon_device *rdev);
+
+int ci_dpm_init(struct radeon_device *rdev);
+int ci_dpm_enable(struct radeon_device *rdev);
+void ci_dpm_disable(struct radeon_device *rdev);
+int ci_dpm_pre_set_power_state(struct radeon_device *rdev);
+int ci_dpm_set_power_state(struct radeon_device *rdev);
+void ci_dpm_post_set_power_state(struct radeon_device *rdev);
+void ci_dpm_setup_asic(struct radeon_device *rdev);
+void ci_dpm_display_configuration_changed(struct radeon_device *rdev);
+void ci_dpm_fini(struct radeon_device *rdev);
+u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void ci_dpm_print_power_state(struct radeon_device *rdev,
+			      struct radeon_ps *ps);
+void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						    struct seq_file *m);
+int ci_dpm_force_performance_level(struct radeon_device *rdev,
+				   enum radeon_dpm_forced_level level);
+bool ci_dpm_vblank_too_short(struct radeon_device *rdev);
+void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+
+int kv_dpm_init(struct radeon_device *rdev);
+int kv_dpm_enable(struct radeon_device *rdev);
+void kv_dpm_disable(struct radeon_device *rdev);
+int kv_dpm_pre_set_power_state(struct radeon_device *rdev);
+int kv_dpm_set_power_state(struct radeon_device *rdev);
+void kv_dpm_post_set_power_state(struct radeon_device *rdev);
+void kv_dpm_setup_asic(struct radeon_device *rdev);
+void kv_dpm_display_configuration_changed(struct radeon_device *rdev);
+void kv_dpm_fini(struct radeon_device *rdev);
+u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void kv_dpm_print_power_state(struct radeon_device *rdev,
+			      struct radeon_ps *ps);
+void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						    struct seq_file *m);
+int kv_dpm_force_performance_level(struct radeon_device *rdev,
+				   enum radeon_dpm_forced_level level);
+void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+
+/* uvd v1.0 */
+uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring);
+uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring);
+void uvd_v1_0_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring);
+
+int uvd_v1_0_init(struct radeon_device *rdev);
+void uvd_v1_0_fini(struct radeon_device *rdev);
+int uvd_v1_0_start(struct radeon_device *rdev);
+void uvd_v1_0_stop(struct radeon_device *rdev);
+
+int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+			     struct radeon_ring *ring,
+			     struct radeon_semaphore *semaphore,
+			     bool emit_wait);
+void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+
+/* uvd v2.2 */
+int uvd_v2_2_resume(struct radeon_device *rdev);
+void uvd_v2_2_fence_emit(struct radeon_device *rdev,
+			 struct radeon_fence *fence);
+
+/* uvd v3.1 */
+void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+			     struct radeon_ring *ring,
+			     struct radeon_semaphore *semaphore,
+			     bool emit_wait);
+
+/* uvd v4.2 */
+int uvd_v4_2_resume(struct radeon_device *rdev);
 
 #endif
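
The header changes above rename the UVD helpers by IP-block version (uvd_v1_0_* ring ops, uvd_v2_2_* resume/fence, uvd_v3_1_* semaphore, uvd_v4_2_resume) rather than by the first chip that shipped them, and the asic tables then mix versions per family, as cayman_uvd_ring does with uvd_v1_0_ib_execute, uvd_v2_2_fence_emit and uvd_v3_1_semaphore_emit. A simplified sketch of that composition (prototypes collapsed for brevity; not the real signatures):

/* Sketch: one ops table mixing helpers from different UVD versions;
 * which version backs each hook is chosen per chip family. */
struct uvd_ops {
	void (*ib_execute)(void);	/* e.g. the v1.0 path */
	void (*emit_fence)(void);	/* e.g. the v2.2 path on cayman+ */
	void (*emit_semaphore)(void);	/* e.g. the v3.1 path on cayman+ */
};
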
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index e3f3e8841789..404e25d285ba 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -163,8 +163,8 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
163 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 163 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
164 sizeof(ATOM_GPIO_I2C_ASSIGMENT); 164 sizeof(ATOM_GPIO_I2C_ASSIGMENT);
165 165
166 gpio = &i2c_info->asGPIO_Info[0];
166 for (i = 0; i < num_indices; i++) { 167 for (i = 0; i < num_indices; i++) {
167 gpio = &i2c_info->asGPIO_Info[i];
168 168
169 radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); 169 radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
170 170
@@ -172,6 +172,8 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
172 i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); 172 i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
173 break; 173 break;
174 } 174 }
175 gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
176 ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
175 } 177 }
176 } 178 }
177 179
@@ -195,9 +197,8 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
195 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 197 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
196 sizeof(ATOM_GPIO_I2C_ASSIGMENT); 198 sizeof(ATOM_GPIO_I2C_ASSIGMENT);
197 199
200 gpio = &i2c_info->asGPIO_Info[0];
198 for (i = 0; i < num_indices; i++) { 201 for (i = 0; i < num_indices; i++) {
199 gpio = &i2c_info->asGPIO_Info[i];
200
201 radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); 202 radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
202 203
203 i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); 204 i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
@@ -206,12 +207,14 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
206 sprintf(stmp, "0x%x", i2c.i2c_id); 207 sprintf(stmp, "0x%x", i2c.i2c_id);
207 rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); 208 rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
208 } 209 }
210 gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
211 ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
209 } 212 }
210 } 213 }
211} 214}
212 215
213static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev, 216static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
214 u8 id) 217 u8 id)
215{ 218{
216 struct atom_context *ctx = rdev->mode_info.atom_context; 219 struct atom_context *ctx = rdev->mode_info.atom_context;
217 struct radeon_gpio_rec gpio; 220 struct radeon_gpio_rec gpio;
@@ -230,8 +233,8 @@ static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
230 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 233 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
231 sizeof(ATOM_GPIO_PIN_ASSIGNMENT); 234 sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
232 235
236 pin = gpio_info->asGPIO_Pin;
233 for (i = 0; i < num_indices; i++) { 237 for (i = 0; i < num_indices; i++) {
234 pin = &gpio_info->asGPIO_Pin[i];
235 if (id == pin->ucGPIO_ID) { 238 if (id == pin->ucGPIO_ID) {
236 gpio.id = pin->ucGPIO_ID; 239 gpio.id = pin->ucGPIO_ID;
237 gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4; 240 gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
@@ -239,6 +242,8 @@ static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
239 gpio.valid = true; 242 gpio.valid = true;
240 break; 243 break;
241 } 244 }
245 pin = (ATOM_GPIO_PIN_ASSIGNMENT *)
246 ((u8 *)pin + sizeof(ATOM_GPIO_PIN_ASSIGNMENT));
242 } 247 }
243 } 248 }
244 249
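The three hunks above all make the same change: instead of indexing asGPIO_Info[i] or asGPIO_Pin[i], the cursor is advanced by an explicit sizeof() byte stride. The AtomBIOS headers declare these trailing tables as small fixed-size arrays, so indexing past the declared bound is formally out of range even though the bytes are present in the BIOS image; walking the raw record stream with pointer arithmetic sidesteps that. A self-contained sketch of the pattern, using a made-up record type:

#include <stddef.h>
#include <stdint.h>

struct record {			/* hypothetical packed table entry */
	uint8_t  id;
	uint16_t payload;
} __attribute__((packed));

static const struct record *find_record(const uint8_t *table,
					size_t table_bytes, uint8_t wanted)
{
	size_t num = table_bytes / sizeof(struct record);
	const struct record *r = (const struct record *)table;
	size_t i;

	for (i = 0; i < num; i++) {
		if (r->id == wanted)
			return r;
		/* advance by the record size, exactly as the hunks do */
		r = (const struct record *)((const uint8_t *)r +
					    sizeof(struct record));
	}
	return NULL;
}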
@@ -711,13 +716,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
711 (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) 716 (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
712 (ctx->bios + data_offset + 717 (ctx->bios + data_offset +
713 le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset)); 718 le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
719 u8 *num_dst_objs = (u8 *)
720 ((u8 *)router_src_dst_table + 1 +
721 (router_src_dst_table->ucNumberOfSrc * 2));
722 u16 *dst_objs = (u16 *)(num_dst_objs + 1);
714 int enum_id; 723 int enum_id;
715 724
716 router.router_id = router_obj_id; 725 router.router_id = router_obj_id;
717 for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst; 726 for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
718 enum_id++) {
719 if (le16_to_cpu(path->usConnObjectId) == 727 if (le16_to_cpu(path->usConnObjectId) ==
720 le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id])) 728 le16_to_cpu(dst_objs[enum_id]))
721 break; 729 break;
722 } 730 }
723 731
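The router hunk deals with a table whose layout cannot be expressed as a plain C struct at all: a source-ID count, that many u16 source IDs, then a destination-ID count followed by its u16 IDs. Reading ucNumberOfDst and usDstObjectID[] through the declared struct lands on the wrong bytes whenever more than one source entry is present, which is what the offset arithmetic above corrects. Spelled out under that assumed layout (inferred from the hunk, not quoted from atombios.h):

/* Assumed on-disc layout:
 *   u8  ucNumberOfSrc;
 *   u16 usSrcObjectID[ucNumberOfSrc];
 *   u8  ucNumberOfDst;
 *   u16 usDstObjectID[ucNumberOfDst];
 */
const u8 *base     = (const u8 *)router_src_dst_table;
const u8 *num_dst  = base + 1 + router_src_dst_table->ucNumberOfSrc * 2;
const u16 *dst_ids = (const u16 *)(num_dst + 1);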
@@ -1480,6 +1488,15 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1480 uint8_t frev, crev; 1488 uint8_t frev, crev;
1481 int i, num_indices; 1489 int i, num_indices;
1482 1490
1491 if (id == ASIC_INTERNAL_MEMORY_SS) {
1492 if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT))
1493 return false;
1494 }
1495 if (id == ASIC_INTERNAL_ENGINE_SS) {
1496 if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT))
1497 return false;
1498 }
1499
1483 memset(ss, 0, sizeof(struct radeon_atom_ss)); 1500 memset(ss, 0, sizeof(struct radeon_atom_ss));
1484 if (atom_parse_data_header(mode_info->atom_context, index, &size, 1501 if (atom_parse_data_header(mode_info->atom_context, index, &size,
1485 &frev, &crev, &data_offset)) { 1502 &frev, &crev, &data_offset)) {
@@ -1672,7 +1689,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1672 kfree(edid); 1689 kfree(edid);
1673 } 1690 }
1674 } 1691 }
1675 record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD); 1692 record += fake_edid_record->ucFakeEDIDLength ?
1693 fake_edid_record->ucFakeEDIDLength + 2 :
1694 sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
1676 break; 1695 break;
1677 case LCD_PANEL_RESOLUTION_RECORD_TYPE: 1696 case LCD_PANEL_RESOLUTION_RECORD_TYPE:
1678 panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; 1697 panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
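The fake-EDID fix corrects the walk over the LCD record list: records there are heterogeneous, and a fake-EDID record that actually carries an EDID blob is longer than its bare struct. Assuming the record is one type byte plus one length byte plus the blob, a record holding a standard 128-byte EDID occupies 130 bytes; the old unconditional record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) would land mid-blob and misparse every record after it. The corrected advance, condensed:

u8 len = fake_edid_record->ucFakeEDIDLength;

record += len ? len + 2	/* type + length bytes, then the blob */
	      : sizeof(ATOM_FAKE_EDID_PATCH_RECORD);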
@@ -2237,6 +2256,11 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
2237 (controller->ucFanParameters & 2256 (controller->ucFanParameters &
2238 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); 2257 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2239 rdev->pm.int_thermal_type = THERMAL_TYPE_CI; 2258 rdev->pm.int_thermal_type = THERMAL_TYPE_CI;
2259 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
2260 DRM_INFO("Internal thermal controller %s fan control\n",
2261 (controller->ucFanParameters &
2262 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2263 rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
2240 } else if ((controller->ucType == 2264 } else if ((controller->ucType ==
2241 ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || 2265 ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
2242 (controller->ucType == 2266 (controller->ucType ==
@@ -2782,7 +2806,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
2782 ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false; 2806 ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
2783 dividers->enable_dithen = (args.v3.ucCntlFlag & 2807 dividers->enable_dithen = (args.v3.ucCntlFlag &
2784 ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true; 2808 ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
2785 dividers->fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv); 2809 dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
2786 dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac); 2810 dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
2787 dividers->ref_div = args.v3.ucRefDiv; 2811 dividers->ref_div = args.v3.ucRefDiv;
2788 dividers->vco_mode = (args.v3.ucCntlFlag & 2812 dividers->vco_mode = (args.v3.ucCntlFlag &
@@ -3077,6 +3101,121 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev
3077 return radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage); 3101 return radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
3078} 3102}
3079 3103
3104int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
3105 u16 *leakage_id)
3106{
3107 union set_voltage args;
3108 int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
3109 u8 frev, crev;
3110
3111 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
3112 return -EINVAL;
3113
3114 switch (crev) {
3115 case 3:
3116 case 4:
3117 args.v3.ucVoltageType = 0;
3118 args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
3119 args.v3.usVoltageLevel = 0;
3120
3121 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
3122
3123 *leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
3124 break;
3125 default:
3126 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3127 return -EINVAL;
3128 }
3129
3130 return 0;
3131}
3132
3133int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
3134 u16 *vddc, u16 *vddci,
3135 u16 virtual_voltage_id,
3136 u16 vbios_voltage_id)
3137{
3138 int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
3139 u8 frev, crev;
3140 u16 data_offset, size;
3141 int i, j;
3142 ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
3143 u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
3144
3145 *vddc = 0;
3146 *vddci = 0;
3147
3148 if (!atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
3149 &frev, &crev, &data_offset))
3150 return -EINVAL;
3151
3152 profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
3153 (rdev->mode_info.atom_context->bios + data_offset);
3154
3155 switch (frev) {
3156 case 1:
3157 return -EINVAL;
3158 case 2:
3159 switch (crev) {
3160 case 1:
3161 if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))
3162 return -EINVAL;
3163 leakage_bin = (u16 *)
3164 (rdev->mode_info.atom_context->bios + data_offset +
3165 le16_to_cpu(profile->usLeakageBinArrayOffset));
3166 vddc_id_buf = (u16 *)
3167 (rdev->mode_info.atom_context->bios + data_offset +
3168 le16_to_cpu(profile->usElbVDDC_IdArrayOffset));
3169 vddc_buf = (u16 *)
3170 (rdev->mode_info.atom_context->bios + data_offset +
3171 le16_to_cpu(profile->usElbVDDC_LevelArrayOffset));
3172 vddci_id_buf = (u16 *)
3173 (rdev->mode_info.atom_context->bios + data_offset +
3174 le16_to_cpu(profile->usElbVDDCI_IdArrayOffset));
3175 vddci_buf = (u16 *)
3176 (rdev->mode_info.atom_context->bios + data_offset +
3177 le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset));
3178
3179 if (profile->ucElbVDDC_Num > 0) {
3180 for (i = 0; i < profile->ucElbVDDC_Num; i++) {
3181 if (vddc_id_buf[i] == virtual_voltage_id) {
3182 for (j = 0; j < profile->ucLeakageBinNum; j++) {
3183 if (vbios_voltage_id <= leakage_bin[j]) {
3184 *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
3185 break;
3186 }
3187 }
3188 break;
3189 }
3190 }
3191 }
3192 if (profile->ucElbVDDCI_Num > 0) {
3193 for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
3194 if (vddci_id_buf[i] == virtual_voltage_id) {
3195 for (j = 0; j < profile->ucLeakageBinNum; j++) {
3196 if (vbios_voltage_id <= leakage_bin[j]) {
3197 *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
3198 break;
3199 }
3200 }
3201 break;
3202 }
3203 }
3204 }
3205 break;
3206 default:
3207 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3208 return -EINVAL;
3209 }
3210 break;
3211 default:
3212 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3213 return -EINVAL;
3214 }
3215
3216 return 0;
3217}
3218
3080int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev, 3219int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
3081 u16 voltage_level, u8 voltage_type, 3220 u16 voltage_level, u8 voltage_type,
3082 u32 *gpio_value, u32 *gpio_mask) 3221 u32 *gpio_value, u32 *gpio_mask)
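The two helpers added above are intended to be used as a pair: the SetVoltage command table (crev 3/4) reports the board's leakage id, and the ASIC_ProfilingInfo data table then maps a virtual voltage id to real VDDC/VDDCI levels within that leakage bin. A hedged usage sketch; taking ATOM_VIRTUAL_VOLTAGE_ID0 as the example virtual id is an assumption, and error handling is trimmed:

u16 leakage_id, vddc, vddci;

if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
	/* translate one virtual voltage id for this board */
	radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev,
			&vddc, &vddci,
			ATOM_VIRTUAL_VOLTAGE_ID0, leakage_id);
	/* vddc/vddci stay 0 if the profiling table has no match */
}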
@@ -3279,10 +3418,11 @@ int radeon_atom_get_max_voltage(struct radeon_device *rdev,
3279 ATOM_VOLTAGE_FORMULA_V2 *formula = 3418 ATOM_VOLTAGE_FORMULA_V2 *formula =
3280 &voltage_object->v2.asFormula; 3419 &voltage_object->v2.asFormula;
3281 if (formula->ucNumOfVoltageEntries) { 3420 if (formula->ucNumOfVoltageEntries) {
3421 VOLTAGE_LUT_ENTRY *lut = (VOLTAGE_LUT_ENTRY *)
3422 ((u8 *)&formula->asVIDAdjustEntries[0] +
3423 (sizeof(VOLTAGE_LUT_ENTRY) * (formula->ucNumOfVoltageEntries - 1)));
3282 *max_voltage = 3424 *max_voltage =
3283 le16_to_cpu(formula->asVIDAdjustEntries[ 3425 le16_to_cpu(lut->usVoltageValue);
3284 formula->ucNumOfVoltageEntries - 1
3285 ].usVoltageValue);
3286 return 0; 3426 return 0;
3287 } 3427 }
3288 } 3428 }
@@ -3442,11 +3582,13 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
3442 if (voltage_object) { 3582 if (voltage_object) {
3443 ATOM_VOLTAGE_FORMULA_V2 *formula = 3583 ATOM_VOLTAGE_FORMULA_V2 *formula =
3444 &voltage_object->v2.asFormula; 3584 &voltage_object->v2.asFormula;
3585 VOLTAGE_LUT_ENTRY *lut;
3445 if (formula->ucNumOfVoltageEntries > MAX_VOLTAGE_ENTRIES) 3586 if (formula->ucNumOfVoltageEntries > MAX_VOLTAGE_ENTRIES)
3446 return -EINVAL; 3587 return -EINVAL;
3588 lut = &formula->asVIDAdjustEntries[0];
3447 for (i = 0; i < formula->ucNumOfVoltageEntries; i++) { 3589 for (i = 0; i < formula->ucNumOfVoltageEntries; i++) {
3448 voltage_table->entries[i].value = 3590 voltage_table->entries[i].value =
3449 le16_to_cpu(formula->asVIDAdjustEntries[i].usVoltageValue); 3591 le16_to_cpu(lut->usVoltageValue);
3450 ret = radeon_atom_get_voltage_gpio_settings(rdev, 3592 ret = radeon_atom_get_voltage_gpio_settings(rdev,
3451 voltage_table->entries[i].value, 3593 voltage_table->entries[i].value,
3452 voltage_type, 3594 voltage_type,
@@ -3454,6 +3596,8 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
3454 &voltage_table->mask_low); 3596 &voltage_table->mask_low);
3455 if (ret) 3597 if (ret)
3456 return ret; 3598 return ret;
3599 lut = (VOLTAGE_LUT_ENTRY *)
3600 ((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY));
3457 } 3601 }
3458 voltage_table->count = formula->ucNumOfVoltageEntries; 3602 voltage_table->count = formula->ucNumOfVoltageEntries;
3459 return 0; 3603 return 0;
@@ -3473,13 +3617,17 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
3473 if (voltage_object) { 3617 if (voltage_object) {
3474 ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio = 3618 ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio =
3475 &voltage_object->v3.asGpioVoltageObj; 3619 &voltage_object->v3.asGpioVoltageObj;
3620 VOLTAGE_LUT_ENTRY_V2 *lut;
3476 if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES) 3621 if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES)
3477 return -EINVAL; 3622 return -EINVAL;
3623 lut = &gpio->asVolGpioLut[0];
3478 for (i = 0; i < gpio->ucGpioEntryNum; i++) { 3624 for (i = 0; i < gpio->ucGpioEntryNum; i++) {
3479 voltage_table->entries[i].value = 3625 voltage_table->entries[i].value =
3480 le16_to_cpu(gpio->asVolGpioLut[i].usVoltageValue); 3626 le16_to_cpu(lut->usVoltageValue);
3481 voltage_table->entries[i].smio_low = 3627 voltage_table->entries[i].smio_low =
3482 le32_to_cpu(gpio->asVolGpioLut[i].ulVoltageId); 3628 le32_to_cpu(lut->ulVoltageId);
3629 lut = (VOLTAGE_LUT_ENTRY_V2 *)
3630 ((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY_V2));
3483 } 3631 }
3484 voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal); 3632 voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal);
3485 voltage_table->count = gpio->ucGpioEntryNum; 3633 voltage_table->count = gpio->ucGpioEntryNum;
@@ -3605,7 +3753,6 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
3605 union vram_info *vram_info; 3753 union vram_info *vram_info;
3606 u32 mem_timing_size = gddr5 ? 3754 u32 mem_timing_size = gddr5 ?
3607 sizeof(ATOM_MEMORY_TIMING_FORMAT_V2) : sizeof(ATOM_MEMORY_TIMING_FORMAT); 3755 sizeof(ATOM_MEMORY_TIMING_FORMAT_V2) : sizeof(ATOM_MEMORY_TIMING_FORMAT);
3608 u8 *p;
3609 3756
3610 memset(mclk_range_table, 0, sizeof(struct atom_memory_clock_range_table)); 3757 memset(mclk_range_table, 0, sizeof(struct atom_memory_clock_range_table));
3611 3758
@@ -3624,6 +3771,7 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
3624 if (module_index < vram_info->v1_4.ucNumOfVRAMModule) { 3771 if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
3625 ATOM_VRAM_MODULE_V4 *vram_module = 3772 ATOM_VRAM_MODULE_V4 *vram_module =
3626 (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo; 3773 (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
3774 ATOM_MEMORY_TIMING_FORMAT *format;
3627 3775
3628 for (i = 0; i < module_index; i++) { 3776 for (i = 0; i < module_index; i++) {
3629 if (le16_to_cpu(vram_module->usModuleSize) == 0) 3777 if (le16_to_cpu(vram_module->usModuleSize) == 0)
@@ -3634,11 +3782,11 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
3634 mclk_range_table->num_entries = (u8) 3782 mclk_range_table->num_entries = (u8)
3635 ((le16_to_cpu(vram_module->usModuleSize) - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) / 3783 ((le16_to_cpu(vram_module->usModuleSize) - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) /
3636 mem_timing_size); 3784 mem_timing_size);
3637 p = (u8 *)&vram_module->asMemTiming[0]; 3785 format = &vram_module->asMemTiming[0];
3638 for (i = 0; i < mclk_range_table->num_entries; i++) { 3786 for (i = 0; i < mclk_range_table->num_entries; i++) {
3639 ATOM_MEMORY_TIMING_FORMAT *format = (ATOM_MEMORY_TIMING_FORMAT *)p;
3640 mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange); 3787 mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange);
3641 p += mem_timing_size; 3788 format = (ATOM_MEMORY_TIMING_FORMAT *)
3789 ((u8 *)format + mem_timing_size);
3642 } 3790 }
3643 } else 3791 } else
3644 return -EINVAL; 3792 return -EINVAL;
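The memory-timing loop is the one place in this file where the stride is not a compile-time constant: GDDR5 boards use the larger _V2 timing format, so mem_timing_size is chosen at runtime and the typed cursor advances by that amount rather than by sizeof() of its own type. Condensed to the essentials:

u32 stride = gddr5 ? sizeof(ATOM_MEMORY_TIMING_FORMAT_V2)
		   : sizeof(ATOM_MEMORY_TIMING_FORMAT);

for (i = 0; i < mclk_range_table->num_entries; i++) {
	mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange);
	format = (ATOM_MEMORY_TIMING_FORMAT *)((u8 *)format + stride);
}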
diff --git a/drivers/gpu/drm/radeon/radeon_blit_common.h b/drivers/gpu/drm/radeon/radeon_blit_common.h
deleted file mode 100644
index 4ecbe72c9d2d..000000000000
--- a/drivers/gpu/drm/radeon/radeon_blit_common.h
+++ /dev/null
@@ -1,44 +0,0 @@
1/*
2 * Copyright 2009 Advanced Micro Devices, Inc.
3 * Copyright 2009 Red Hat Inc.
4 * Copyright 2012 Alcatel-Lucent, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __RADEON_BLIT_COMMON_H__
28
29#define DI_PT_RECTLIST 0x11
30#define DI_INDEX_SIZE_16_BIT 0x0
31#define DI_SRC_SEL_AUTO_INDEX 0x2
32
33#define FMT_8 0x1
34#define FMT_5_6_5 0x8
35#define FMT_8_8_8_8 0x1a
36#define COLOR_8 0x1
37#define COLOR_5_6_5 0x8
38#define COLOR_8_8_8_8 0x1a
39
40#define RECT_UNIT_H 32
41#define RECT_UNIT_W (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H)
42
43#define __RADEON_BLIT_COMMON_H__
44#endif
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 78edadc9e86b..68ce36056019 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -147,7 +147,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
147 enum radeon_combios_table_offset table) 147 enum radeon_combios_table_offset table)
148{ 148{
149 struct radeon_device *rdev = dev->dev_private; 149 struct radeon_device *rdev = dev->dev_private;
150 int rev; 150 int rev, size;
151 uint16_t offset = 0, check_offset; 151 uint16_t offset = 0, check_offset;
152 152
153 if (!rdev->bios) 153 if (!rdev->bios)
@@ -156,174 +156,106 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
156 switch (table) { 156 switch (table) {
157 /* absolute offset tables */ 157 /* absolute offset tables */
158 case COMBIOS_ASIC_INIT_1_TABLE: 158 case COMBIOS_ASIC_INIT_1_TABLE:
159 check_offset = RBIOS16(rdev->bios_header_start + 0xc); 159 check_offset = 0xc;
160 if (check_offset)
161 offset = check_offset;
162 break; 160 break;
163 case COMBIOS_BIOS_SUPPORT_TABLE: 161 case COMBIOS_BIOS_SUPPORT_TABLE:
164 check_offset = RBIOS16(rdev->bios_header_start + 0x14); 162 check_offset = 0x14;
165 if (check_offset)
166 offset = check_offset;
167 break; 163 break;
168 case COMBIOS_DAC_PROGRAMMING_TABLE: 164 case COMBIOS_DAC_PROGRAMMING_TABLE:
169 check_offset = RBIOS16(rdev->bios_header_start + 0x2a); 165 check_offset = 0x2a;
170 if (check_offset)
171 offset = check_offset;
172 break; 166 break;
173 case COMBIOS_MAX_COLOR_DEPTH_TABLE: 167 case COMBIOS_MAX_COLOR_DEPTH_TABLE:
174 check_offset = RBIOS16(rdev->bios_header_start + 0x2c); 168 check_offset = 0x2c;
175 if (check_offset)
176 offset = check_offset;
177 break; 169 break;
178 case COMBIOS_CRTC_INFO_TABLE: 170 case COMBIOS_CRTC_INFO_TABLE:
179 check_offset = RBIOS16(rdev->bios_header_start + 0x2e); 171 check_offset = 0x2e;
180 if (check_offset)
181 offset = check_offset;
182 break; 172 break;
183 case COMBIOS_PLL_INFO_TABLE: 173 case COMBIOS_PLL_INFO_TABLE:
184 check_offset = RBIOS16(rdev->bios_header_start + 0x30); 174 check_offset = 0x30;
185 if (check_offset)
186 offset = check_offset;
187 break; 175 break;
188 case COMBIOS_TV_INFO_TABLE: 176 case COMBIOS_TV_INFO_TABLE:
189 check_offset = RBIOS16(rdev->bios_header_start + 0x32); 177 check_offset = 0x32;
190 if (check_offset)
191 offset = check_offset;
192 break; 178 break;
193 case COMBIOS_DFP_INFO_TABLE: 179 case COMBIOS_DFP_INFO_TABLE:
194 check_offset = RBIOS16(rdev->bios_header_start + 0x34); 180 check_offset = 0x34;
195 if (check_offset)
196 offset = check_offset;
197 break; 181 break;
198 case COMBIOS_HW_CONFIG_INFO_TABLE: 182 case COMBIOS_HW_CONFIG_INFO_TABLE:
199 check_offset = RBIOS16(rdev->bios_header_start + 0x36); 183 check_offset = 0x36;
200 if (check_offset)
201 offset = check_offset;
202 break; 184 break;
203 case COMBIOS_MULTIMEDIA_INFO_TABLE: 185 case COMBIOS_MULTIMEDIA_INFO_TABLE:
204 check_offset = RBIOS16(rdev->bios_header_start + 0x38); 186 check_offset = 0x38;
205 if (check_offset)
206 offset = check_offset;
207 break; 187 break;
208 case COMBIOS_TV_STD_PATCH_TABLE: 188 case COMBIOS_TV_STD_PATCH_TABLE:
209 check_offset = RBIOS16(rdev->bios_header_start + 0x3e); 189 check_offset = 0x3e;
210 if (check_offset)
211 offset = check_offset;
212 break; 190 break;
213 case COMBIOS_LCD_INFO_TABLE: 191 case COMBIOS_LCD_INFO_TABLE:
214 check_offset = RBIOS16(rdev->bios_header_start + 0x40); 192 check_offset = 0x40;
215 if (check_offset)
216 offset = check_offset;
217 break; 193 break;
218 case COMBIOS_MOBILE_INFO_TABLE: 194 case COMBIOS_MOBILE_INFO_TABLE:
219 check_offset = RBIOS16(rdev->bios_header_start + 0x42); 195 check_offset = 0x42;
220 if (check_offset)
221 offset = check_offset;
222 break; 196 break;
223 case COMBIOS_PLL_INIT_TABLE: 197 case COMBIOS_PLL_INIT_TABLE:
224 check_offset = RBIOS16(rdev->bios_header_start + 0x46); 198 check_offset = 0x46;
225 if (check_offset)
226 offset = check_offset;
227 break; 199 break;
228 case COMBIOS_MEM_CONFIG_TABLE: 200 case COMBIOS_MEM_CONFIG_TABLE:
229 check_offset = RBIOS16(rdev->bios_header_start + 0x48); 201 check_offset = 0x48;
230 if (check_offset)
231 offset = check_offset;
232 break; 202 break;
233 case COMBIOS_SAVE_MASK_TABLE: 203 case COMBIOS_SAVE_MASK_TABLE:
234 check_offset = RBIOS16(rdev->bios_header_start + 0x4a); 204 check_offset = 0x4a;
235 if (check_offset)
236 offset = check_offset;
237 break; 205 break;
238 case COMBIOS_HARDCODED_EDID_TABLE: 206 case COMBIOS_HARDCODED_EDID_TABLE:
239 check_offset = RBIOS16(rdev->bios_header_start + 0x4c); 207 check_offset = 0x4c;
240 if (check_offset)
241 offset = check_offset;
242 break; 208 break;
243 case COMBIOS_ASIC_INIT_2_TABLE: 209 case COMBIOS_ASIC_INIT_2_TABLE:
244 check_offset = RBIOS16(rdev->bios_header_start + 0x4e); 210 check_offset = 0x4e;
245 if (check_offset)
246 offset = check_offset;
247 break; 211 break;
248 case COMBIOS_CONNECTOR_INFO_TABLE: 212 case COMBIOS_CONNECTOR_INFO_TABLE:
249 check_offset = RBIOS16(rdev->bios_header_start + 0x50); 213 check_offset = 0x50;
250 if (check_offset)
251 offset = check_offset;
252 break; 214 break;
253 case COMBIOS_DYN_CLK_1_TABLE: 215 case COMBIOS_DYN_CLK_1_TABLE:
254 check_offset = RBIOS16(rdev->bios_header_start + 0x52); 216 check_offset = 0x52;
255 if (check_offset)
256 offset = check_offset;
257 break; 217 break;
258 case COMBIOS_RESERVED_MEM_TABLE: 218 case COMBIOS_RESERVED_MEM_TABLE:
259 check_offset = RBIOS16(rdev->bios_header_start + 0x54); 219 check_offset = 0x54;
260 if (check_offset)
261 offset = check_offset;
262 break; 220 break;
263 case COMBIOS_EXT_TMDS_INFO_TABLE: 221 case COMBIOS_EXT_TMDS_INFO_TABLE:
264 check_offset = RBIOS16(rdev->bios_header_start + 0x58); 222 check_offset = 0x58;
265 if (check_offset)
266 offset = check_offset;
267 break; 223 break;
268 case COMBIOS_MEM_CLK_INFO_TABLE: 224 case COMBIOS_MEM_CLK_INFO_TABLE:
269 check_offset = RBIOS16(rdev->bios_header_start + 0x5a); 225 check_offset = 0x5a;
270 if (check_offset)
271 offset = check_offset;
272 break; 226 break;
273 case COMBIOS_EXT_DAC_INFO_TABLE: 227 case COMBIOS_EXT_DAC_INFO_TABLE:
274 check_offset = RBIOS16(rdev->bios_header_start + 0x5c); 228 check_offset = 0x5c;
275 if (check_offset)
276 offset = check_offset;
277 break; 229 break;
278 case COMBIOS_MISC_INFO_TABLE: 230 case COMBIOS_MISC_INFO_TABLE:
279 check_offset = RBIOS16(rdev->bios_header_start + 0x5e); 231 check_offset = 0x5e;
280 if (check_offset)
281 offset = check_offset;
282 break; 232 break;
283 case COMBIOS_CRT_INFO_TABLE: 233 case COMBIOS_CRT_INFO_TABLE:
284 check_offset = RBIOS16(rdev->bios_header_start + 0x60); 234 check_offset = 0x60;
285 if (check_offset)
286 offset = check_offset;
287 break; 235 break;
288 case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE: 236 case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
289 check_offset = RBIOS16(rdev->bios_header_start + 0x62); 237 check_offset = 0x62;
290 if (check_offset)
291 offset = check_offset;
292 break; 238 break;
293 case COMBIOS_COMPONENT_VIDEO_INFO_TABLE: 239 case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
294 check_offset = RBIOS16(rdev->bios_header_start + 0x64); 240 check_offset = 0x64;
295 if (check_offset)
296 offset = check_offset;
297 break; 241 break;
298 case COMBIOS_FAN_SPEED_INFO_TABLE: 242 case COMBIOS_FAN_SPEED_INFO_TABLE:
299 check_offset = RBIOS16(rdev->bios_header_start + 0x66); 243 check_offset = 0x66;
300 if (check_offset)
301 offset = check_offset;
302 break; 244 break;
303 case COMBIOS_OVERDRIVE_INFO_TABLE: 245 case COMBIOS_OVERDRIVE_INFO_TABLE:
304 check_offset = RBIOS16(rdev->bios_header_start + 0x68); 246 check_offset = 0x68;
305 if (check_offset)
306 offset = check_offset;
307 break; 247 break;
308 case COMBIOS_OEM_INFO_TABLE: 248 case COMBIOS_OEM_INFO_TABLE:
309 check_offset = RBIOS16(rdev->bios_header_start + 0x6a); 249 check_offset = 0x6a;
310 if (check_offset)
311 offset = check_offset;
312 break; 250 break;
313 case COMBIOS_DYN_CLK_2_TABLE: 251 case COMBIOS_DYN_CLK_2_TABLE:
314 check_offset = RBIOS16(rdev->bios_header_start + 0x6c); 252 check_offset = 0x6c;
315 if (check_offset)
316 offset = check_offset;
317 break; 253 break;
318 case COMBIOS_POWER_CONNECTOR_INFO_TABLE: 254 case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
319 check_offset = RBIOS16(rdev->bios_header_start + 0x6e); 255 check_offset = 0x6e;
320 if (check_offset)
321 offset = check_offset;
322 break; 256 break;
323 case COMBIOS_I2C_INFO_TABLE: 257 case COMBIOS_I2C_INFO_TABLE:
324 check_offset = RBIOS16(rdev->bios_header_start + 0x70); 258 check_offset = 0x70;
325 if (check_offset)
326 offset = check_offset;
327 break; 259 break;
328 /* relative offset tables */ 260 /* relative offset tables */
329 case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */ 261 case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */
@@ -439,11 +371,16 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
439 } 371 }
440 break; 372 break;
441 default: 373 default:
374 check_offset = 0;
442 break; 375 break;
443 } 376 }
444 377
445 return offset; 378 size = RBIOS8(rdev->bios_header_start + 0x6);
379 /* check absolute offset tables */
380 if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
381 offset = RBIOS16(rdev->bios_header_start + check_offset);
446 382
383 return offset;
447} 384}
448 385
449bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) 386bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
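The combios refactor collapses some thirty near-identical case bodies into one: each absolute table now merely records its header-relative offset constant, and a single read at the bottom validates that constant against the BIOS header's size byte (offset 0x6) before dereferencing, so an undersized header can no longer send RBIOS16 past the table. The shared tail that replaces all the per-case reads:

size = RBIOS8(rdev->bios_header_start + 0x6);
/* absolute-offset tables only; relative tables compute their own offset */
if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
	offset = RBIOS16(rdev->bios_header_start + check_offset);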
@@ -965,16 +902,22 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
965 dac = RBIOS8(dac_info + 0x3) & 0xf; 902 dac = RBIOS8(dac_info + 0x3) & 0xf;
966 p_dac->ps2_pdac_adj = (bg << 8) | (dac); 903 p_dac->ps2_pdac_adj = (bg << 8) | (dac);
967 } 904 }
968 /* if the values are all zeros, use the table */ 905 /* if the values are zeros, use the table */
969 if (p_dac->ps2_pdac_adj) 906 if ((dac == 0) || (bg == 0))
907 found = 0;
908 else
970 found = 1; 909 found = 1;
971 } 910 }
972 911
973 /* quirks */ 912 /* quirks */
913 /* Radeon 7000 (RV100) */
914 if (((dev->pdev->device == 0x5159) &&
915 (dev->pdev->subsystem_vendor == 0x174B) &&
916 (dev->pdev->subsystem_device == 0x7c28)) ||
974 /* Radeon 9100 (R200) */ 917 /* Radeon 9100 (R200) */
975 if ((dev->pdev->device == 0x514D) && 918 ((dev->pdev->device == 0x514D) &&
976 (dev->pdev->subsystem_vendor == 0x174B) && 919 (dev->pdev->subsystem_vendor == 0x174B) &&
977 (dev->pdev->subsystem_device == 0x7149)) { 920 (dev->pdev->subsystem_device == 0x7149))) {
978 /* vbios value is bad, use the default */ 921 /* vbios value is bad, use the default */
979 found = 0; 922 found = 0;
980 } 923 }
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index efc4f6441ef4..3cae2bbc1854 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1444,13 +1444,13 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
1444 dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle 1444 dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
1445 + init->ring_size / sizeof(u32)); 1445 + init->ring_size / sizeof(u32));
1446 dev_priv->ring.size = init->ring_size; 1446 dev_priv->ring.size = init->ring_size;
1447 dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); 1447 dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
1448 1448
1449 dev_priv->ring.rptr_update = /* init->rptr_update */ 4096; 1449 dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
1450 dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8); 1450 dev_priv->ring.rptr_update_l2qw = order_base_2( /* init->rptr_update */ 4096 / 8);
1451 1451
1452 dev_priv->ring.fetch_size = /* init->fetch_size */ 32; 1452 dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
1453 dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16); 1453 dev_priv->ring.fetch_size_l2ow = order_base_2( /* init->fetch_size */ 32 / 16);
1454 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; 1454 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
1455 1455
1456 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; 1456 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
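drm_order() was a DRM-local duplicate of the kernel-wide order_base_2(), and both compute ceil(log2(n)). For the constants in this hunk, the 4096-byte rptr update window is 4096 / 8 = 512 quadwords, and order_base_2(512) = 9. A tiny equivalent for illustration only (the real helper lives in include/linux/log2.h):

static unsigned int order_base_2_example(unsigned long n)
{
	unsigned int order = 0;

	while ((1UL << order) < n)	/* smallest order with 2^order >= n */
		order++;
	return order;
}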
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 13a130fb3517..a56084410372 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -268,7 +268,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
268 return -EINVAL; 268 return -EINVAL;
269 269
270 /* we only support VM on some SI+ rings */ 270 /* we only support VM on some SI+ rings */
271 if ((p->rdev->asic->ring[p->ring].cs_parse == NULL) && 271 if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) &&
272 ((p->cs_flags & RADEON_CS_USE_VM) == 0)) { 272 ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
273 DRM_ERROR("Ring %d requires VM!\n", p->ring); 273 DRM_ERROR("Ring %d requires VM!\n", p->ring);
274 return -EINVAL; 274 return -EINVAL;
@@ -383,6 +383,10 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
383 DRM_ERROR("Invalid command stream !\n"); 383 DRM_ERROR("Invalid command stream !\n");
384 return r; 384 return r;
385 } 385 }
386
387 if (parser->ring == R600_RING_TYPE_UVD_INDEX)
388 radeon_uvd_note_usage(rdev);
389
386 radeon_cs_sync_rings(parser); 390 radeon_cs_sync_rings(parser);
387 r = radeon_ib_schedule(rdev, &parser->ib, NULL); 391 r = radeon_ib_schedule(rdev, &parser->ib, NULL);
388 if (r) { 392 if (r) {
@@ -474,6 +478,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
474 return r; 478 return r;
475 } 479 }
476 480
481 if (parser->ring == R600_RING_TYPE_UVD_INDEX)
482 radeon_uvd_note_usage(rdev);
483
477 mutex_lock(&rdev->vm_manager.lock); 484 mutex_lock(&rdev->vm_manager.lock);
478 mutex_lock(&vm->mutex); 485 mutex_lock(&vm->mutex);
479 r = radeon_vm_alloc_pt(rdev, vm); 486 r = radeon_vm_alloc_pt(rdev, vm);
@@ -552,10 +559,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
552 return r; 559 return r;
553 } 560 }
554 561
555 /* XXX pick SD/HD/MVC */
556 if (parser.ring == R600_RING_TYPE_UVD_INDEX)
557 radeon_uvd_note_usage(rdev);
558
559 r = radeon_cs_ib_chunk(rdev, &parser); 562 r = radeon_cs_ib_chunk(rdev, &parser);
560 if (r) { 563 if (r) {
561 goto out; 564 goto out;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 82335e38ec4f..16cb8792b1e6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1003,16 +1003,28 @@ static void radeon_check_arguments(struct radeon_device *rdev)
1003 radeon_vram_limit = 0; 1003 radeon_vram_limit = 0;
1004 } 1004 }
1005 1005
1006 if (radeon_gart_size == -1) {
1007 /* default to a larger gart size on newer asics */
1008 if (rdev->family >= CHIP_RV770)
1009 radeon_gart_size = 1024;
1010 else
1011 radeon_gart_size = 512;
1012 }
1006 /* gtt size must be power of two and greater or equal to 32M */ 1013 /* gtt size must be power of two and greater or equal to 32M */
1007 if (radeon_gart_size < 32) { 1014 if (radeon_gart_size < 32) {
1008 dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n", 1015 dev_warn(rdev->dev, "gart size (%d) too small\n",
1009 radeon_gart_size); 1016 radeon_gart_size);
1010 radeon_gart_size = 512; 1017 if (rdev->family >= CHIP_RV770)
1011 1018 radeon_gart_size = 1024;
1019 else
1020 radeon_gart_size = 512;
1012 } else if (!radeon_check_pot_argument(radeon_gart_size)) { 1021 } else if (!radeon_check_pot_argument(radeon_gart_size)) {
1013 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", 1022 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1014 radeon_gart_size); 1023 radeon_gart_size);
1015 radeon_gart_size = 512; 1024 if (rdev->family >= CHIP_RV770)
1025 radeon_gart_size = 1024;
1026 else
1027 radeon_gart_size = 512;
1016 } 1028 }
1017 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20; 1029 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1018 1030
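With radeon_gart_size defaulting to -1, the module parameter now means "auto", and the family check picks 1024 MB for RV770 and newer, 512 MB otherwise; the early gtt_size assignment in radeon_device_init switches to a fixed 512 MB placeholder because radeon_gart_size can legitimately still be -1 before radeon_check_arguments() resolves it. The same two-way default is spelled out three times in the hunk; a hypothetical helper capturing it (not part of the patch) would read:

static int radeon_default_gart_size(struct radeon_device *rdev)
{
	/* newer asics get a larger default gart */
	return (rdev->family >= CHIP_RV770) ? 1024 : 512;	/* MB */
}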
@@ -1144,7 +1156,7 @@ int radeon_device_init(struct radeon_device *rdev,
1144 rdev->family = flags & RADEON_FAMILY_MASK; 1156 rdev->family = flags & RADEON_FAMILY_MASK;
1145 rdev->is_atom_bios = false; 1157 rdev->is_atom_bios = false;
1146 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT; 1158 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
1147 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 1159 rdev->mc.gtt_size = 512 * 1024 * 1024;
1148 rdev->accel_working = false; 1160 rdev->accel_working = false;
1149 /* set up ring ids */ 1161 /* set up ring ids */
1150 for (i = 0; i < RADEON_NUM_RINGS; i++) { 1162 for (i = 0; i < RADEON_NUM_RINGS; i++) {
@@ -1163,6 +1175,7 @@ int radeon_device_init(struct radeon_device *rdev,
1163 mutex_init(&rdev->gem.mutex); 1175 mutex_init(&rdev->gem.mutex);
1164 mutex_init(&rdev->pm.mutex); 1176 mutex_init(&rdev->pm.mutex);
1165 mutex_init(&rdev->gpu_clock_mutex); 1177 mutex_init(&rdev->gpu_clock_mutex);
1178 mutex_init(&rdev->srbm_mutex);
1166 init_rwsem(&rdev->pm.mclk_lock); 1179 init_rwsem(&rdev->pm.mclk_lock);
1167 init_rwsem(&rdev->exclusive_lock); 1180 init_rwsem(&rdev->exclusive_lock);
1168 init_waitqueue_head(&rdev->irq.vblank_queue); 1181 init_waitqueue_head(&rdev->irq.vblank_queue);
@@ -1269,7 +1282,7 @@ int radeon_device_init(struct radeon_device *rdev,
1269 /* this will fail for cards that aren't VGA class devices, just 1282 /* this will fail for cards that aren't VGA class devices, just
1270 * ignore it */ 1283 * ignore it */
1271 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 1284 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
1272 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops); 1285 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, false);
1273 1286
1274 r = radeon_init(rdev); 1287 r = radeon_init(rdev);
1275 if (r) 1288 if (r)
@@ -1519,6 +1532,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
1519 radeon_save_bios_scratch_regs(rdev); 1532 radeon_save_bios_scratch_regs(rdev);
1520 /* block TTM */ 1533 /* block TTM */
1521 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 1534 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1535 radeon_pm_suspend(rdev);
1522 radeon_suspend(rdev); 1536 radeon_suspend(rdev);
1523 1537
1524 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1538 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -1564,6 +1578,7 @@ retry:
1564 } 1578 }
1565 } 1579 }
1566 1580
1581 radeon_pm_resume(rdev);
1567 drm_helper_resume_force_mode(rdev->ddev); 1582 drm_helper_resume_force_mode(rdev->ddev);
1568 1583
1569 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 1584 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index c2b67b4e1ac2..b055bddaa94c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -345,7 +345,8 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
345 345
346static int radeon_crtc_page_flip(struct drm_crtc *crtc, 346static int radeon_crtc_page_flip(struct drm_crtc *crtc,
347 struct drm_framebuffer *fb, 347 struct drm_framebuffer *fb,
348 struct drm_pending_vblank_event *event) 348 struct drm_pending_vblank_event *event,
349 uint32_t page_flip_flags)
349{ 350{
350 struct drm_device *dev = crtc->dev; 351 struct drm_device *dev = crtc->dev;
351 struct radeon_device *rdev = dev->dev_private; 352 struct radeon_device *rdev = dev->dev_private;
@@ -1254,41 +1255,41 @@ static void radeon_afmt_init(struct radeon_device *rdev)
1254 for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) 1255 for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
1255 rdev->mode_info.afmt[i] = NULL; 1256 rdev->mode_info.afmt[i] = NULL;
1256 1257
1257 if (ASIC_IS_DCE6(rdev)) { 1258 if (ASIC_IS_NODCE(rdev)) {
1258 /* todo */ 1259 /* nothing to do */
1259 } else if (ASIC_IS_DCE4(rdev)) { 1260 } else if (ASIC_IS_DCE4(rdev)) {
1261 static uint32_t eg_offsets[] = {
1262 EVERGREEN_CRTC0_REGISTER_OFFSET,
1263 EVERGREEN_CRTC1_REGISTER_OFFSET,
1264 EVERGREEN_CRTC2_REGISTER_OFFSET,
1265 EVERGREEN_CRTC3_REGISTER_OFFSET,
1266 EVERGREEN_CRTC4_REGISTER_OFFSET,
1267 EVERGREEN_CRTC5_REGISTER_OFFSET,
1268 0x13830 - 0x7030,
1269 };
1270 int num_afmt;
1271
1272 /* DCE8 has 7 audio blocks tied to DIG encoders */
1273 /* DCE6 has 6 audio blocks tied to DIG encoders */
1260 /* DCE4/5 has 6 audio blocks tied to DIG encoders */ 1274 /* DCE4/5 has 6 audio blocks tied to DIG encoders */
1261 /* DCE4.1 has 2 audio blocks tied to DIG encoders */ 1275 /* DCE4.1 has 2 audio blocks tied to DIG encoders */
1262 rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); 1276 if (ASIC_IS_DCE8(rdev))
1263 if (rdev->mode_info.afmt[0]) { 1277 num_afmt = 7;
1264 rdev->mode_info.afmt[0]->offset = EVERGREEN_CRTC0_REGISTER_OFFSET; 1278 else if (ASIC_IS_DCE6(rdev))
1265 rdev->mode_info.afmt[0]->id = 0; 1279 num_afmt = 6;
1266 } 1280 else if (ASIC_IS_DCE5(rdev))
1267 rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); 1281 num_afmt = 6;
1268 if (rdev->mode_info.afmt[1]) { 1282 else if (ASIC_IS_DCE41(rdev))
1269 rdev->mode_info.afmt[1]->offset = EVERGREEN_CRTC1_REGISTER_OFFSET; 1283 num_afmt = 2;
1270 rdev->mode_info.afmt[1]->id = 1; 1284 else /* DCE4 */
1271 } 1285 num_afmt = 6;
1272 if (!ASIC_IS_DCE41(rdev)) { 1286
1273 rdev->mode_info.afmt[2] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); 1287 BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets));
1274 if (rdev->mode_info.afmt[2]) { 1288 for (i = 0; i < num_afmt; i++) {
1275 rdev->mode_info.afmt[2]->offset = EVERGREEN_CRTC2_REGISTER_OFFSET; 1289 rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1276 rdev->mode_info.afmt[2]->id = 2; 1290 if (rdev->mode_info.afmt[i]) {
1277 } 1291 rdev->mode_info.afmt[i]->offset = eg_offsets[i];
1278 rdev->mode_info.afmt[3] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL); 1292 rdev->mode_info.afmt[i]->id = i;
1279 if (rdev->mode_info.afmt[3]) {
1280 rdev->mode_info.afmt[3]->offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
1281 rdev->mode_info.afmt[3]->id = 3;
1282 }
1283 rdev->mode_info.afmt[4] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1284 if (rdev->mode_info.afmt[4]) {
1285 rdev->mode_info.afmt[4]->offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
1286 rdev->mode_info.afmt[4]->id = 4;
1287 }
1288 rdev->mode_info.afmt[5] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1289 if (rdev->mode_info.afmt[5]) {
1290 rdev->mode_info.afmt[5]->offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
1291 rdev->mode_info.afmt[5]->id = 5;
1292 } 1293 }
1293 } 1294 }
1294 } else if (ASIC_IS_DCE3(rdev)) { 1295 } else if (ASIC_IS_DCE3(rdev)) {
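The afmt rewrite trades six copy-pasted kzalloc blocks for an offset table plus a loop, with BUG_ON keeping num_afmt within eg_offsets[]; the seventh entry (0x13830 - 0x7030) is the extra DCE8-only audio block, written as a delta so it shares the CRTC0-relative addressing. The detection ladder restated as a lookup, illustrative only:

static int num_afmt_blocks(struct radeon_device *rdev)
{
	if (ASIC_IS_DCE8(rdev))  return 7;
	if (ASIC_IS_DCE6(rdev))  return 6;
	if (ASIC_IS_DCE5(rdev))  return 6;
	if (ASIC_IS_DCE41(rdev)) return 2;
	return 6;	/* plain DCE4 */
}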
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 29876b1be8ec..cb4445f55a96 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -81,7 +81,6 @@
81#define KMS_DRIVER_PATCHLEVEL 0 81#define KMS_DRIVER_PATCHLEVEL 0
82int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 82int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
83int radeon_driver_unload_kms(struct drm_device *dev); 83int radeon_driver_unload_kms(struct drm_device *dev);
84int radeon_driver_firstopen_kms(struct drm_device *dev);
85void radeon_driver_lastclose_kms(struct drm_device *dev); 84void radeon_driver_lastclose_kms(struct drm_device *dev);
86int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); 85int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
87void radeon_driver_postclose_kms(struct drm_device *dev, 86void radeon_driver_postclose_kms(struct drm_device *dev,
@@ -101,8 +100,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
101int radeon_driver_irq_postinstall_kms(struct drm_device *dev); 100int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
102void radeon_driver_irq_uninstall_kms(struct drm_device *dev); 101void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
103irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS); 102irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
104int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
105 struct drm_file *file_priv);
106int radeon_gem_object_init(struct drm_gem_object *obj); 103int radeon_gem_object_init(struct drm_gem_object *obj);
107void radeon_gem_object_free(struct drm_gem_object *obj); 104void radeon_gem_object_free(struct drm_gem_object *obj);
108int radeon_gem_object_open(struct drm_gem_object *obj, 105int radeon_gem_object_open(struct drm_gem_object *obj,
@@ -111,7 +108,7 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
111 struct drm_file *file_priv); 108 struct drm_file *file_priv);
112extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, 109extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
113 int *vpos, int *hpos); 110 int *vpos, int *hpos);
114extern struct drm_ioctl_desc radeon_ioctls_kms[]; 111extern const struct drm_ioctl_desc radeon_ioctls_kms[];
115extern int radeon_max_kms_ioctl; 112extern int radeon_max_kms_ioctl;
116int radeon_mmap(struct file *filp, struct vm_area_struct *vma); 113int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
117int radeon_mode_dumb_mmap(struct drm_file *filp, 114int radeon_mode_dumb_mmap(struct drm_file *filp,
@@ -120,9 +117,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
120int radeon_mode_dumb_create(struct drm_file *file_priv, 117int radeon_mode_dumb_create(struct drm_file *file_priv,
121 struct drm_device *dev, 118 struct drm_device *dev,
122 struct drm_mode_create_dumb *args); 119 struct drm_mode_create_dumb *args);
123int radeon_mode_dumb_destroy(struct drm_file *file_priv,
124 struct drm_device *dev,
125 uint32_t handle);
126struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); 120struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
127struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, 121struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
128 size_t size, 122 size_t size,
@@ -154,7 +148,7 @@ int radeon_dynclks = -1;
154int radeon_r4xx_atom = 0; 148int radeon_r4xx_atom = 0;
155int radeon_agpmode = 0; 149int radeon_agpmode = 0;
156int radeon_vram_limit = 0; 150int radeon_vram_limit = 0;
157int radeon_gart_size = 512; /* default gart size */ 151int radeon_gart_size = -1; /* auto */
158int radeon_benchmarking = 0; 152int radeon_benchmarking = 0;
159int radeon_testing = 0; 153int radeon_testing = 0;
160int radeon_connector_table = 0; 154int radeon_connector_table = 0;
@@ -187,7 +181,7 @@ module_param_named(vramlimit, radeon_vram_limit, int, 0600);
187MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)"); 181MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
188module_param_named(agpmode, radeon_agpmode, int, 0444); 182module_param_named(agpmode, radeon_agpmode, int, 0444);
189 183
190MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc)"); 184MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
191module_param_named(gartsize, radeon_gart_size, int, 0600); 185module_param_named(gartsize, radeon_gart_size, int, 0600);
192 186
193MODULE_PARM_DESC(benchmark, "Run benchmark"); 187MODULE_PARM_DESC(benchmark, "Run benchmark");
@@ -272,7 +266,6 @@ static const struct file_operations radeon_driver_old_fops = {
272 .unlocked_ioctl = drm_ioctl, 266 .unlocked_ioctl = drm_ioctl,
273 .mmap = drm_mmap, 267 .mmap = drm_mmap,
274 .poll = drm_poll, 268 .poll = drm_poll,
275 .fasync = drm_fasync,
276 .read = drm_read, 269 .read = drm_read,
277#ifdef CONFIG_COMPAT 270#ifdef CONFIG_COMPAT
278 .compat_ioctl = radeon_compat_ioctl, 271 .compat_ioctl = radeon_compat_ioctl,
@@ -282,7 +275,7 @@ static const struct file_operations radeon_driver_old_fops = {
282 275
283static struct drm_driver driver_old = { 276static struct drm_driver driver_old = {
284 .driver_features = 277 .driver_features =
285 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 278 DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
286 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED, 279 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
287 .dev_priv_size = sizeof(drm_radeon_buf_priv_t), 280 .dev_priv_size = sizeof(drm_radeon_buf_priv_t),
288 .load = radeon_driver_load, 281 .load = radeon_driver_load,
@@ -381,7 +374,6 @@ static const struct file_operations radeon_driver_kms_fops = {
381 .unlocked_ioctl = drm_ioctl, 374 .unlocked_ioctl = drm_ioctl,
382 .mmap = radeon_mmap, 375 .mmap = radeon_mmap,
383 .poll = drm_poll, 376 .poll = drm_poll,
384 .fasync = drm_fasync,
385 .read = drm_read, 377 .read = drm_read,
386#ifdef CONFIG_COMPAT 378#ifdef CONFIG_COMPAT
387 .compat_ioctl = radeon_kms_compat_ioctl, 379 .compat_ioctl = radeon_kms_compat_ioctl,
@@ -390,12 +382,11 @@ static const struct file_operations radeon_driver_kms_fops = {
390 382
391static struct drm_driver kms_driver = { 383static struct drm_driver kms_driver = {
392 .driver_features = 384 .driver_features =
393 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 385 DRIVER_USE_AGP |
394 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM | 386 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
395 DRIVER_PRIME, 387 DRIVER_PRIME | DRIVER_RENDER,
396 .dev_priv_size = 0, 388 .dev_priv_size = 0,
397 .load = radeon_driver_load_kms, 389 .load = radeon_driver_load_kms,
398 .firstopen = radeon_driver_firstopen_kms,
399 .open = radeon_driver_open_kms, 390 .open = radeon_driver_open_kms,
400 .preclose = radeon_driver_preclose_kms, 391 .preclose = radeon_driver_preclose_kms,
401 .postclose = radeon_driver_postclose_kms, 392 .postclose = radeon_driver_postclose_kms,
@@ -421,10 +412,9 @@ static struct drm_driver kms_driver = {
421 .gem_free_object = radeon_gem_object_free, 412 .gem_free_object = radeon_gem_object_free,
422 .gem_open_object = radeon_gem_object_open, 413 .gem_open_object = radeon_gem_object_open,
423 .gem_close_object = radeon_gem_object_close, 414 .gem_close_object = radeon_gem_object_close,
424 .dma_ioctl = radeon_dma_ioctl_kms,
425 .dumb_create = radeon_mode_dumb_create, 415 .dumb_create = radeon_mode_dumb_create,
426 .dumb_map_offset = radeon_mode_dumb_mmap, 416 .dumb_map_offset = radeon_mode_dumb_mmap,
427 .dumb_destroy = radeon_mode_dumb_destroy, 417 .dumb_destroy = drm_gem_dumb_destroy,
428 .fops = &radeon_driver_kms_fops, 418 .fops = &radeon_driver_kms_fops,
429 419
430 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 420 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 7ddb0efe2408..ddb8f8e04eb5 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -782,7 +782,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
782 782
783 } else { 783 } else {
784 /* put fence directly behind firmware */ 784 /* put fence directly behind firmware */
785 index = ALIGN(rdev->uvd.fw_size, 8); 785 index = ALIGN(rdev->uvd_fw->size, 8);
786 rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index; 786 rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
787 rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index; 787 rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
788 } 788 }
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index d9d31a383276..b990b1a2bd50 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -207,7 +207,6 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev)
207 if (rdev->gart.robj == NULL) { 207 if (rdev->gart.robj == NULL) {
208 return; 208 return;
209 } 209 }
210 radeon_gart_table_vram_unpin(rdev);
211 radeon_bo_unref(&rdev->gart.robj); 210 radeon_bo_unref(&rdev->gart.robj);
212} 211}
213 212
@@ -466,7 +465,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
466 size += rdev->vm_manager.max_pfn * 8; 465 size += rdev->vm_manager.max_pfn * 8;
467 size *= 2; 466 size *= 2;
468 r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, 467 r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
469 RADEON_VM_PTB_ALIGN(size), 468 RADEON_GPU_PAGE_ALIGN(size),
470 RADEON_VM_PTB_ALIGN_SIZE, 469 RADEON_VM_PTB_ALIGN_SIZE,
471 RADEON_GEM_DOMAIN_VRAM); 470 RADEON_GEM_DOMAIN_VRAM);
472 if (r) { 471 if (r) {
@@ -621,7 +620,7 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
621 } 620 }
622 621
623retry: 622retry:
624 pd_size = RADEON_VM_PTB_ALIGN(radeon_vm_directory_size(rdev)); 623 pd_size = radeon_vm_directory_size(rdev);
625 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, 624 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
626 &vm->page_directory, pd_size, 625 &vm->page_directory, pd_size,
627 RADEON_VM_PTB_ALIGN_SIZE, false); 626 RADEON_VM_PTB_ALIGN_SIZE, false);
@@ -953,8 +952,8 @@ static int radeon_vm_update_pdes(struct radeon_device *rdev,
953retry: 952retry:
954 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, 953 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
955 &vm->page_tables[pt_idx], 954 &vm->page_tables[pt_idx],
956 RADEON_VM_PTB_ALIGN(RADEON_VM_PTE_COUNT * 8), 955 RADEON_VM_PTE_COUNT * 8,
957 RADEON_VM_PTB_ALIGN_SIZE, false); 956 RADEON_GPU_PAGE_SIZE, false);
958 957
959 if (r == -ENOMEM) { 958 if (r == -ENOMEM) {
960 r = radeon_vm_evict(rdev, vm); 959 r = radeon_vm_evict(rdev, vm);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index aa796031ab65..dce99c8a5835 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -570,13 +570,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
570 return 0; 570 return 0;
571} 571}
572 572
573int radeon_mode_dumb_destroy(struct drm_file *file_priv,
574 struct drm_device *dev,
575 uint32_t handle)
576{
577 return drm_gem_handle_delete(file_priv, handle);
578}
579
580#if defined(CONFIG_DEBUG_FS) 573#if defined(CONFIG_DEBUG_FS)
581static int radeon_debugfs_gem_info(struct seq_file *m, void *data) 574static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
582{ 575{
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 081886b0642d..cc9e8482cf30 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -275,17 +275,19 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
275 dev_info(rdev->dev, "radeon: using MSI.\n"); 275 dev_info(rdev->dev, "radeon: using MSI.\n");
276 } 276 }
277 } 277 }
278
279 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
280 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
281 INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
282
278 rdev->irq.installed = true; 283 rdev->irq.installed = true;
279 r = drm_irq_install(rdev->ddev); 284 r = drm_irq_install(rdev->ddev);
280 if (r) { 285 if (r) {
281 rdev->irq.installed = false; 286 rdev->irq.installed = false;
287 flush_work(&rdev->hotplug_work);
282 return r; 288 return r;
283 } 289 }
284 290
285 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
286 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
287 INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
288
289 DRM_INFO("radeon: irq initialized.\n"); 291 DRM_INFO("radeon: irq initialized.\n");
290 return 0; 292 return 0;
291} 293}
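Moving the INIT_WORK calls ahead of drm_irq_install() closes a startup race: once the IRQ is installed, a hotplug or audio interrupt can fire immediately and the handler will queue these work items, so they must already be initialized; with the old order an early interrupt could schedule an uninitialized work struct. The error path correspondingly flushes hotplug_work in case something was queued before the install failed. Sketch of the required ordering:

INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);	/* 1: init */
r = drm_irq_install(rdev->ddev);				/* 2: enable */
if (r) {
	rdev->irq.installed = false;
	flush_work(&rdev->hotplug_work);			/* 3: drain */
	return r;
}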
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 49ff3d1a6102..61580ddc4eb2 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -433,6 +433,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
433 return -EINVAL; 433 return -EINVAL;
434 } 434 }
435 break; 435 break;
436 case RADEON_INFO_SI_CP_DMA_COMPUTE:
437 *value = 1;
438 break;
436 default: 439 default:
437 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 440 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
438 return -EINVAL; 441 return -EINVAL;
@@ -449,19 +452,6 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
449 * Outdated mess for old drm with Xorg being in charge (void function now). 452 * Outdated mess for old drm with Xorg being in charge (void function now).
450 */ 453 */
451/** 454/**
452 * radeon_driver_firstopen_kms - drm callback for first open
453 *
454 * @dev: drm dev pointer
455 *
456 * Nothing to be done for KMS (all asics).
457 * Returns 0 on success.
458 */
459int radeon_driver_firstopen_kms(struct drm_device *dev)
460{
461 return 0;
462}
463
464/**
465 * radeon_driver_lastclose_kms - drm callback for last close 455 * radeon_driver_lastclose_kms - drm callback for last close
466 * 456 *
467 * @dev: drm dev pointer 457 * @dev: drm dev pointer
@@ -683,16 +673,6 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
683 drmcrtc); 673 drmcrtc);
684} 674}
685 675
686/*
687 * IOCTL.
688 */
689int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
690 struct drm_file *file_priv)
691{
692 /* Not valid in KMS. */
693 return -EINVAL;
694}
695
696#define KMS_INVALID_IOCTL(name) \ 676#define KMS_INVALID_IOCTL(name) \
697int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\ 677int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
698{ \ 678{ \
@@ -732,7 +712,7 @@ KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
732KMS_INVALID_IOCTL(radeon_surface_free_kms) 712KMS_INVALID_IOCTL(radeon_surface_free_kms)
733 713
734 714
735struct drm_ioctl_desc radeon_ioctls_kms[] = { 715const struct drm_ioctl_desc radeon_ioctls_kms[] = {
736 DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 716 DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
737 DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 717 DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
738 DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 718 DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -761,18 +741,18 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
761 DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), 741 DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
762 DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), 742 DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
763 /* KMS */ 743 /* KMS */
764 DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED), 744 DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
765 DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED), 745 DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
766 DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED), 746 DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
767 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED), 747 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
768 DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED), 748 DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
769 DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED), 749 DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
770 DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED), 750 DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
771 DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED), 751 DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
772 DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED), 752 DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
773 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), 753 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
774 DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), 754 DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
775 DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), 755 DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
776 DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED), 756 DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
777}; 757};
778int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); 758int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
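
Two distinct changes land in this table: it becomes const (the descriptors are never written at runtime), and the GEM/CS entries gain DRM_RENDER_ALLOW, which is what lets them be invoked through a render node that has no master or modesetting rights. A reduced sketch of such a flag gate; the demo struct is hypothetical, only the two flag values match the DRM core of this era:

#include <stdbool.h>

#define DRM_AUTH	 (1 << 0)	/* 0x01: authenticated clients only */
#define DRM_RENDER_ALLOW (1 << 5)	/* 0x20: callable on render nodes */

struct demo_ioctl_desc {
	unsigned int flags;
};

/* Render nodes reject every ioctl that does not opt in explicitly. */
static bool demo_render_node_allows(const struct demo_ioctl_desc *d)
{
	return (d->flags & DRM_RENDER_ALLOW) != 0;
}
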
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 8296632a4235..d908d8d68f6b 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -225,6 +225,7 @@ struct radeon_afmt {
 	int offset;
 	bool last_buffer_filled_status;
 	int id;
+	struct r600_audio_pin *pin;
 };
 
 struct radeon_mode_info {
@@ -233,7 +234,7 @@ struct radeon_mode_info {
 	enum radeon_connector_table connector_table;
 	bool mode_config_initialized;
 	struct radeon_crtc *crtcs[6];
-	struct radeon_afmt *afmt[6];
+	struct radeon_afmt *afmt[7];
 	/* DVI-I properties */
 	struct drm_property *coherent_mode_property;
 	/* DAC enable load detect */
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 2020bf4a3830..c0fa4aa9ceea 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -142,7 +142,6 @@ int radeon_bo_create(struct radeon_device *rdev,
 		return r;
 	}
 	bo->rdev = rdev;
-	bo->gem_base.driver_private = NULL;
 	bo->surface_reg = -1;
 	INIT_LIST_HEAD(&bo->list);
 	INIT_LIST_HEAD(&bo->va);
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 49c82c480013..209b11150263 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -113,13 +113,10 @@ static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
  * @bo: radeon object for which we query the offset
  *
  * Returns mmap offset of the object.
- *
- * Note: addr_space_offset is constant after ttm bo init thus isn't protected
- * by any lock.
  */
 static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
 {
-	return bo->tbo.addr_space_offset;
+	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
 }
 
 extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
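
The helper now derives the fake mmap offset from the vma-manager node instead of the removed tbo.addr_space_offset field. The value is still just the node's start page scaled to a byte offset, which is what userspace passes to mmap(). A simplified sketch of that computation, with hypothetical demo_* names (not the kernel implementation):

#include <stdint.h>

#define DEMO_PAGE_SHIFT 12	/* assumes 4 KiB pages */

struct demo_vma_node {
	uint64_t start_page;	/* first page inside the mmap address space */
};

/* Byte offset handed to userspace for mmap(), page-aligned by design. */
static inline uint64_t demo_node_offset_addr(const struct demo_vma_node *n)
{
	return n->start_page << DEMO_PAGE_SHIFT;
}
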
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index f374c467aaca..d7555369a3e5 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -569,6 +569,8 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
 	case THERMAL_TYPE_NI:
 	case THERMAL_TYPE_SUMO:
 	case THERMAL_TYPE_SI:
+	case THERMAL_TYPE_CI:
+	case THERMAL_TYPE_KV:
 		if (rdev->asic->pm.get_temperature == NULL)
 			return err;
 		rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
@@ -624,7 +626,15 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work)
 		/* switch back the user state */
 		dpm_state = rdev->pm.dpm.user_state;
 	}
-	radeon_dpm_enable_power_state(rdev, dpm_state);
+	mutex_lock(&rdev->pm.mutex);
+	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
+		rdev->pm.dpm.thermal_active = true;
+	else
+		rdev->pm.dpm.thermal_active = false;
+	rdev->pm.dpm.state = dpm_state;
+	mutex_unlock(&rdev->pm.mutex);
+
+	radeon_pm_compute_clocks(rdev);
 }
 
 static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
@@ -687,7 +697,10 @@ restart_search:
 			break;
 		/* internal states */
 		case POWER_STATE_TYPE_INTERNAL_UVD:
-			return rdev->pm.dpm.uvd_ps;
+			if (rdev->pm.dpm.uvd_ps)
+				return rdev->pm.dpm.uvd_ps;
+			else
+				break;
 		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
 			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
 				return ps;
@@ -729,10 +742,17 @@ restart_search:
 	/* use a fallback state if we didn't match */
 	switch (dpm_state) {
 	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+		goto restart_search;
 	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
 	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
 	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
-		return rdev->pm.dpm.uvd_ps;
+		if (rdev->pm.dpm.uvd_ps) {
+			return rdev->pm.dpm.uvd_ps;
+		} else {
+			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+			goto restart_search;
+		}
 	case POWER_STATE_TYPE_INTERNAL_THERMAL:
 		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
 		goto restart_search;
@@ -850,38 +870,51 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
 
 	radeon_dpm_post_set_power_state(rdev);
 
+	/* force low perf level for thermal */
+	if (rdev->pm.dpm.thermal_active &&
+	    rdev->asic->dpm.force_performance_level) {
+		radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
+	}
+
 done:
 	mutex_unlock(&rdev->ring_lock);
 	up_write(&rdev->pm.mclk_lock);
 	mutex_unlock(&rdev->ddev->struct_mutex);
 }
 
-void radeon_dpm_enable_power_state(struct radeon_device *rdev,
-				   enum radeon_pm_state_type dpm_state)
+void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
 {
-	if (!rdev->pm.dpm_enabled)
-		return;
+	enum radeon_pm_state_type dpm_state;
 
-	mutex_lock(&rdev->pm.mutex);
-	switch (dpm_state) {
-	case POWER_STATE_TYPE_INTERNAL_THERMAL:
-		rdev->pm.dpm.thermal_active = true;
-		break;
-	case POWER_STATE_TYPE_INTERNAL_UVD:
-	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
-	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
-	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
-	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
-		rdev->pm.dpm.uvd_active = true;
-		break;
-	default:
-		rdev->pm.dpm.thermal_active = false;
-		rdev->pm.dpm.uvd_active = false;
-		break;
+	if (rdev->asic->dpm.powergate_uvd) {
+		mutex_lock(&rdev->pm.mutex);
+		/* enable/disable UVD */
+		radeon_dpm_powergate_uvd(rdev, !enable);
+		mutex_unlock(&rdev->pm.mutex);
+	} else {
+		if (enable) {
+			mutex_lock(&rdev->pm.mutex);
+			rdev->pm.dpm.uvd_active = true;
+			if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
+				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
+			else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
+				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
+				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
+				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
+			else
+				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
+			rdev->pm.dpm.state = dpm_state;
+			mutex_unlock(&rdev->pm.mutex);
+		} else {
+			mutex_lock(&rdev->pm.mutex);
+			rdev->pm.dpm.uvd_active = false;
+			mutex_unlock(&rdev->pm.mutex);
+		}
+
+		radeon_pm_compute_clocks(rdev);
 	}
-	rdev->pm.dpm.state = dpm_state;
-	mutex_unlock(&rdev->pm.mutex);
-	radeon_pm_compute_clocks(rdev);
 }
 
 static void radeon_pm_suspend_old(struct radeon_device *rdev)
@@ -1176,7 +1209,17 @@ int radeon_pm_init(struct radeon_device *rdev)
 	case CHIP_VERDE:
 	case CHIP_OLAND:
 	case CHIP_HAINAN:
+	case CHIP_BONAIRE:
+	case CHIP_KABINI:
+	case CHIP_KAVERI:
+		/* DPM requires the RLC, RV770+ dGPU requires SMC */
+		if (!rdev->rlc_fw)
+			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		else if ((rdev->family >= CHIP_RV770) &&
+			 (!(rdev->flags & RADEON_IS_IGP)) &&
+			 (!rdev->smc_fw))
+			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		else if (radeon_dpm == 1)
 			rdev->pm.pm_method = PM_METHOD_DPM;
 		else
 			rdev->pm.pm_method = PM_METHOD_PROFILE;
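
The pick-power-state changes above all serve one rule: never dereference rdev->pm.dpm.uvd_ps when the platform provides no dedicated UVD state, and instead keep relaxing the request (SD falls back to HD, UVD falls back to PERFORMANCE, THERMAL falls back to ACPI) until something matches. A compact sketch of that restart pattern, with hypothetical demo_* types; the real code scans rdev->pm.dpm.ps[]:

#include <stdbool.h>

enum demo_state { DEMO_UVD_SD, DEMO_UVD_HD, DEMO_PERFORMANCE };

static int demo_pick(enum demo_state want, const bool have[3])
{
restart_search:
	if (have[want])
		return (int)want;	/* matched, possibly after relaxing */

	switch (want) {
	case DEMO_UVD_SD:
		want = DEMO_UVD_HD;		/* SD falls back to HD */
		goto restart_search;
	case DEMO_UVD_HD:
		want = DEMO_PERFORMANCE;	/* last resort */
		goto restart_search;
	default:
		return -1;			/* nothing usable at all */
	}
}
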
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 65b9eabd5a2f..20074560fc25 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -68,7 +68,6 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
 			       RADEON_GEM_DOMAIN_GTT, sg, &bo);
 	if (ret)
 		return ERR_PTR(ret);
-	bo->gem_base.driver_private = bo;
 
 	mutex_lock(&rdev->gem.mutex);
 	list_add_tail(&bo->list, &rdev->gem.objects);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index fb5ea6208970..46a25f037b84 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -363,11 +363,10 @@ u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev,
 {
 	u32 rptr;
 
-	if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
+	if (rdev->wb.enabled)
 		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
 	else
 		rptr = RREG32(ring->rptr_reg);
-	rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
 
 	return rptr;
 }
@@ -378,7 +377,6 @@ u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
 	u32 wptr;
 
 	wptr = RREG32(ring->wptr_reg);
-	wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
 
 	return wptr;
 }
@@ -386,7 +384,7 @@ u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
 void radeon_ring_generic_set_wptr(struct radeon_device *rdev,
 				  struct radeon_ring *ring)
 {
-	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
+	WREG32(ring->wptr_reg, ring->wptr);
 	(void)RREG32(ring->wptr_reg);
 }
 
@@ -719,16 +717,13 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
  * @rptr_offs: offset of the rptr writeback location in the WB buffer
  * @rptr_reg: MMIO offset of the rptr register
  * @wptr_reg: MMIO offset of the wptr register
- * @ptr_reg_shift: bit offset of the rptr/wptr values
- * @ptr_reg_mask: bit mask of the rptr/wptr values
  * @nop: nop packet for this ring
  *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
 int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
-		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
-		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
+		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop)
 {
 	int r;
 
@@ -736,8 +731,6 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
 	ring->rptr_offs = rptr_offs;
 	ring->rptr_reg = rptr_reg;
 	ring->wptr_reg = wptr_reg;
-	ring->ptr_reg_shift = ptr_reg_shift;
-	ring->ptr_reg_mask = ptr_reg_mask;
 	ring->nop = nop;
 	/* Allocate ring buffer */
 	if (ring->ring_obj == NULL) {
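
With ptr_reg_shift/ptr_reg_mask gone, every ring is expected to expose its read/write pointers as plain dword values; for the common rings the old masking was effectively a no-op (the GFX ring passed shift 0 and mask 0xfffff). A tiny check of that claim, under the assumption that the register value never exceeds the mask:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t reg = 0x12345;			/* example raw wptr value */
	uint32_t masked = (reg & 0xfffff) >> 0;	/* old GFX-ring style */

	assert(masked == reg);			/* shift 0, wide mask: no-op */
	return 0;
}
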
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 6c0ce8915fac..71245d6f34a2 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -203,7 +203,9 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
 
 static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
-	return 0;
+	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+
+	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
 }
 
 static void radeon_move_null(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index d8b05f7bcf1a..33858364fe89 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -35,6 +35,12 @@
 #define SI_PFP_UCODE_SIZE 2144
 #define SI_PM4_UCODE_SIZE 2144
 #define SI_CE_UCODE_SIZE 2144
+#define CIK_PFP_UCODE_SIZE 2144
+#define CIK_ME_UCODE_SIZE 2144
+#define CIK_CE_UCODE_SIZE 2144
+
+/* MEC */
+#define CIK_MEC_UCODE_SIZE 4192
 
 /* RLC */
 #define R600_RLC_UCODE_SIZE 768
@@ -43,12 +49,20 @@
 #define CAYMAN_RLC_UCODE_SIZE 1024
 #define ARUBA_RLC_UCODE_SIZE 1536
 #define SI_RLC_UCODE_SIZE 2048
+#define BONAIRE_RLC_UCODE_SIZE 2048
+#define KB_RLC_UCODE_SIZE 2560
+#define KV_RLC_UCODE_SIZE 2560
 
 /* MC */
 #define BTC_MC_UCODE_SIZE 6024
 #define CAYMAN_MC_UCODE_SIZE 6037
 #define SI_MC_UCODE_SIZE 7769
 #define OLAND_MC_UCODE_SIZE 7863
+#define CIK_MC_UCODE_SIZE 7866
+
+/* SDMA */
+#define CIK_SDMA_UCODE_SIZE 1050
+#define CIK_SDMA_UCODE_VERSION 64
 
 /* SMC */
 #define RV770_SMC_UCODE_START 0x0100
@@ -126,4 +140,7 @@
 #define HAINAN_SMC_UCODE_START 0x10000
 #define HAINAN_SMC_UCODE_SIZE 0xe67C
 
+#define BONAIRE_SMC_UCODE_START 0x20000
+#define BONAIRE_SMC_UCODE_SIZE 0x1FDEC
+
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 414fd145d20e..1a01bbff9bfa 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -56,7 +56,6 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work);
 
 int radeon_uvd_init(struct radeon_device *rdev)
 {
-	const struct firmware *fw;
 	unsigned long bo_size;
 	const char *fw_name;
 	int i, r;
@@ -105,14 +104,14 @@ int radeon_uvd_init(struct radeon_device *rdev)
 		return -EINVAL;
 	}
 
-	r = request_firmware(&fw, fw_name, rdev->dev);
+	r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
 	if (r) {
 		dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
 			fw_name);
 		return r;
 	}
 
-	bo_size = RADEON_GPU_PAGE_ALIGN(fw->size + 8) +
+	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
 		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
 	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
 			     RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
@@ -145,15 +144,10 @@ int radeon_uvd_init(struct radeon_device *rdev)
 
 	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
 
-	rdev->uvd.fw_size = fw->size;
-	memset(rdev->uvd.cpu_addr, 0, bo_size);
-	memcpy(rdev->uvd.cpu_addr, fw->data, fw->size);
-
-	release_firmware(fw);
-
 	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
 		atomic_set(&rdev->uvd.handles[i], 0);
 		rdev->uvd.filp[i] = NULL;
+		rdev->uvd.img_size[i] = 0;
 	}
 
 	return 0;
@@ -174,33 +168,60 @@ void radeon_uvd_fini(struct radeon_device *rdev)
 	}
 
 	radeon_bo_unref(&rdev->uvd.vcpu_bo);
+
+	release_firmware(rdev->uvd_fw);
 }
 
 int radeon_uvd_suspend(struct radeon_device *rdev)
 {
 	unsigned size;
+	void *ptr;
+	int i;
 
 	if (rdev->uvd.vcpu_bo == NULL)
 		return 0;
 
+	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+		if (atomic_read(&rdev->uvd.handles[i]))
+			break;
+
+	if (i == RADEON_MAX_UVD_HANDLES)
+		return 0;
+
 	size = radeon_bo_size(rdev->uvd.vcpu_bo);
+	size -= rdev->uvd_fw->size;
+
+	ptr = rdev->uvd.cpu_addr;
+	ptr += rdev->uvd_fw->size;
+
 	rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
-	memcpy(rdev->uvd.saved_bo, rdev->uvd.cpu_addr, size);
+	memcpy(rdev->uvd.saved_bo, ptr, size);
 
 	return 0;
 }
 
 int radeon_uvd_resume(struct radeon_device *rdev)
 {
+	unsigned size;
+	void *ptr;
+
 	if (rdev->uvd.vcpu_bo == NULL)
 		return -EINVAL;
 
+	memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+
+	size = radeon_bo_size(rdev->uvd.vcpu_bo);
+	size -= rdev->uvd_fw->size;
+
+	ptr = rdev->uvd.cpu_addr;
+	ptr += rdev->uvd_fw->size;
+
 	if (rdev->uvd.saved_bo != NULL) {
-		unsigned size = radeon_bo_size(rdev->uvd.vcpu_bo);
-		memcpy(rdev->uvd.cpu_addr, rdev->uvd.saved_bo, size);
+		memcpy(ptr, rdev->uvd.saved_bo, size);
 		kfree(rdev->uvd.saved_bo);
 		rdev->uvd.saved_bo = NULL;
-	}
+	} else
+		memset(ptr, 0, size);
 
 	return 0;
 }
@@ -215,8 +236,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
 {
 	int i, r;
 	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
-		if (rdev->uvd.filp[i] == filp) {
-			uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+		if (handle != 0 && rdev->uvd.filp[i] == filp) {
 			struct radeon_fence *fence;
 
 			r = radeon_uvd_get_destroy_msg(rdev,
@@ -327,6 +348,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 			     unsigned offset, unsigned buf_sizes[])
 {
 	int32_t *msg, msg_type, handle;
+	unsigned img_size = 0;
 	void *ptr;
 
 	int i, r;
@@ -336,9 +358,19 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 		return -EINVAL;
 	}
 
+	if (bo->tbo.sync_obj) {
+		r = radeon_fence_wait(bo->tbo.sync_obj, false);
+		if (r) {
+			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+			return r;
+		}
+	}
+
 	r = radeon_bo_kmap(bo, &ptr);
-	if (r)
+	if (r) {
+		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
 		return r;
+	}
 
 	msg = ptr + offset;
 
@@ -353,6 +385,8 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 	if (msg_type == 1) {
 		/* it's a decode msg, calc buffer sizes */
 		r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+		/* calc image size (width * height) */
+		img_size = msg[6] * msg[7];
 		radeon_bo_kunmap(bo);
 		if (r)
 			return r;
@@ -364,8 +398,16 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 		radeon_bo_kunmap(bo);
 		return 0;
 	} else {
-		/* it's a create msg, no special handling needed */
+		/* it's a create msg, calc image size (width * height) */
+		img_size = msg[7] * msg[8];
 		radeon_bo_kunmap(bo);
+
+		if (msg_type != 0) {
+			DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+			return -EINVAL;
+		}
+
+		/* it's a create msg, no special handling needed */
 	}
 
 	/* create or decode, validate the handle */
@@ -378,6 +420,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
 		if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
 			p->rdev->uvd.filp[i] = p->filp;
+			p->rdev->uvd.img_size[i] = img_size;
 			return 0;
 		}
 	}
@@ -388,7 +431,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 
 static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 			       int data0, int data1,
-			       unsigned buf_sizes[])
+			       unsigned buf_sizes[], bool *has_msg_cmd)
 {
 	struct radeon_cs_chunk *relocs_chunk;
 	struct radeon_cs_reloc *reloc;
@@ -417,7 +460,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 
 	if (cmd < 0x4) {
 		if ((end - start) < buf_sizes[cmd]) {
-			DRM_ERROR("buffer to small (%d / %d)!\n",
+			DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
 				  (unsigned)(end - start), buf_sizes[cmd]);
 			return -EINVAL;
 		}
@@ -442,9 +485,17 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 	}
 
 	if (cmd == 0) {
+		if (*has_msg_cmd) {
+			DRM_ERROR("More than one message in a UVD-IB!\n");
+			return -EINVAL;
+		}
+		*has_msg_cmd = true;
 		r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
 		if (r)
 			return r;
+	} else if (!*has_msg_cmd) {
+		DRM_ERROR("Message needed before other commands are send!\n");
+		return -EINVAL;
 	}
 
 	return 0;
@@ -453,7 +504,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
 			     struct radeon_cs_packet *pkt,
 			     int *data0, int *data1,
-			     unsigned buf_sizes[])
+			     unsigned buf_sizes[],
+			     bool *has_msg_cmd)
 {
 	int i, r;
 
@@ -467,7 +519,8 @@ static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
 		*data1 = p->idx;
 		break;
 	case UVD_GPCOM_VCPU_CMD:
-		r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes);
+		r = radeon_uvd_cs_reloc(p, *data0, *data1,
+					buf_sizes, has_msg_cmd);
 		if (r)
 			return r;
 		break;
@@ -488,6 +541,9 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
 	struct radeon_cs_packet pkt;
 	int r, data0 = 0, data1 = 0;
 
+	/* does the IB has a msg command */
+	bool has_msg_cmd = false;
+
 	/* minimum buffer sizes */
 	unsigned buf_sizes[] = {
 		[0x00000000] = 2048,
@@ -514,8 +570,8 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
 			return r;
 		switch (pkt.type) {
 		case RADEON_PACKET_TYPE0:
-			r = radeon_uvd_cs_reg(p, &pkt, &data0,
-					      &data1, buf_sizes);
+			r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
+					      buf_sizes, &has_msg_cmd);
 			if (r)
 				return r;
 			break;
@@ -527,6 +583,12 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
 			return -EINVAL;
 		}
 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+
+	if (!has_msg_cmd) {
+		DRM_ERROR("UVD-IBs need a msg command!\n");
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -678,6 +740,34 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
 	return radeon_uvd_send_msg(rdev, ring, bo, fence);
 }
 
+/**
+ * radeon_uvd_count_handles - count number of open streams
+ *
+ * @rdev: radeon_device pointer
+ * @sd: number of SD streams
+ * @hd: number of HD streams
+ *
+ * Count the number of open SD/HD streams as a hint for power mangement
+ */
+static void radeon_uvd_count_handles(struct radeon_device *rdev,
+				     unsigned *sd, unsigned *hd)
+{
+	unsigned i;
+
+	*sd = 0;
+	*hd = 0;
+
+	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+		if (!atomic_read(&rdev->uvd.handles[i]))
+			continue;
+
+		if (rdev->uvd.img_size[i] >= 720*576)
+			++(*hd);
+		else
+			++(*sd);
+	}
+}
+
 static void radeon_uvd_idle_work_handler(struct work_struct *work)
 {
 	struct radeon_device *rdev =
@@ -685,10 +775,7 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work)
 
 	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
 		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
-			mutex_lock(&rdev->pm.mutex);
-			rdev->pm.dpm.uvd_active = false;
-			mutex_unlock(&rdev->pm.mutex);
-			radeon_pm_compute_clocks(rdev);
+			radeon_dpm_enable_uvd(rdev, false);
 		} else {
 			radeon_set_uvd_clocks(rdev, 0, 0);
 		}
@@ -700,13 +787,25 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work)
 
 void radeon_uvd_note_usage(struct radeon_device *rdev)
 {
+	bool streams_changed = false;
 	bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
 	set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
 					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
-	if (set_clocks) {
+
+	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+		unsigned hd = 0, sd = 0;
+		radeon_uvd_count_handles(rdev, &sd, &hd);
+		if ((rdev->pm.dpm.sd != sd) ||
+		    (rdev->pm.dpm.hd != hd)) {
+			rdev->pm.dpm.sd = sd;
+			rdev->pm.dpm.hd = hd;
+			streams_changed = true;
+		}
+	}
+
+	if (set_clocks || streams_changed) {
 		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
-			/* XXX pick SD/HD/MVC */
-			radeon_dpm_enable_power_state(rdev, POWER_STATE_TYPE_INTERNAL_UVD);
+			radeon_dpm_enable_uvd(rdev, true);
 		} else {
 			radeon_set_uvd_clocks(rdev, 53300, 40000);
 		}
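
radeon_uvd_count_handles() feeds the new DPM hooks: each open handle remembers its decoded image size, and anything at or above PAL SD (720 * 576 = 414720 pixels) is billed as an HD stream so radeon_dpm_enable_uvd() can pick the matching UVD state. The same bucketing as a stand-alone program, with a hypothetical demo_count() wrapper:

#include <stdio.h>

static void demo_count(const unsigned sizes[], unsigned n,
		       unsigned *sd, unsigned *hd)
{
	unsigned i;

	*sd = *hd = 0;
	for (i = 0; i < n; ++i) {
		if (sizes[i] == 0)
			continue;		/* closed handle */
		if (sizes[i] >= 720 * 576)
			++*hd;			/* PAL SD or larger: HD */
		else
			++*sd;
	}
}

int main(void)
{
	const unsigned sizes[] = { 640 * 480, 1920 * 1080, 0 };
	unsigned sd, hd;

	demo_count(sizes, 3, &sd, &hd);
	printf("sd=%u hd=%u\n", sd, hd);	/* prints sd=1 hd=1 */
	return 0;
}
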
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 233a9b9fa1f7..b8074a8ec75a 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -174,10 +174,13 @@ int rs400_gart_enable(struct radeon_device *rdev)
 	/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
 	 * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
 	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
-		WREG32_MC(RS480_MC_MISC_CNTL,
-			  (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
+		tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+		tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN;
+		WREG32_MC(RS480_MC_MISC_CNTL, tmp);
 	} else {
-		WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
+		tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+		tmp |= RS480_GART_INDEX_REG_EN;
+		WREG32_MC(RS480_MC_MISC_CNTL, tmp);
 	}
 	/* Enable gart */
 	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
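
The rs400 change swaps a blind register write for a read-modify-write, so RS480_MC_MISC_CNTL keeps whatever other bits the BIOS or earlier init set. The pattern in isolation, with demo_read()/demo_write() standing in for the RREG32_MC/WREG32_MC accessors:

#include <stdint.h>

static uint32_t demo_reg;	/* stands in for the MC register */

static uint32_t demo_read(void)        { return demo_reg; }
static void     demo_write(uint32_t v) { demo_reg = v; }

static void demo_set_bits(uint32_t bits)
{
	uint32_t tmp = demo_read();	/* preserve unrelated bits */

	tmp |= bits;			/* OR in only what we need */
	demo_write(tmp);
}
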
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 65e33f387341..ab1f2016f21e 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -819,7 +819,7 @@ static void rv6xx_program_memory_timing_parameters(struct radeon_device *rdev)
 		   POWERMODE1(calculate_memory_refresh_rate(rdev,
 							    pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) |
 		   POWERMODE2(calculate_memory_refresh_rate(rdev,
-							    pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) |
+							    pi->hw.sclks[R600_POWER_LEVEL_HIGH])) |
 		   POWERMODE3(calculate_memory_refresh_rate(rdev,
 							    pi->hw.sclks[R600_POWER_LEVEL_HIGH])));
 	WREG32(ARB_RFSH_RATE, arb_refresh_rate);
@@ -1182,10 +1182,10 @@ static void rv6xx_program_display_gap(struct radeon_device *rdev)
 	u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
 
 	tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
-	if (RREG32(AVIVO_D1CRTC_CONTROL) & AVIVO_CRTC_EN) {
+	if (rdev->pm.dpm.new_active_crtcs & 1) {
 		tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
 		tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
-	} else if (RREG32(AVIVO_D2CRTC_CONTROL) & AVIVO_CRTC_EN) {
+	} else if (rdev->pm.dpm.new_active_crtcs & 2) {
 		tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
 		tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
 	} else {
@@ -1670,6 +1670,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
 	struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
 	int ret;
 
+	pi->restricted_levels = 0;
+
 	rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
 
 	rv6xx_clear_vc(rdev);
@@ -1756,6 +1758,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
 
 	rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
 
+	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
+
 	return 0;
 }
 
@@ -1914,6 +1918,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
 			(power_state->v1.ucNonClockStateIndex *
 			 power_info->pplib.ucNonClockSize));
 		if (power_info->pplib.ucStateEntrySize - 1) {
+			u8 *idx;
 			ps = kzalloc(sizeof(struct rv6xx_ps), GFP_KERNEL);
 			if (ps == NULL) {
 				kfree(rdev->pm.dpm.ps);
@@ -1922,12 +1927,12 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
 			rdev->pm.dpm.ps[i].ps_priv = ps;
 			rv6xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
 							 non_clock_info);
+			idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
 			for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
 				clock_info = (union pplib_clock_info *)
 					(mode_info->atom_context->bios + data_offset +
 					 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
-					 (power_state->v1.ucClockStateIndices[j] *
-					  power_info->pplib.ucClockInfoSize));
+					 (idx[j] * power_info->pplib.ucClockInfoSize));
 				rv6xx_parse_pplib_clock_info(rdev,
 							     &rdev->pm.dpm.ps[i], j,
 							     clock_info);
@@ -1940,9 +1945,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
 
 int rv6xx_dpm_init(struct radeon_device *rdev)
 {
-	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
-	uint16_t data_offset, size;
-	uint8_t frev, crev;
+	struct radeon_atom_ss ss;
 	struct atom_clock_dividers dividers;
 	struct rv6xx_power_info *pi;
 	int ret;
@@ -1985,16 +1988,18 @@ int rv6xx_dpm_init(struct radeon_device *rdev)
 
 	pi->gfx_clock_gating = true;
 
-	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
-				   &frev, &crev, &data_offset)) {
-		pi->sclk_ss = true;
-		pi->mclk_ss = true;
+	pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+						       ASIC_INTERNAL_ENGINE_SS, 0);
+	pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
						       ASIC_INTERNAL_MEMORY_SS, 0);
+
+	/* Disable sclk ss, causes hangs on a lot of systems */
+	pi->sclk_ss = false;
+
+	if (pi->sclk_ss || pi->mclk_ss)
 		pi->dynamic_ss = true;
-	} else {
-		pi->sclk_ss = false;
-		pi->mclk_ss = false;
+	else
 		pi->dynamic_ss = false;
-	}
 
 	pi->dynamic_pcie_gen2 = true;
 
@@ -2085,3 +2090,34 @@ u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low)
 	else
 		return requested_state->high.mclk;
 }
+
+int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
+				      enum radeon_dpm_forced_level level)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+		pi->restricted_levels = 3;
+	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+		pi->restricted_levels = 2;
+	} else {
+		pi->restricted_levels = 0;
+	}
+
+	rv6xx_clear_vc(rdev);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
+	r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);
+	r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
+	rv6xx_enable_medium(rdev);
+	rv6xx_enable_high(rdev);
+	if (pi->restricted_levels == 3)
+		r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, false);
+	rv6xx_program_vc(rdev);
+	rv6xx_program_at(rdev);
+
+	rdev->pm.dpm.forced_level = level;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 30ea14e8854c..9f5846743c9e 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
 						 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
 		radeon_program_register_sequence(rdev,
 						 rv730_golden_registers,
-						 (const u32)ARRAY_SIZE(rv770_golden_registers));
+						 (const u32)ARRAY_SIZE(rv730_golden_registers));
 		radeon_program_register_sequence(rdev,
 						 rv730_mgcg_init,
-						 (const u32)ARRAY_SIZE(rv770_mgcg_init));
+						 (const u32)ARRAY_SIZE(rv730_mgcg_init));
 		break;
 	case CHIP_RV710:
 		radeon_program_register_sequence(rdev,
@@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
 						 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
 		radeon_program_register_sequence(rdev,
 						 rv710_golden_registers,
-						 (const u32)ARRAY_SIZE(rv770_golden_registers));
+						 (const u32)ARRAY_SIZE(rv710_golden_registers));
 		radeon_program_register_sequence(rdev,
 						 rv710_mgcg_init,
-						 (const u32)ARRAY_SIZE(rv770_mgcg_init));
+						 (const u32)ARRAY_SIZE(rv710_mgcg_init));
 		break;
 	case CHIP_RV740:
 		radeon_program_register_sequence(rdev,
 						 rv740_golden_registers,
-						 (const u32)ARRAY_SIZE(rv770_golden_registers));
+						 (const u32)ARRAY_SIZE(rv740_golden_registers));
 		radeon_program_register_sequence(rdev,
 						 rv740_mgcg_init,
-						 (const u32)ARRAY_SIZE(rv770_mgcg_init));
+						 (const u32)ARRAY_SIZE(rv740_mgcg_init));
 		break;
 	default:
 		break;
@@ -801,103 +801,6 @@ u32 rv770_get_xclk(struct radeon_device *rdev)
 	return reference_clock;
 }
 
-int rv770_uvd_resume(struct radeon_device *rdev)
-{
-	uint64_t addr;
-	uint32_t chip_id, size;
-	int r;
-
-	r = radeon_uvd_resume(rdev);
-	if (r)
-		return r;
-
-	/* programm the VCPU memory controller bits 0-27 */
-	addr = rdev->uvd.gpu_addr >> 3;
-	size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3;
-	WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
-	WREG32(UVD_VCPU_CACHE_SIZE0, size);
-
-	addr += size;
-	size = RADEON_UVD_STACK_SIZE >> 3;
-	WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
-	WREG32(UVD_VCPU_CACHE_SIZE1, size);
-
-	addr += size;
-	size = RADEON_UVD_HEAP_SIZE >> 3;
-	WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
-	WREG32(UVD_VCPU_CACHE_SIZE2, size);
-
-	/* bits 28-31 */
-	addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
-	WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
-
-	/* bits 32-39 */
-	addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
-	WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
-
-	/* tell firmware which hardware it is running on */
-	switch (rdev->family) {
-	default:
-		return -EINVAL;
-	case CHIP_RV710:
-		chip_id = 0x01000005;
-		break;
-	case CHIP_RV730:
-		chip_id = 0x01000006;
-		break;
-	case CHIP_RV740:
-		chip_id = 0x01000007;
-		break;
-	case CHIP_CYPRESS:
-	case CHIP_HEMLOCK:
-		chip_id = 0x01000008;
-		break;
-	case CHIP_JUNIPER:
-		chip_id = 0x01000009;
-		break;
-	case CHIP_REDWOOD:
-		chip_id = 0x0100000a;
-		break;
-	case CHIP_CEDAR:
-		chip_id = 0x0100000b;
-		break;
-	case CHIP_SUMO:
-	case CHIP_SUMO2:
-		chip_id = 0x0100000c;
-		break;
-	case CHIP_PALM:
-		chip_id = 0x0100000e;
-		break;
-	case CHIP_CAYMAN:
-		chip_id = 0x0100000f;
-		break;
-	case CHIP_BARTS:
-		chip_id = 0x01000010;
-		break;
-	case CHIP_TURKS:
-		chip_id = 0x01000011;
-		break;
-	case CHIP_CAICOS:
-		chip_id = 0x01000012;
-		break;
-	case CHIP_TAHITI:
-		chip_id = 0x01000014;
-		break;
-	case CHIP_VERDE:
-		chip_id = 0x01000015;
-		break;
-	case CHIP_PITCAIRN:
-		chip_id = 0x01000016;
-		break;
-	case CHIP_ARUBA:
-		chip_id = 0x01000017;
-		break;
-	}
-	WREG32(UVD_VCPU_CHIP_ID, chip_id);
-
-	return 0;
-}
-
 u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
@@ -1747,80 +1650,6 @@ static int rv770_mc_init(struct radeon_device *rdev)
 	return 0;
 }
 
-/**
- * rv770_copy_dma - copy pages using the DMA engine
- *
- * @rdev: radeon_device pointer
- * @src_offset: src GPU address
- * @dst_offset: dst GPU address
- * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
- *
- * Copy GPU paging using the DMA engine (r7xx).
- * Used by the radeon ttm implementation to move pages if
- * registered as the asic copy callback.
- */
-int rv770_copy_dma(struct radeon_device *rdev,
-		   uint64_t src_offset, uint64_t dst_offset,
-		   unsigned num_gpu_pages,
-		   struct radeon_fence **fence)
-{
-	struct radeon_semaphore *sem = NULL;
-	int ring_index = rdev->asic->copy.dma_ring_index;
-	struct radeon_ring *ring = &rdev->ring[ring_index];
-	u32 size_in_dw, cur_size_in_dw;
-	int i, num_loops;
-	int r = 0;
-
-	r = radeon_semaphore_create(rdev, &sem);
-	if (r) {
-		DRM_ERROR("radeon: moving bo (%d).\n", r);
-		return r;
-	}
-
-	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
-	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
-	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
-	if (r) {
-		DRM_ERROR("radeon: moving bo (%d).\n", r);
-		radeon_semaphore_free(rdev, &sem, NULL);
-		return r;
-	}
-
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
-
-	for (i = 0; i < num_loops; i++) {
-		cur_size_in_dw = size_in_dw;
-		if (cur_size_in_dw > 0xFFFF)
-			cur_size_in_dw = 0xFFFF;
-		size_in_dw -= cur_size_in_dw;
-		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
-		radeon_ring_write(ring, dst_offset & 0xfffffffc);
-		radeon_ring_write(ring, src_offset & 0xfffffffc);
-		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
-		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
-		src_offset += cur_size_in_dw * 4;
-		dst_offset += cur_size_in_dw * 4;
-	}
-
-	r = radeon_fence_emit(rdev, fence, ring->idx);
-	if (r) {
-		radeon_ring_unlock_undo(rdev, ring);
-		return r;
-	}
-
-	radeon_ring_unlock_commit(rdev, ring);
-	radeon_semaphore_free(rdev, &sem, *fence);
-
-	return r;
-}
-
 static int rv770_startup(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring;
@@ -1829,6 +1658,13 @@ static int rv770_startup(struct radeon_device *rdev)
 	/* enable pcie gen2 link */
 	rv770_pcie_gen2_enable(rdev);
 
+	/* scratch needs to be initialized before MC */
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
+	rv770_mc_program(rdev);
+
 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
 		r = r600_init_microcode(rdev);
 		if (r) {
@@ -1837,11 +1673,6 @@ static int rv770_startup(struct radeon_device *rdev)
 		}
 	}
 
-	r = r600_vram_scratch_init(rdev);
-	if (r)
-		return r;
-
-	rv770_mc_program(rdev);
 	if (rdev->flags & RADEON_IS_AGP) {
 		rv770_agp_enable(rdev);
 	} else {
@@ -1851,12 +1682,6 @@ static int rv770_startup(struct radeon_device *rdev)
 	}
 
 	rv770_gpu_init(rdev);
-	r = r600_blit_init(rdev);
-	if (r) {
-		r600_blit_fini(rdev);
-		rdev->asic->copy.copy = NULL;
-		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
-	}
 
 	/* allocate wb buffer */
 	r = radeon_wb_init(rdev);
@@ -1875,7 +1700,7 @@ static int rv770_startup(struct radeon_device *rdev)
 		return r;
 	}
 
-	r = rv770_uvd_resume(rdev);
+	r = uvd_v2_2_resume(rdev);
 	if (!r) {
 		r = radeon_fence_driver_start_ring(rdev,
 						   R600_RING_TYPE_UVD_INDEX);
@@ -1904,14 +1729,14 @@ static int rv770_startup(struct radeon_device *rdev)
 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
 			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
-			     0, 0xfffff, RADEON_CP_PACKET2);
+			     RADEON_CP_PACKET2);
 	if (r)
 		return r;
 
 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
 			     DMA_RB_RPTR, DMA_RB_WPTR,
-			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
 	if (r)
 		return r;
 
@@ -1928,12 +1753,11 @@ static int rv770_startup(struct radeon_device *rdev)
 
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
 	if (ring->ring_size) {
-		r = radeon_ring_init(rdev, ring, ring->ring_size,
-				     R600_WB_UVD_RPTR_OFFSET,
+		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
 				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
-				     0, 0xfffff, RADEON_CP_PACKET2);
+				     RADEON_CP_PACKET2);
 		if (!r)
-			r = r600_uvd_init(rdev);
+			r = uvd_v1_0_init(rdev);
 
 		if (r)
 			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
@@ -1983,6 +1807,7 @@ int rv770_resume(struct radeon_device *rdev)
 int rv770_suspend(struct radeon_device *rdev)
 {
 	r600_audio_fini(rdev);
+	uvd_v1_0_fini(rdev);
 	radeon_uvd_suspend(rdev);
 	r700_cp_stop(rdev);
 	r600_dma_stop(rdev);
@@ -2090,7 +1915,6 @@ int rv770_init(struct radeon_device *rdev)
 
 void rv770_fini(struct radeon_device *rdev)
 {
-	r600_blit_fini(rdev);
 	r700_cp_fini(rdev);
 	r600_dma_fini(rdev);
 	r600_irq_fini(rdev);
@@ -2098,6 +1922,7 @@ void rv770_fini(struct radeon_device *rdev)
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	rv770_pcie_gart_fini(rdev);
+	uvd_v1_0_fini(rdev);
 	radeon_uvd_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
new file mode 100644
index 000000000000..f9b02e3d6830
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -0,0 +1,101 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <drm/drmP.h>
25#include "radeon.h"
26#include "radeon_asic.h"
27#include "rv770d.h"
28
29/**
30 * rv770_copy_dma - copy pages using the DMA engine
31 *
32 * @rdev: radeon_device pointer
33 * @src_offset: src GPU address
34 * @dst_offset: dst GPU address
35 * @num_gpu_pages: number of GPU pages to xfer
36 * @fence: radeon fence object
37 *
38 * Copy GPU pages using the DMA engine (r7xx).
39 * Used by the radeon ttm implementation to move pages if
40 * registered as the asic copy callback.
41 */
42int rv770_copy_dma(struct radeon_device *rdev,
43 uint64_t src_offset, uint64_t dst_offset,
44 unsigned num_gpu_pages,
45 struct radeon_fence **fence)
46{
47 struct radeon_semaphore *sem = NULL;
48 int ring_index = rdev->asic->copy.dma_ring_index;
49 struct radeon_ring *ring = &rdev->ring[ring_index];
50 u32 size_in_dw, cur_size_in_dw;
51 int i, num_loops;
52 int r = 0;
53
54 r = radeon_semaphore_create(rdev, &sem);
55 if (r) {
56 DRM_ERROR("radeon: moving bo (%d).\n", r);
57 return r;
58 }
59
60 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
61 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
62 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
63 if (r) {
64 DRM_ERROR("radeon: moving bo (%d).\n", r);
65 radeon_semaphore_free(rdev, &sem, NULL);
66 return r;
67 }
68
69 if (radeon_fence_need_sync(*fence, ring->idx)) {
70 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
71 ring->idx);
72 radeon_fence_note_sync(*fence, ring->idx);
73 } else {
74 radeon_semaphore_free(rdev, &sem, NULL);
75 }
76
77 for (i = 0; i < num_loops; i++) {
78 cur_size_in_dw = size_in_dw;
79 if (cur_size_in_dw > 0xFFFF)
80 cur_size_in_dw = 0xFFFF;
81 size_in_dw -= cur_size_in_dw;
82 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
83 radeon_ring_write(ring, dst_offset & 0xfffffffc);
84 radeon_ring_write(ring, src_offset & 0xfffffffc);
85 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
86 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
87 src_offset += cur_size_in_dw * 4;
88 dst_offset += cur_size_in_dw * 4;
89 }
90
91 r = radeon_fence_emit(rdev, fence, ring->idx);
92 if (r) {
93 radeon_ring_unlock_undo(rdev, ring);
94 return r;
95 }
96
97 radeon_ring_unlock_commit(rdev, ring);
98 radeon_semaphore_free(rdev, &sem, *fence);
99
100 return r;
101}
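The copy is split into chunks because the r7xx DMA COPY packet carries at most 0xFFFF dwords per command, and the ring reservation above (num_loops * 5 + 8) has to be sized before anything is emitted. A minimal userspace sketch of the same arithmetic, with illustrative values rather than the driver's types:

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT 12      /* assumption: 4 KiB GPU pages, as in RADEON_GPU_PAGE_SHIFT */
#define MAX_COPY_DW    0xFFFF  /* per-packet dword limit of the r7xx DMA COPY packet */

int main(void)
{
    unsigned num_gpu_pages = 1024;   /* example transfer: 4 MiB */
    uint32_t size_in_dw = (num_gpu_pages << GPU_PAGE_SHIFT) / 4;
    /* DIV_ROUND_UP(size_in_dw, MAX_COPY_DW) */
    unsigned num_loops = (size_in_dw + MAX_COPY_DW - 1) / MAX_COPY_DW;

    /* each loop emits 5 ring dwords (header, two addresses, two
     * upper-bits words), plus 8 dwords of fixed overhead for the
     * semaphore sync and fence emit */
    unsigned ring_dw = num_loops * 5 + 8;

    printf("%u dwords in %u loops, %u ring dwords reserved\n",
           size_in_dw, num_loops, ring_dw);
    return 0;
}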
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 2d347925f77d..8cbb85dae5aa 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2294,6 +2294,7 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
2294 (power_state->v1.ucNonClockStateIndex * 2294 (power_state->v1.ucNonClockStateIndex *
2295 power_info->pplib.ucNonClockSize)); 2295 power_info->pplib.ucNonClockSize));
2296 if (power_info->pplib.ucStateEntrySize - 1) { 2296 if (power_info->pplib.ucStateEntrySize - 1) {
2297 u8 *idx;
2297 ps = kzalloc(sizeof(struct rv7xx_ps), GFP_KERNEL); 2298 ps = kzalloc(sizeof(struct rv7xx_ps), GFP_KERNEL);
2298 if (ps == NULL) { 2299 if (ps == NULL) {
2299 kfree(rdev->pm.dpm.ps); 2300 kfree(rdev->pm.dpm.ps);
@@ -2303,12 +2304,12 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
2303 rv7xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], 2304 rv7xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
2304 non_clock_info, 2305 non_clock_info,
2305 power_info->pplib.ucNonClockSize); 2306 power_info->pplib.ucNonClockSize);
2307 idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
2306 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { 2308 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
2307 clock_info = (union pplib_clock_info *) 2309 clock_info = (union pplib_clock_info *)
2308 (mode_info->atom_context->bios + data_offset + 2310 (mode_info->atom_context->bios + data_offset +
2309 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + 2311 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
2310 (power_state->v1.ucClockStateIndices[j] * 2312 (idx[j] * power_info->pplib.ucClockInfoSize));
2311 power_info->pplib.ucClockInfoSize));
2312 rv7xx_parse_pplib_clock_info(rdev, 2313 rv7xx_parse_pplib_clock_info(rdev,
2313 &rdev->pm.dpm.ps[i], j, 2314 &rdev->pm.dpm.ps[i], j,
2314 clock_info); 2315 clock_info);
@@ -2319,12 +2320,25 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
2319 return 0; 2320 return 0;
2320} 2321}
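The parser change above walks the variable-length ucClockStateIndices array through a plain byte pointer instead of indexing the fixed-size array in the struct declaration, which only nominally has one element. A sketch of the same byte-walk over a fake BIOS blob (the struct layout here is a stand-in, not the real ATOM definition):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout standing in for an ATOM pplib state entry: a one-byte
 * header followed by ucStateEntrySize - 1 clock-state index bytes. */
struct fake_power_state {
    uint8_t ucNonClockStateIndex;
    uint8_t ucClockStateIndices[1];   /* actually variable length in the BIOS image */
};

int main(void)
{
    uint8_t blob[] = { 0x02, 0x00, 0x01, 0x03 };   /* example BIOS bytes */
    struct fake_power_state *ps = (struct fake_power_state *)blob;
    unsigned entry_size = 4;                        /* example ucStateEntrySize */
    const uint8_t *idx = &ps->ucClockStateIndices[0];

    /* walk the trailing index bytes exactly as the parser now does */
    for (unsigned j = 0; j < entry_size - 1; j++)
        printf("clock_info[%u] = index %u\n", j, idx[j]);
    return 0;
}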
2321 2322
2323void rv770_get_engine_memory_ss(struct radeon_device *rdev)
2324{
2325 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2326 struct radeon_atom_ss ss;
2327
2328 pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
2329 ASIC_INTERNAL_ENGINE_SS, 0);
2330 pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
2331 ASIC_INTERNAL_MEMORY_SS, 0);
2332
2333 if (pi->sclk_ss || pi->mclk_ss)
2334 pi->dynamic_ss = true;
2335 else
2336 pi->dynamic_ss = false;
2337}
2338
2322int rv770_dpm_init(struct radeon_device *rdev) 2339int rv770_dpm_init(struct radeon_device *rdev)
2323{ 2340{
2324 struct rv7xx_power_info *pi; 2341 struct rv7xx_power_info *pi;
2325 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
2326 uint16_t data_offset, size;
2327 uint8_t frev, crev;
2328 struct atom_clock_dividers dividers; 2342 struct atom_clock_dividers dividers;
2329 int ret; 2343 int ret;
2330 2344
@@ -2369,16 +2383,7 @@ int rv770_dpm_init(struct radeon_device *rdev)
2369 pi->mvdd_control = 2383 pi->mvdd_control =
2370 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0); 2384 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
2371 2385
2372 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, 2386 rv770_get_engine_memory_ss(rdev);
2373 &frev, &crev, &data_offset)) {
2374 pi->sclk_ss = true;
2375 pi->mclk_ss = true;
2376 pi->dynamic_ss = true;
2377 } else {
2378 pi->sclk_ss = false;
2379 pi->mclk_ss = false;
2380 pi->dynamic_ss = false;
2381 }
2382 2387
2383 pi->asi = RV770_ASI_DFLT; 2388 pi->asi = RV770_ASI_DFLT;
2384 pi->pasi = RV770_HASI_DFLT; 2389 pi->pasi = RV770_HASI_DFLT;
@@ -2393,8 +2398,7 @@ int rv770_dpm_init(struct radeon_device *rdev)
2393 2398
2394 pi->dynamic_pcie_gen2 = true; 2399 pi->dynamic_pcie_gen2 = true;
2395 2400
2396 if (pi->gfx_clock_gating && 2401 if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
2397 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
2398 pi->thermal_protection = true; 2402 pi->thermal_protection = true;
2399 else 2403 else
2400 pi->thermal_protection = false; 2404 pi->thermal_protection = false;
@@ -2514,8 +2518,16 @@ u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low)
2514bool rv770_dpm_vblank_too_short(struct radeon_device *rdev) 2518bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
2515{ 2519{
2516 u32 vblank_time = r600_dpm_get_vblank_time(rdev); 2520 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
2521 u32 switch_limit = 300;
2522
2523 /* quirks */
2524 /* ASUS K70AF */
2525 if ((rdev->pdev->device == 0x9553) &&
2526 (rdev->pdev->subsystem_vendor == 0x1043) &&
2527 (rdev->pdev->subsystem_device == 0x1c42))
2528 switch_limit = 200;
2517 2529
2518 if (vblank_time < 300) 2530 if (vblank_time < switch_limit)
2519 return true; 2531 return true;
2520 else 2532 else
2521 return false; 2533 return false;
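The quirk above hard-codes a single PCI subsystem match (device 0x9553, ASUS K70AF) to lower the mclk switch limit from 300 to 200. If more boards needed the same treatment, a small table lookup would generalize it; a hedged sketch with hypothetical names, not the driver's code:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical quirk table; the driver open-codes a single compare. */
struct mclk_quirk {
    uint16_t device, sub_vendor, sub_device;
    uint32_t switch_limit;   /* usec */
};

static const struct mclk_quirk quirks[] = {
    { 0x9553, 0x1043, 0x1c42, 200 },   /* ASUS K70AF */
};

static uint32_t vblank_switch_limit(uint16_t dev, uint16_t sv, uint16_t sd)
{
    for (unsigned i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
        if (quirks[i].device == dev &&
            quirks[i].sub_vendor == sv &&
            quirks[i].sub_device == sd)
            return quirks[i].switch_limit;
    return 300;   /* default vblank time needed for an mclk switch */
}

int main(void)
{
    printf("K70AF limit: %u\n", vblank_switch_limit(0x9553, 0x1043, 0x1c42));
    printf("default limit: %u\n", vblank_switch_limit(0x9553, 0x1043, 0x0000));
    return 0;
}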
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h
index 96b1b2a62a8a..9244effc6b59 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.h
+++ b/drivers/gpu/drm/radeon/rv770_dpm.h
@@ -275,6 +275,7 @@ void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
275void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev, 275void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
276 struct radeon_ps *new_ps, 276 struct radeon_ps *new_ps,
277 struct radeon_ps *old_ps); 277 struct radeon_ps *old_ps);
278void rv770_get_engine_memory_ss(struct radeon_device *rdev);
278 279
279/* smc */ 280/* smc */
280int rv770_read_smc_soft_register(struct radeon_device *rdev, 281int rv770_read_smc_soft_register(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 6bef2b7d601b..9fe60e542922 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -971,7 +971,21 @@
971# define TARGET_LINK_SPEED_MASK (0xf << 0) 971# define TARGET_LINK_SPEED_MASK (0xf << 0)
972# define SELECTABLE_DEEMPHASIS (1 << 6) 972# define SELECTABLE_DEEMPHASIS (1 << 6)
973 973
974/*
975 * PM4
976 */
977#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
978 (((reg) >> 2) & 0xFFFF) | \
979 ((n) & 0x3FFF) << 16)
980#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
981 (((op) & 0xFF) << 8) | \
982 ((n) & 0x3FFF) << 16)
983
974/* UVD */ 984/* UVD */
985#define UVD_GPCOM_VCPU_CMD 0xef0c
986#define UVD_GPCOM_VCPU_DATA0 0xef10
987#define UVD_GPCOM_VCPU_DATA1 0xef14
988
975#define UVD_LMI_EXT40_ADDR 0xf498 989#define UVD_LMI_EXT40_ADDR 0xf498
976#define UVD_VCPU_CHIP_ID 0xf4d4 990#define UVD_VCPU_CHIP_ID 0xf4d4
977#define UVD_VCPU_CACHE_OFFSET0 0xf4d8 991#define UVD_VCPU_CACHE_OFFSET0 0xf4d8
@@ -985,4 +999,6 @@
985#define UVD_RBC_RB_RPTR 0xf690 999#define UVD_RBC_RB_RPTR 0xf690
986#define UVD_RBC_RB_WPTR 0xf694 1000#define UVD_RBC_RB_WPTR 0xf694
987 1001
1002#define UVD_CONTEXT_ID 0xf6f4
1003
988#endif 1004#endif
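The PM4 macros added to rv770d.h pack a packet type into bits 30-31, a register offset or opcode into the low bits, and a dword count into bits 16-29. A self-contained sketch that reuses the same encodings and decodes a header back out (the type values 0 and 3 are assumed to match RADEON_PACKET_TYPE0/TYPE3 in the radeon headers):

#include <stdint.h>
#include <stdio.h>

/* Assumed values of the type fields (0 and 3 in the radeon headers). */
#define RADEON_PACKET_TYPE0 0
#define RADEON_PACKET_TYPE3 3

#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
                         (((reg) >> 2) & 0xFFFF) |     \
                         ((n) & 0x3FFF) << 16)
#define PACKET3(op, n)  ((RADEON_PACKET_TYPE3 << 30) | \
                         (((op) & 0xFF) << 8) |        \
                         ((n) & 0x3FFF) << 16)

int main(void)
{
    /* a type-3 header for a hypothetical opcode 0x10 with 2 payload dwords */
    uint32_t hdr = PACKET3(0x10, 2);
    printf("type=%u opcode=0x%02x count=%u\n",
           (unsigned)(hdr >> 30), (hdr >> 8) & 0xFF, (hdr >> 16) & 0x3FFF);
    return 0;
}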
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d325280e2f9f..3e23b757dcfa 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -68,6 +68,8 @@ MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
68 68
69static void si_pcie_gen3_enable(struct radeon_device *rdev); 69static void si_pcie_gen3_enable(struct radeon_device *rdev);
70static void si_program_aspm(struct radeon_device *rdev); 70static void si_program_aspm(struct radeon_device *rdev);
71extern void sumo_rlc_fini(struct radeon_device *rdev);
72extern int sumo_rlc_init(struct radeon_device *rdev);
71extern int r600_ih_ring_alloc(struct radeon_device *rdev); 73extern int r600_ih_ring_alloc(struct radeon_device *rdev);
72extern void r600_ih_ring_fini(struct radeon_device *rdev); 74extern void r600_ih_ring_fini(struct radeon_device *rdev);
73extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev); 75extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
@@ -76,6 +78,11 @@ extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
76extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev); 78extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
77extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); 79extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
78extern bool evergreen_is_display_hung(struct radeon_device *rdev); 80extern bool evergreen_is_display_hung(struct radeon_device *rdev);
81extern void si_dma_vm_set_page(struct radeon_device *rdev,
82 struct radeon_ib *ib,
83 uint64_t pe,
84 uint64_t addr, unsigned count,
85 uint32_t incr, uint32_t flags);
79 86
80static const u32 verde_rlc_save_restore_register_list[] = 87static const u32 verde_rlc_save_restore_register_list[] =
81{ 88{
@@ -1663,9 +1670,13 @@ static int si_init_microcode(struct radeon_device *rdev)
1663 1670
1664 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); 1671 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
1665 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); 1672 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1666 if (err) 1673 if (err) {
1667 goto out; 1674 printk(KERN_ERR
1668 if (rdev->smc_fw->size != smc_req_size) { 1675 "smc: error loading firmware \"%s\"\n",
1676 fw_name);
1677 release_firmware(rdev->smc_fw);
1678 rdev->smc_fw = NULL;
1679 } else if (rdev->smc_fw->size != smc_req_size) {
1669 printk(KERN_ERR 1680 printk(KERN_ERR
1670 "si_smc: Bogus length %zu in firmware \"%s\"\n", 1681 "si_smc: Bogus length %zu in firmware \"%s\"\n",
1671 rdev->smc_fw->size, fw_name); 1682 rdev->smc_fw->size, fw_name);
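The smc hunk turns a hard failure into an optional dependency: when the SMC image is missing, the driver now logs, releases the firmware, clears the pointer, and lets probe continue (DPM is simply unavailable). A sketch of that optional-resource idiom with stand-in helpers, not the kernel API:

#include <stdio.h>
#include <stdlib.h>

struct fw { size_t size; };

/* Stand-in for request_firmware(): pretend the blob is absent. */
static int request_fw(struct fw **out) { *out = NULL; return -2; }
static void release_fw(struct fw *f)   { free(f); }

int main(void)
{
    struct fw *smc_fw = NULL;
    int err = request_fw(&smc_fw);

    if (err) {
        /* optional firmware: warn and continue without it */
        fprintf(stderr, "smc: error loading firmware (%d)\n", err);
        release_fw(smc_fw);
        smc_fw = NULL;
    }
    printf("probe continues, dpm %savailable\n", smc_fw ? "" : "un");
    return 0;
}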
@@ -1700,7 +1711,8 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
1700 struct drm_display_mode *mode, 1711 struct drm_display_mode *mode,
1701 struct drm_display_mode *other_mode) 1712 struct drm_display_mode *other_mode)
1702{ 1713{
1703 u32 tmp; 1714 u32 tmp, buffer_alloc, i;
1715 u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
1704 /* 1716 /*
1705 * Line Buffer Setup 1717 * Line Buffer Setup
1706 * There are 3 line buffers, each one shared by 2 display controllers. 1718 * There are 3 line buffers, each one shared by 2 display controllers.
@@ -1715,16 +1727,30 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
1715 * non-linked crtcs for maximum line buffer allocation. 1727 * non-linked crtcs for maximum line buffer allocation.
1716 */ 1728 */
1717 if (radeon_crtc->base.enabled && mode) { 1729 if (radeon_crtc->base.enabled && mode) {
1718 if (other_mode) 1730 if (other_mode) {
1719 tmp = 0; /* 1/2 */ 1731 tmp = 0; /* 1/2 */
1720 else 1732 buffer_alloc = 1;
1733 } else {
1721 tmp = 2; /* whole */ 1734 tmp = 2; /* whole */
1722 } else 1735 buffer_alloc = 2;
1736 }
1737 } else {
1723 tmp = 0; 1738 tmp = 0;
1739 buffer_alloc = 0;
1740 }
1724 1741
1725 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, 1742 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
1726 DC_LB_MEMORY_CONFIG(tmp)); 1743 DC_LB_MEMORY_CONFIG(tmp));
1727 1744
1745 WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1746 DMIF_BUFFERS_ALLOCATED(buffer_alloc));
1747 for (i = 0; i < rdev->usec_timeout; i++) {
1748 if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1749 DMIF_BUFFERS_ALLOCATED_COMPLETED)
1750 break;
1751 udelay(1);
1752 }
1753
1728 if (radeon_crtc->base.enabled && mode) { 1754 if (radeon_crtc->base.enabled && mode) {
1729 switch (tmp) { 1755 switch (tmp) {
1730 case 0: 1756 case 0:
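The new DMIF code above programs the buffer allocation and then polls a completion bit for up to rdev->usec_timeout microseconds, sleeping 1 us per iteration; if the bit never latches it simply stops waiting. A generic sketch of that poll-with-timeout idiom, with fake register accessors standing in for WREG32/RREG32/udelay:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for MMIO accessors; in the driver these touch hardware. */
static uint32_t fake_reg;
static void reg_write(uint32_t v) { fake_reg = v | 0x10; /* pretend HW acks */ }
static uint32_t reg_read(void)    { return fake_reg; }
static void fake_udelay(unsigned us) { (void)us; }

#define COMPLETED_BIT 0x10
#define USEC_TIMEOUT  100000

static bool program_and_wait(uint32_t alloc)
{
    reg_write(alloc);
    for (unsigned i = 0; i < USEC_TIMEOUT; i++) {
        if (reg_read() & COMPLETED_BIT)
            return true;    /* hardware acked the allocation */
        fake_udelay(1);
    }
    return false;           /* timed out; the driver just proceeds */
}

int main(void)
{
    printf("acked: %d\n", program_and_wait(2));
    return 0;
}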
@@ -3360,17 +3386,6 @@ static int si_cp_resume(struct radeon_device *rdev)
3360 u32 rb_bufsz; 3386 u32 rb_bufsz;
3361 int r; 3387 int r;
3362 3388
3363 /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
3364 WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
3365 SOFT_RESET_PA |
3366 SOFT_RESET_VGT |
3367 SOFT_RESET_SPI |
3368 SOFT_RESET_SX));
3369 RREG32(GRBM_SOFT_RESET);
3370 mdelay(15);
3371 WREG32(GRBM_SOFT_RESET, 0);
3372 RREG32(GRBM_SOFT_RESET);
3373
3374 WREG32(CP_SEM_WAIT_TIMER, 0x0); 3389 WREG32(CP_SEM_WAIT_TIMER, 0x0);
3375 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); 3390 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
3376 3391
@@ -3383,8 +3398,8 @@ static int si_cp_resume(struct radeon_device *rdev)
3383 /* ring 0 - compute and gfx */ 3398 /* ring 0 - compute and gfx */
3384 /* Set ring buffer size */ 3399 /* Set ring buffer size */
3385 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 3400 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3386 rb_bufsz = drm_order(ring->ring_size / 8); 3401 rb_bufsz = order_base_2(ring->ring_size / 8);
3387 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 3402 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
3388#ifdef __BIG_ENDIAN 3403#ifdef __BIG_ENDIAN
3389 tmp |= BUF_SWAP_32BIT; 3404 tmp |= BUF_SWAP_32BIT;
3390#endif 3405#endif
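The drm_order -> order_base_2 conversions in these hunks are behavior-preserving: both helpers return ceil(log2(n)), so the ring-buffer size field is encoded the same way and only the DRM wrapper is dropped. A tiny illustration of that shared semantic:

#include <stdio.h>

/* ceil(log2(n)) for n >= 1: the semantic shared by drm_order() and the
 * kernel's order_base_2(). */
static unsigned order_base_2(unsigned long n)
{
    unsigned order = 0;
    while ((1UL << order) < n)
        order++;
    return order;
}

int main(void)
{
    /* e.g. a 1 MiB ring: ring_size / 8 = 131072 bytes -> order 17 */
    printf("%u\n", order_base_2((1UL << 20) / 8));
    printf("%u %u %u\n", order_base_2(1), order_base_2(2), order_base_2(3));
    return 0;
}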
@@ -3416,8 +3431,8 @@ static int si_cp_resume(struct radeon_device *rdev)
3416 /* ring1 - compute only */ 3431 /* ring1 - compute only */
3417 /* Set ring buffer size */ 3432 /* Set ring buffer size */
3418 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; 3433 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
3419 rb_bufsz = drm_order(ring->ring_size / 8); 3434 rb_bufsz = order_base_2(ring->ring_size / 8);
3420 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 3435 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
3421#ifdef __BIG_ENDIAN 3436#ifdef __BIG_ENDIAN
3422 tmp |= BUF_SWAP_32BIT; 3437 tmp |= BUF_SWAP_32BIT;
3423#endif 3438#endif
@@ -3442,8 +3457,8 @@ static int si_cp_resume(struct radeon_device *rdev)
3442 /* ring2 - compute only */ 3457 /* ring2 - compute only */
3443 /* Set ring buffer size */ 3458 /* Set ring buffer size */
3444 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; 3459 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
3445 rb_bufsz = drm_order(ring->ring_size / 8); 3460 rb_bufsz = order_base_2(ring->ring_size / 8);
3446 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 3461 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
3447#ifdef __BIG_ENDIAN 3462#ifdef __BIG_ENDIAN
3448 tmp |= BUF_SWAP_32BIT; 3463 tmp |= BUF_SWAP_32BIT;
3449#endif 3464#endif
@@ -3489,7 +3504,7 @@ static int si_cp_resume(struct radeon_device *rdev)
3489 return 0; 3504 return 0;
3490} 3505}
3491 3506
3492static u32 si_gpu_check_soft_reset(struct radeon_device *rdev) 3507u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
3493{ 3508{
3494 u32 reset_mask = 0; 3509 u32 reset_mask = 0;
3495 u32 tmp; 3510 u32 tmp;
@@ -3738,34 +3753,6 @@ bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
3738 return radeon_ring_test_lockup(rdev, ring); 3753 return radeon_ring_test_lockup(rdev, ring);
3739} 3754}
3740 3755
3741/**
3742 * si_dma_is_lockup - Check if the DMA engine is locked up
3743 *
3744 * @rdev: radeon_device pointer
3745 * @ring: radeon_ring structure holding ring information
3746 *
3747 * Check if the async DMA engine is locked up.
3748 * Returns true if the engine appears to be locked up, false if not.
3749 */
3750bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
3751{
3752 u32 reset_mask = si_gpu_check_soft_reset(rdev);
3753 u32 mask;
3754
3755 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
3756 mask = RADEON_RESET_DMA;
3757 else
3758 mask = RADEON_RESET_DMA1;
3759
3760 if (!(reset_mask & mask)) {
3761 radeon_ring_lockup_update(ring);
3762 return false;
3763 }
3764 /* force ring activities */
3765 radeon_ring_force_activity(rdev, ring);
3766 return radeon_ring_test_lockup(rdev, ring);
3767}
3768
3769/* MC */ 3756/* MC */
3770static void si_mc_program(struct radeon_device *rdev) 3757static void si_mc_program(struct radeon_device *rdev)
3771{ 3758{
@@ -4079,13 +4066,64 @@ static int si_vm_packet3_ce_check(struct radeon_device *rdev,
4079 return 0; 4066 return 0;
4080} 4067}
4081 4068
4069static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
4070{
4071 u32 start_reg, reg, i;
4072 u32 command = ib[idx + 4];
4073 u32 info = ib[idx + 1];
4074 u32 idx_value = ib[idx];
4075 if (command & PACKET3_CP_DMA_CMD_SAS) {
4076 /* src address space is register */
4077 if (((info & 0x60000000) >> 29) == 0) {
4078 start_reg = idx_value << 2;
4079 if (command & PACKET3_CP_DMA_CMD_SAIC) {
4080 reg = start_reg;
4081 if (!si_vm_reg_valid(reg)) {
4082 DRM_ERROR("CP DMA Bad SRC register\n");
4083 return -EINVAL;
4084 }
4085 } else {
4086 for (i = 0; i < (command & 0x1fffff); i++) {
4087 reg = start_reg + (4 * i);
4088 if (!si_vm_reg_valid(reg)) {
4089 DRM_ERROR("CP DMA Bad SRC register\n");
4090 return -EINVAL;
4091 }
4092 }
4093 }
4094 }
4095 }
4096 if (command & PACKET3_CP_DMA_CMD_DAS) {
4097 /* dst address space is register */
4098 if (((info & 0x00300000) >> 20) == 0) {
4099 start_reg = ib[idx + 2];
4100 if (command & PACKET3_CP_DMA_CMD_DAIC) {
4101 reg = start_reg;
4102 if (!si_vm_reg_valid(reg)) {
4103 DRM_ERROR("CP DMA Bad DST register\n");
4104 return -EINVAL;
4105 }
4106 } else {
4107 for (i = 0; i < (command & 0x1fffff); i++) {
4108 reg = start_reg + (4 * i);
4109 if (!si_vm_reg_valid(reg)) {
4110 DRM_ERROR("CP DMA Bad DST register\n");
4111 return -EINVAL;
4112 }
4113 }
4114 }
4115 }
4116 }
4117 return 0;
4118}
4119
4082static int si_vm_packet3_gfx_check(struct radeon_device *rdev, 4120static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
4083 u32 *ib, struct radeon_cs_packet *pkt) 4121 u32 *ib, struct radeon_cs_packet *pkt)
4084{ 4122{
4123 int r;
4085 u32 idx = pkt->idx + 1; 4124 u32 idx = pkt->idx + 1;
4086 u32 idx_value = ib[idx]; 4125 u32 idx_value = ib[idx];
4087 u32 start_reg, end_reg, reg, i; 4126 u32 start_reg, end_reg, reg, i;
4088 u32 command, info;
4089 4127
4090 switch (pkt->opcode) { 4128 switch (pkt->opcode) {
4091 case PACKET3_NOP: 4129 case PACKET3_NOP:
@@ -4186,50 +4224,9 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
4186 } 4224 }
4187 break; 4225 break;
4188 case PACKET3_CP_DMA: 4226 case PACKET3_CP_DMA:
4189 command = ib[idx + 4]; 4227 r = si_vm_packet3_cp_dma_check(ib, idx);
4190 info = ib[idx + 1]; 4228 if (r)
4191 if (command & PACKET3_CP_DMA_CMD_SAS) { 4229 return r;
4192 /* src address space is register */
4193 if (((info & 0x60000000) >> 29) == 0) {
4194 start_reg = idx_value << 2;
4195 if (command & PACKET3_CP_DMA_CMD_SAIC) {
4196 reg = start_reg;
4197 if (!si_vm_reg_valid(reg)) {
4198 DRM_ERROR("CP DMA Bad SRC register\n");
4199 return -EINVAL;
4200 }
4201 } else {
4202 for (i = 0; i < (command & 0x1fffff); i++) {
4203 reg = start_reg + (4 * i);
4204 if (!si_vm_reg_valid(reg)) {
4205 DRM_ERROR("CP DMA Bad SRC register\n");
4206 return -EINVAL;
4207 }
4208 }
4209 }
4210 }
4211 }
4212 if (command & PACKET3_CP_DMA_CMD_DAS) {
4213 /* dst address space is register */
4214 if (((info & 0x00300000) >> 20) == 0) {
4215 start_reg = ib[idx + 2];
4216 if (command & PACKET3_CP_DMA_CMD_DAIC) {
4217 reg = start_reg;
4218 if (!si_vm_reg_valid(reg)) {
4219 DRM_ERROR("CP DMA Bad DST register\n");
4220 return -EINVAL;
4221 }
4222 } else {
4223 for (i = 0; i < (command & 0x1fffff); i++) {
4224 reg = start_reg + (4 * i);
4225 if (!si_vm_reg_valid(reg)) {
4226 DRM_ERROR("CP DMA Bad DST register\n");
4227 return -EINVAL;
4228 }
4229 }
4230 }
4231 }
4232 }
4233 break; 4230 break;
4234 default: 4231 default:
4235 DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode); 4232 DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
@@ -4241,6 +4238,7 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
4241static int si_vm_packet3_compute_check(struct radeon_device *rdev, 4238static int si_vm_packet3_compute_check(struct radeon_device *rdev,
4242 u32 *ib, struct radeon_cs_packet *pkt) 4239 u32 *ib, struct radeon_cs_packet *pkt)
4243{ 4240{
4241 int r;
4244 u32 idx = pkt->idx + 1; 4242 u32 idx = pkt->idx + 1;
4245 u32 idx_value = ib[idx]; 4243 u32 idx_value = ib[idx];
4246 u32 start_reg, reg, i; 4244 u32 start_reg, reg, i;
@@ -4313,6 +4311,11 @@ static int si_vm_packet3_compute_check(struct radeon_device *rdev,
4313 return -EINVAL; 4311 return -EINVAL;
4314 } 4312 }
4315 break; 4313 break;
4314 case PACKET3_CP_DMA:
4315 r = si_vm_packet3_cp_dma_check(ib, idx);
4316 if (r)
4317 return r;
4318 break;
4316 default: 4319 default:
4317 DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode); 4320 DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
4318 return -EINVAL; 4321 return -EINVAL;
@@ -4704,58 +4707,7 @@ void si_vm_set_page(struct radeon_device *rdev,
4704 } 4707 }
4705 } else { 4708 } else {
4706 /* DMA */ 4709 /* DMA */
4707 if (flags & RADEON_VM_PAGE_SYSTEM) { 4710 si_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
4708 while (count) {
4709 ndw = count * 2;
4710 if (ndw > 0xFFFFE)
4711 ndw = 0xFFFFE;
4712
4713 /* for non-physically contiguous pages (system) */
4714 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
4715 ib->ptr[ib->length_dw++] = pe;
4716 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
4717 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
4718 if (flags & RADEON_VM_PAGE_SYSTEM) {
4719 value = radeon_vm_map_gart(rdev, addr);
4720 value &= 0xFFFFFFFFFFFFF000ULL;
4721 } else if (flags & RADEON_VM_PAGE_VALID) {
4722 value = addr;
4723 } else {
4724 value = 0;
4725 }
4726 addr += incr;
4727 value |= r600_flags;
4728 ib->ptr[ib->length_dw++] = value;
4729 ib->ptr[ib->length_dw++] = upper_32_bits(value);
4730 }
4731 }
4732 } else {
4733 while (count) {
4734 ndw = count * 2;
4735 if (ndw > 0xFFFFE)
4736 ndw = 0xFFFFE;
4737
4738 if (flags & RADEON_VM_PAGE_VALID)
4739 value = addr;
4740 else
4741 value = 0;
4742 /* for physically contiguous pages (vram) */
4743 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
4744 ib->ptr[ib->length_dw++] = pe; /* dst addr */
4745 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
4746 ib->ptr[ib->length_dw++] = r600_flags; /* mask */
4747 ib->ptr[ib->length_dw++] = 0;
4748 ib->ptr[ib->length_dw++] = value; /* value */
4749 ib->ptr[ib->length_dw++] = upper_32_bits(value);
4750 ib->ptr[ib->length_dw++] = incr; /* increment size */
4751 ib->ptr[ib->length_dw++] = 0;
4752 pe += ndw * 4;
4753 addr += (ndw / 2) * incr;
4754 count -= ndw / 2;
4755 }
4756 }
4757 while (ib->length_dw & 0x7)
4758 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
4759 } 4711 }
4760} 4712}
4761 4713
@@ -4802,32 +4754,6 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
4802 radeon_ring_write(ring, 0x0); 4754 radeon_ring_write(ring, 0x0);
4803} 4755}
4804 4756
4805void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
4806{
4807 struct radeon_ring *ring = &rdev->ring[ridx];
4808
4809 if (vm == NULL)
4810 return;
4811
4812 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
4813 if (vm->id < 8) {
4814 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
4815 } else {
4816 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
4817 }
4818 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
4819
4820 /* flush hdp cache */
4821 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
4822 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
4823 radeon_ring_write(ring, 1);
4824
4825 /* bits 0-7 are the VM contexts0-7 */
4826 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
4827 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
4828 radeon_ring_write(ring, 1 << vm->id);
4829}
4830
4831/* 4757/*
4832 * Power and clock gating 4758 * Power and clock gating
4833 */ 4759 */
@@ -4895,7 +4821,7 @@ static void si_set_uvd_dcm(struct radeon_device *rdev,
4895 WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2); 4821 WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2);
4896} 4822}
4897 4823
4898static void si_init_uvd_internal_cg(struct radeon_device *rdev) 4824void si_init_uvd_internal_cg(struct radeon_device *rdev)
4899{ 4825{
4900 bool hw_mode = true; 4826 bool hw_mode = true;
4901 4827
@@ -4938,7 +4864,7 @@ static void si_enable_dma_pg(struct radeon_device *rdev, bool enable)
4938 u32 data, orig; 4864 u32 data, orig;
4939 4865
4940 orig = data = RREG32(DMA_PG); 4866 orig = data = RREG32(DMA_PG);
4941 if (enable) 4867 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA))
4942 data |= PG_CNTL_ENABLE; 4868 data |= PG_CNTL_ENABLE;
4943 else 4869 else
4944 data &= ~PG_CNTL_ENABLE; 4870 data &= ~PG_CNTL_ENABLE;
@@ -4962,7 +4888,7 @@ static void si_enable_gfx_cgpg(struct radeon_device *rdev,
4962{ 4888{
4963 u32 tmp; 4889 u32 tmp;
4964 4890
4965 if (enable) { 4891 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
4966 tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10); 4892 tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
4967 WREG32(RLC_TTOP_D, tmp); 4893 WREG32(RLC_TTOP_D, tmp);
4968 4894
@@ -5065,9 +4991,9 @@ static void si_enable_cgcg(struct radeon_device *rdev,
5065 4991
5066 orig = data = RREG32(RLC_CGCG_CGLS_CTRL); 4992 orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
5067 4993
5068 si_enable_gui_idle_interrupt(rdev, enable); 4994 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
4995 si_enable_gui_idle_interrupt(rdev, true);
5069 4996
5070 if (enable) {
5071 WREG32(RLC_GCPM_GENERAL_3, 0x00000080); 4997 WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
5072 4998
5073 tmp = si_halt_rlc(rdev); 4999 tmp = si_halt_rlc(rdev);
@@ -5084,6 +5010,8 @@ static void si_enable_cgcg(struct radeon_device *rdev,
5084 5010
5085 data |= CGCG_EN | CGLS_EN; 5011 data |= CGCG_EN | CGLS_EN;
5086 } else { 5012 } else {
5013 si_enable_gui_idle_interrupt(rdev, false);
5014
5087 RREG32(CB_CGTT_SCLK_CTRL); 5015 RREG32(CB_CGTT_SCLK_CTRL);
5088 RREG32(CB_CGTT_SCLK_CTRL); 5016 RREG32(CB_CGTT_SCLK_CTRL);
5089 RREG32(CB_CGTT_SCLK_CTRL); 5017 RREG32(CB_CGTT_SCLK_CTRL);
@@ -5101,16 +5029,18 @@ static void si_enable_mgcg(struct radeon_device *rdev,
5101{ 5029{
5102 u32 data, orig, tmp = 0; 5030 u32 data, orig, tmp = 0;
5103 5031
5104 if (enable) { 5032 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
5105 orig = data = RREG32(CGTS_SM_CTRL_REG); 5033 orig = data = RREG32(CGTS_SM_CTRL_REG);
5106 data = 0x96940200; 5034 data = 0x96940200;
5107 if (orig != data) 5035 if (orig != data)
5108 WREG32(CGTS_SM_CTRL_REG, data); 5036 WREG32(CGTS_SM_CTRL_REG, data);
5109 5037
5110 orig = data = RREG32(CP_MEM_SLP_CNTL); 5038 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
5111 data |= CP_MEM_LS_EN; 5039 orig = data = RREG32(CP_MEM_SLP_CNTL);
5112 if (orig != data) 5040 data |= CP_MEM_LS_EN;
5113 WREG32(CP_MEM_SLP_CNTL, data); 5041 if (orig != data)
5042 WREG32(CP_MEM_SLP_CNTL, data);
5043 }
5114 5044
5115 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); 5045 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
5116 data &= 0xffffffc0; 5046 data &= 0xffffffc0;
@@ -5155,7 +5085,7 @@ static void si_enable_uvd_mgcg(struct radeon_device *rdev,
5155{ 5085{
5156 u32 orig, data, tmp; 5086 u32 orig, data, tmp;
5157 5087
5158 if (enable) { 5088 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
5159 tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL); 5089 tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
5160 tmp |= 0x3fff; 5090 tmp |= 0x3fff;
5161 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp); 5091 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
@@ -5203,7 +5133,7 @@ static void si_enable_mc_ls(struct radeon_device *rdev,
5203 5133
5204 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { 5134 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5205 orig = data = RREG32(mc_cg_registers[i]); 5135 orig = data = RREG32(mc_cg_registers[i]);
5206 if (enable) 5136 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
5207 data |= MC_LS_ENABLE; 5137 data |= MC_LS_ENABLE;
5208 else 5138 else
5209 data &= ~MC_LS_ENABLE; 5139 data &= ~MC_LS_ENABLE;
@@ -5212,230 +5142,295 @@ static void si_enable_mc_ls(struct radeon_device *rdev,
5212 } 5142 }
5213} 5143}
5214 5144
5215 5145static void si_enable_mc_mgcg(struct radeon_device *rdev,
5216static void si_init_cg(struct radeon_device *rdev) 5146 bool enable)
5217{ 5147{
5218 bool has_uvd = true; 5148 int i;
5149 u32 orig, data;
5219 5150
5220 si_enable_mgcg(rdev, true); 5151 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5221 si_enable_cgcg(rdev, true); 5152 orig = data = RREG32(mc_cg_registers[i]);
5222 /* disable MC LS on Tahiti */ 5153 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
5223 if (rdev->family == CHIP_TAHITI) 5154 data |= MC_CG_ENABLE;
5224 si_enable_mc_ls(rdev, false); 5155 else
5225 if (has_uvd) { 5156 data &= ~MC_CG_ENABLE;
5226 si_enable_uvd_mgcg(rdev, true); 5157 if (data != orig)
5227 si_init_uvd_internal_cg(rdev); 5158 WREG32(mc_cg_registers[i], data);
5228 } 5159 }
5229} 5160}
5230 5161
5231static void si_fini_cg(struct radeon_device *rdev) 5162static void si_enable_dma_mgcg(struct radeon_device *rdev,
5163 bool enable)
5232{ 5164{
5233 bool has_uvd = true; 5165 u32 orig, data, offset;
5166 int i;
5234 5167
5235 if (has_uvd) 5168 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
5236 si_enable_uvd_mgcg(rdev, false); 5169 for (i = 0; i < 2; i++) {
5237 si_enable_cgcg(rdev, false); 5170 if (i == 0)
5238 si_enable_mgcg(rdev, false); 5171 offset = DMA0_REGISTER_OFFSET;
5172 else
5173 offset = DMA1_REGISTER_OFFSET;
5174 orig = data = RREG32(DMA_POWER_CNTL + offset);
5175 data &= ~MEM_POWER_OVERRIDE;
5176 if (data != orig)
5177 WREG32(DMA_POWER_CNTL + offset, data);
5178 WREG32(DMA_CLK_CTRL + offset, 0x00000100);
5179 }
5180 } else {
5181 for (i = 0; i < 2; i++) {
5182 if (i == 0)
5183 offset = DMA0_REGISTER_OFFSET;
5184 else
5185 offset = DMA1_REGISTER_OFFSET;
5186 orig = data = RREG32(DMA_POWER_CNTL + offset);
5187 data |= MEM_POWER_OVERRIDE;
5188 if (data != orig)
5189 WREG32(DMA_POWER_CNTL + offset, data);
5190
5191 orig = data = RREG32(DMA_CLK_CTRL + offset);
5192 data = 0xff000000;
5193 if (data != orig)
5194 WREG32(DMA_CLK_CTRL + offset, data);
5195 }
5196 }
5239} 5197}
5240 5198
5241static void si_init_pg(struct radeon_device *rdev) 5199static void si_enable_bif_mgls(struct radeon_device *rdev,
5200 bool enable)
5242{ 5201{
5243 bool has_pg = false; 5202 u32 orig, data;
5244 5203
5245 /* only cape verde supports PG */ 5204 orig = data = RREG32_PCIE(PCIE_CNTL2);
5246 if (rdev->family == CHIP_VERDE)
5247 has_pg = true;
5248 5205
5249 if (has_pg) { 5206 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
5250 si_init_ao_cu_mask(rdev); 5207 data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
5251 si_init_dma_pg(rdev); 5208 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
5252 si_enable_dma_pg(rdev, true); 5209 else
5253 si_init_gfx_cgpg(rdev); 5210 data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
5254 si_enable_gfx_cgpg(rdev, true); 5211 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
5255 } else { 5212
5256 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); 5213 if (orig != data)
5257 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); 5214 WREG32_PCIE(PCIE_CNTL2, data);
5258 }
5259} 5215}
5260 5216
5261static void si_fini_pg(struct radeon_device *rdev) 5217static void si_enable_hdp_mgcg(struct radeon_device *rdev,
5218 bool enable)
5262{ 5219{
5263 bool has_pg = false; 5220 u32 orig, data;
5264 5221
5265 /* only cape verde supports PG */ 5222 orig = data = RREG32(HDP_HOST_PATH_CNTL);
5266 if (rdev->family == CHIP_VERDE)
5267 has_pg = true;
5268 5223
5269 if (has_pg) { 5224 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
5270 si_enable_dma_pg(rdev, false); 5225 data &= ~CLOCK_GATING_DIS;
5271 si_enable_gfx_cgpg(rdev, false); 5226 else
5272 } 5227 data |= CLOCK_GATING_DIS;
5228
5229 if (orig != data)
5230 WREG32(HDP_HOST_PATH_CNTL, data);
5273} 5231}
5274 5232
5275/* 5233static void si_enable_hdp_ls(struct radeon_device *rdev,
5276 * RLC 5234 bool enable)
5277 */
5278void si_rlc_fini(struct radeon_device *rdev)
5279{ 5235{
5280 int r; 5236 u32 orig, data;
5281
5282 /* save restore block */
5283 if (rdev->rlc.save_restore_obj) {
5284 r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
5285 if (unlikely(r != 0))
5286 dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
5287 radeon_bo_unpin(rdev->rlc.save_restore_obj);
5288 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
5289 5237
5290 radeon_bo_unref(&rdev->rlc.save_restore_obj); 5238 orig = data = RREG32(HDP_MEM_POWER_LS);
5291 rdev->rlc.save_restore_obj = NULL;
5292 }
5293 5239
5294 /* clear state block */ 5240 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
5295 if (rdev->rlc.clear_state_obj) { 5241 data |= HDP_LS_ENABLE;
5296 r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); 5242 else
5297 if (unlikely(r != 0)) 5243 data &= ~HDP_LS_ENABLE;
5298 dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
5299 radeon_bo_unpin(rdev->rlc.clear_state_obj);
5300 radeon_bo_unreserve(rdev->rlc.clear_state_obj);
5301 5244
5302 radeon_bo_unref(&rdev->rlc.clear_state_obj); 5245 if (orig != data)
5303 rdev->rlc.clear_state_obj = NULL; 5246 WREG32(HDP_MEM_POWER_LS, data);
5304 }
5305} 5247}
5306 5248
5307#define RLC_CLEAR_STATE_END_MARKER 0x00000001 5249void si_update_cg(struct radeon_device *rdev,
5308 5250 u32 block, bool enable)
5309int si_rlc_init(struct radeon_device *rdev)
5310{ 5251{
5311 volatile u32 *dst_ptr; 5252 if (block & RADEON_CG_BLOCK_GFX) {
5312 u32 dws, data, i, j, k, reg_num; 5253 /* order matters! */
5313 u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index; 5254 if (enable) {
5314 u64 reg_list_mc_addr; 5255 si_enable_mgcg(rdev, true);
5315 const struct cs_section_def *cs_data = si_cs_data; 5256 si_enable_cgcg(rdev, true);
5316 int r; 5257 } else {
5317 5258 si_enable_cgcg(rdev, false);
5318 /* save restore block */ 5259 si_enable_mgcg(rdev, false);
5319 if (rdev->rlc.save_restore_obj == NULL) {
5320 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
5321 RADEON_GEM_DOMAIN_VRAM, NULL,
5322 &rdev->rlc.save_restore_obj);
5323 if (r) {
5324 dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
5325 return r;
5326 } 5260 }
5327 } 5261 }
5328 5262
5329 r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); 5263 if (block & RADEON_CG_BLOCK_MC) {
5330 if (unlikely(r != 0)) { 5264 si_enable_mc_mgcg(rdev, enable);
5331 si_rlc_fini(rdev); 5265 si_enable_mc_ls(rdev, enable);
5332 return r;
5333 } 5266 }
5334 r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM, 5267
5335 &rdev->rlc.save_restore_gpu_addr); 5268 if (block & RADEON_CG_BLOCK_SDMA) {
5336 if (r) { 5269 si_enable_dma_mgcg(rdev, enable);
5337 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
5338 dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
5339 si_rlc_fini(rdev);
5340 return r;
5341 } 5270 }
5342 5271
5343 if (rdev->family == CHIP_VERDE) { 5272 if (block & RADEON_CG_BLOCK_BIF) {
5344 r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr); 5273 si_enable_bif_mgls(rdev, enable);
5345 if (r) {
5346 dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
5347 si_rlc_fini(rdev);
5348 return r;
5349 }
5350 /* write the sr buffer */
5351 dst_ptr = rdev->rlc.sr_ptr;
5352 for (i = 0; i < ARRAY_SIZE(verde_rlc_save_restore_register_list); i++) {
5353 dst_ptr[i] = verde_rlc_save_restore_register_list[i];
5354 }
5355 radeon_bo_kunmap(rdev->rlc.save_restore_obj);
5356 } 5274 }
5357 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
5358 5275
5359 /* clear state block */ 5276 if (block & RADEON_CG_BLOCK_UVD) {
5360 reg_list_num = 0; 5277 if (rdev->has_uvd) {
5361 dws = 0; 5278 si_enable_uvd_mgcg(rdev, enable);
5362 for (i = 0; cs_data[i].section != NULL; i++) {
5363 for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
5364 reg_list_num++;
5365 dws += cs_data[i].section[j].reg_count;
5366 } 5279 }
5367 } 5280 }
5368 reg_list_blk_index = (3 * reg_list_num + 2);
5369 dws += reg_list_blk_index;
5370 5281
5371 if (rdev->rlc.clear_state_obj == NULL) { 5282 if (block & RADEON_CG_BLOCK_HDP) {
5372 r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, 5283 si_enable_hdp_mgcg(rdev, enable);
5373 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj); 5284 si_enable_hdp_ls(rdev, enable);
5374 if (r) {
5375 dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
5376 si_rlc_fini(rdev);
5377 return r;
5378 }
5379 } 5285 }
5380 r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); 5286}
5381 if (unlikely(r != 0)) { 5287
5382 si_rlc_fini(rdev); 5288static void si_init_cg(struct radeon_device *rdev)
5383 return r; 5289{
5290 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
5291 RADEON_CG_BLOCK_MC |
5292 RADEON_CG_BLOCK_SDMA |
5293 RADEON_CG_BLOCK_BIF |
5294 RADEON_CG_BLOCK_HDP), true);
5295 if (rdev->has_uvd) {
5296 si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
5297 si_init_uvd_internal_cg(rdev);
5384 } 5298 }
5385 r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM, 5299}
5386 &rdev->rlc.clear_state_gpu_addr);
5387 if (r) {
5388 5300
5389 radeon_bo_unreserve(rdev->rlc.clear_state_obj); 5301static void si_fini_cg(struct radeon_device *rdev)
5390 dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r); 5302{
5391 si_rlc_fini(rdev); 5303 if (rdev->has_uvd) {
5392 return r; 5304 si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
5393 } 5305 }
5394 r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr); 5306 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
5395 if (r) { 5307 RADEON_CG_BLOCK_MC |
5396 dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r); 5308 RADEON_CG_BLOCK_SDMA |
5397 si_rlc_fini(rdev); 5309 RADEON_CG_BLOCK_BIF |
5398 return r; 5310 RADEON_CG_BLOCK_HDP), false);
5311}
5312
5313u32 si_get_csb_size(struct radeon_device *rdev)
5314{
5315 u32 count = 0;
5316 const struct cs_section_def *sect = NULL;
5317 const struct cs_extent_def *ext = NULL;
5318
5319 if (rdev->rlc.cs_data == NULL)
5320 return 0;
5321
5322 /* begin clear state */
5323 count += 2;
5324 /* context control state */
5325 count += 3;
5326
5327 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5328 for (ext = sect->section; ext->extent != NULL; ++ext) {
5329 if (sect->id == SECT_CONTEXT)
5330 count += 2 + ext->reg_count;
5331 else
5332 return 0;
5333 }
5399 } 5334 }
5400 /* set up the cs buffer */ 5335 /* pa_sc_raster_config */
5401 dst_ptr = rdev->rlc.cs_ptr; 5336 count += 3;
5402 reg_list_hdr_blk_index = 0; 5337 /* end clear state */
5403 reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4); 5338 count += 2;
5404 data = upper_32_bits(reg_list_mc_addr); 5339 /* clear state */
5405 dst_ptr[reg_list_hdr_blk_index] = data; 5340 count += 2;
5406 reg_list_hdr_blk_index++; 5341
5407 for (i = 0; cs_data[i].section != NULL; i++) { 5342 return count;
5408 for (j = 0; cs_data[i].section[j].extent != NULL; j++) { 5343}
5409 reg_num = cs_data[i].section[j].reg_count; 5344
5410 data = reg_list_mc_addr & 0xffffffff; 5345void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
5411 dst_ptr[reg_list_hdr_blk_index] = data; 5346{
5412 reg_list_hdr_blk_index++; 5347 u32 count = 0, i;
5413 5348 const struct cs_section_def *sect = NULL;
5414 data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff; 5349 const struct cs_extent_def *ext = NULL;
5415 dst_ptr[reg_list_hdr_blk_index] = data; 5350
5416 reg_list_hdr_blk_index++; 5351 if (rdev->rlc.cs_data == NULL)
5417 5352 return;
5418 data = 0x08000000 | (reg_num * 4); 5353 if (buffer == NULL)
5419 dst_ptr[reg_list_hdr_blk_index] = data; 5354 return;
5420 reg_list_hdr_blk_index++; 5355
5421 5356 buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
5422 for (k = 0; k < reg_num; k++) { 5357 buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
5423 data = cs_data[i].section[j].extent[k]; 5358
5424 dst_ptr[reg_list_blk_index + k] = data; 5359 buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
5360 buffer[count++] = 0x80000000;
5361 buffer[count++] = 0x80000000;
5362
5363 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5364 for (ext = sect->section; ext->extent != NULL; ++ext) {
5365 if (sect->id == SECT_CONTEXT) {
5366 buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
5367 buffer[count++] = ext->reg_index - 0xa000;
5368 for (i = 0; i < ext->reg_count; i++)
5369 buffer[count++] = ext->extent[i];
5370 } else {
5371 return;
5425 } 5372 }
5426 reg_list_mc_addr += reg_num * 4;
5427 reg_list_blk_index += reg_num;
5428 } 5373 }
5429 } 5374 }
5430 dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
5431 5375
5432 radeon_bo_kunmap(rdev->rlc.clear_state_obj); 5376 buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
5433 radeon_bo_unreserve(rdev->rlc.clear_state_obj); 5377 buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
5378 switch (rdev->family) {
5379 case CHIP_TAHITI:
5380 case CHIP_PITCAIRN:
5381 buffer[count++] = 0x2a00126a;
5382 break;
5383 case CHIP_VERDE:
5384 buffer[count++] = 0x0000124a;
5385 break;
5386 case CHIP_OLAND:
5387 buffer[count++] = 0x00000082;
5388 break;
5389 case CHIP_HAINAN:
5390 buffer[count++] = 0x00000000;
5391 break;
5392 default:
5393 buffer[count++] = 0x00000000;
5394 break;
5395 }
5434 5396
5435 return 0; 5397 buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
5398 buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
5399
5400 buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
5401 buffer[count++] = 0;
5402}
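si_get_csb_size walks the same cs_data sections as si_get_csb_buffer, so the two must stay in lockstep: every dword the fill routine emits has to be counted by the size routine. A sketch of the size arithmetic under the same assumption the code makes (SECT_CONTEXT-only extents, each costing a 2-dword SET_CONTEXT_REG header plus its registers):

#include <stdio.h>

static unsigned csb_size(const unsigned *reg_counts, unsigned n_extents)
{
    unsigned count = 0;
    count += 2;             /* PREAMBLE begin clear state */
    count += 3;             /* CONTEXT_CONTROL */
    for (unsigned i = 0; i < n_extents; i++)
        count += 2 + reg_counts[i];   /* header + registers per extent */
    count += 3;             /* PA_SC_RASTER_CONFIG write */
    count += 2;             /* PREAMBLE end clear state */
    count += 2;             /* CLEAR_STATE */
    return count;
}

int main(void)
{
    unsigned extents[] = { 4, 7 };   /* example register counts per extent */
    /* 2 + 3 + (2+4) + (2+7) + 3 + 2 + 2 = 27 */
    printf("csb dwords: %u\n", csb_size(extents, 2));
    return 0;
}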
5403
5404static void si_init_pg(struct radeon_device *rdev)
5405{
5406 if (rdev->pg_flags) {
5407 if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) {
5408 si_init_dma_pg(rdev);
5409 }
5410 si_init_ao_cu_mask(rdev);
5411 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
5412 si_init_gfx_cgpg(rdev);
5413 }
5414 si_enable_dma_pg(rdev, true);
5415 si_enable_gfx_cgpg(rdev, true);
5416 } else {
5417 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5418 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
5419 }
5420}
5421
5422static void si_fini_pg(struct radeon_device *rdev)
5423{
5424 if (rdev->pg_flags) {
5425 si_enable_dma_pg(rdev, false);
5426 si_enable_gfx_cgpg(rdev, false);
5427 }
5436} 5428}
5437 5429
5438static void si_rlc_reset(struct radeon_device *rdev) 5430/*
5431 * RLC
5432 */
5433void si_rlc_reset(struct radeon_device *rdev)
5439{ 5434{
5440 u32 tmp = RREG32(GRBM_SOFT_RESET); 5435 u32 tmp = RREG32(GRBM_SOFT_RESET);
5441 5436
@@ -5651,7 +5646,7 @@ static int si_irq_init(struct radeon_device *rdev)
5651 WREG32(INTERRUPT_CNTL, interrupt_cntl); 5646 WREG32(INTERRUPT_CNTL, interrupt_cntl);
5652 5647
5653 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); 5648 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
5654 rb_bufsz = drm_order(rdev->ih.ring_size / 4); 5649 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
5655 5650
5656 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | 5651 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
5657 IH_WPTR_OVERFLOW_CLEAR | 5652 IH_WPTR_OVERFLOW_CLEAR |
@@ -6335,80 +6330,6 @@ restart_ih:
6335 return IRQ_HANDLED; 6330 return IRQ_HANDLED;
6336} 6331}
6337 6332
6338/**
6339 * si_copy_dma - copy pages using the DMA engine
6340 *
6341 * @rdev: radeon_device pointer
6342 * @src_offset: src GPU address
6343 * @dst_offset: dst GPU address
6344 * @num_gpu_pages: number of GPU pages to xfer
6345 * @fence: radeon fence object
6346 *
6347 * Copy GPU pages using the DMA engine (SI).
6348 * Used by the radeon ttm implementation to move pages if
6349 * registered as the asic copy callback.
6350 */
6351int si_copy_dma(struct radeon_device *rdev,
6352 uint64_t src_offset, uint64_t dst_offset,
6353 unsigned num_gpu_pages,
6354 struct radeon_fence **fence)
6355{
6356 struct radeon_semaphore *sem = NULL;
6357 int ring_index = rdev->asic->copy.dma_ring_index;
6358 struct radeon_ring *ring = &rdev->ring[ring_index];
6359 u32 size_in_bytes, cur_size_in_bytes;
6360 int i, num_loops;
6361 int r = 0;
6362
6363 r = radeon_semaphore_create(rdev, &sem);
6364 if (r) {
6365 DRM_ERROR("radeon: moving bo (%d).\n", r);
6366 return r;
6367 }
6368
6369 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
6370 num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
6371 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
6372 if (r) {
6373 DRM_ERROR("radeon: moving bo (%d).\n", r);
6374 radeon_semaphore_free(rdev, &sem, NULL);
6375 return r;
6376 }
6377
6378 if (radeon_fence_need_sync(*fence, ring->idx)) {
6379 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
6380 ring->idx);
6381 radeon_fence_note_sync(*fence, ring->idx);
6382 } else {
6383 radeon_semaphore_free(rdev, &sem, NULL);
6384 }
6385
6386 for (i = 0; i < num_loops; i++) {
6387 cur_size_in_bytes = size_in_bytes;
6388 if (cur_size_in_bytes > 0xFFFFF)
6389 cur_size_in_bytes = 0xFFFFF;
6390 size_in_bytes -= cur_size_in_bytes;
6391 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
6392 radeon_ring_write(ring, dst_offset & 0xffffffff);
6393 radeon_ring_write(ring, src_offset & 0xffffffff);
6394 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
6395 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
6396 src_offset += cur_size_in_bytes;
6397 dst_offset += cur_size_in_bytes;
6398 }
6399
6400 r = radeon_fence_emit(rdev, fence, ring->idx);
6401 if (r) {
6402 radeon_ring_unlock_undo(rdev, ring);
6403 return r;
6404 }
6405
6406 radeon_ring_unlock_commit(rdev, ring);
6407 radeon_semaphore_free(rdev, &sem, *fence);
6408
6409 return r;
6410}
6411
6412/* 6333/*
6413 * startup/shutdown callbacks 6334 * startup/shutdown callbacks
6414 */ 6335 */
@@ -6422,6 +6343,13 @@ static int si_startup(struct radeon_device *rdev)
6422 /* enable aspm */ 6343 /* enable aspm */
6423 si_program_aspm(rdev); 6344 si_program_aspm(rdev);
6424 6345
6346 /* scratch needs to be initialized before MC */
6347 r = r600_vram_scratch_init(rdev);
6348 if (r)
6349 return r;
6350
6351 si_mc_program(rdev);
6352
6425 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || 6353 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
6426 !rdev->rlc_fw || !rdev->mc_fw) { 6354 !rdev->rlc_fw || !rdev->mc_fw) {
6427 r = si_init_microcode(rdev); 6355 r = si_init_microcode(rdev);
@@ -6437,18 +6365,19 @@ static int si_startup(struct radeon_device *rdev)
6437 return r; 6365 return r;
6438 } 6366 }
6439 6367
6440 r = r600_vram_scratch_init(rdev);
6441 if (r)
6442 return r;
6443
6444 si_mc_program(rdev);
6445 r = si_pcie_gart_enable(rdev); 6368 r = si_pcie_gart_enable(rdev);
6446 if (r) 6369 if (r)
6447 return r; 6370 return r;
6448 si_gpu_init(rdev); 6371 si_gpu_init(rdev);
6449 6372
6450 /* allocate rlc buffers */ 6373 /* allocate rlc buffers */
6451 r = si_rlc_init(rdev); 6374 if (rdev->family == CHIP_VERDE) {
6375 rdev->rlc.reg_list = verde_rlc_save_restore_register_list;
6376 rdev->rlc.reg_list_size =
6377 (u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
6378 }
6379 rdev->rlc.cs_data = si_cs_data;
6380 r = sumo_rlc_init(rdev);
6452 if (r) { 6381 if (r) {
6453 DRM_ERROR("Failed to init rlc BOs!\n"); 6382 DRM_ERROR("Failed to init rlc BOs!\n");
6454 return r; 6383 return r;
@@ -6490,7 +6419,7 @@ static int si_startup(struct radeon_device *rdev)
6490 } 6419 }
6491 6420
6492 if (rdev->has_uvd) { 6421 if (rdev->has_uvd) {
6493 r = rv770_uvd_resume(rdev); 6422 r = uvd_v2_2_resume(rdev);
6494 if (!r) { 6423 if (!r) {
6495 r = radeon_fence_driver_start_ring(rdev, 6424 r = radeon_fence_driver_start_ring(rdev,
6496 R600_RING_TYPE_UVD_INDEX); 6425 R600_RING_TYPE_UVD_INDEX);
@@ -6519,21 +6448,21 @@ static int si_startup(struct radeon_device *rdev)
6519 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 6448 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
6520 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 6449 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
6521 CP_RB0_RPTR, CP_RB0_WPTR, 6450 CP_RB0_RPTR, CP_RB0_WPTR,
6522 0, 0xfffff, RADEON_CP_PACKET2); 6451 RADEON_CP_PACKET2);
6523 if (r) 6452 if (r)
6524 return r; 6453 return r;
6525 6454
6526 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; 6455 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
6527 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, 6456 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
6528 CP_RB1_RPTR, CP_RB1_WPTR, 6457 CP_RB1_RPTR, CP_RB1_WPTR,
6529 0, 0xfffff, RADEON_CP_PACKET2); 6458 RADEON_CP_PACKET2);
6530 if (r) 6459 if (r)
6531 return r; 6460 return r;
6532 6461
6533 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; 6462 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
6534 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, 6463 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
6535 CP_RB2_RPTR, CP_RB2_WPTR, 6464 CP_RB2_RPTR, CP_RB2_WPTR,
6536 0, 0xfffff, RADEON_CP_PACKET2); 6465 RADEON_CP_PACKET2);
6537 if (r) 6466 if (r)
6538 return r; 6467 return r;
6539 6468
@@ -6541,7 +6470,7 @@ static int si_startup(struct radeon_device *rdev)
6541 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, 6470 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
6542 DMA_RB_RPTR + DMA0_REGISTER_OFFSET, 6471 DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
6543 DMA_RB_WPTR + DMA0_REGISTER_OFFSET, 6472 DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
6544 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); 6473 DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
6545 if (r) 6474 if (r)
6546 return r; 6475 return r;
6547 6476
@@ -6549,7 +6478,7 @@ static int si_startup(struct radeon_device *rdev)
6549 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, 6478 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
6550 DMA_RB_RPTR + DMA1_REGISTER_OFFSET, 6479 DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
6551 DMA_RB_WPTR + DMA1_REGISTER_OFFSET, 6480 DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
6552 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); 6481 DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
6553 if (r) 6482 if (r)
6554 return r; 6483 return r;
6555 6484
@@ -6567,12 +6496,11 @@ static int si_startup(struct radeon_device *rdev)
6567 if (rdev->has_uvd) { 6496 if (rdev->has_uvd) {
6568 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; 6497 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
6569 if (ring->ring_size) { 6498 if (ring->ring_size) {
6570 r = radeon_ring_init(rdev, ring, ring->ring_size, 6499 r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
6571 R600_WB_UVD_RPTR_OFFSET,
6572 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, 6500 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
6573 0, 0xfffff, RADEON_CP_PACKET2); 6501 RADEON_CP_PACKET2);
6574 if (!r) 6502 if (!r)
6575 r = r600_uvd_init(rdev); 6503 r = uvd_v1_0_init(rdev);
6576 if (r) 6504 if (r)
6577 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); 6505 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
6578 } 6506 }
@@ -6590,6 +6518,10 @@ static int si_startup(struct radeon_device *rdev)
6590 return r; 6518 return r;
6591 } 6519 }
6592 6520
6521 r = dce6_audio_init(rdev);
6522 if (r)
6523 return r;
6524
6593 return 0; 6525 return 0;
6594} 6526}
6595 6527
@@ -6621,13 +6553,16 @@ int si_resume(struct radeon_device *rdev)
6621 6553
6622int si_suspend(struct radeon_device *rdev) 6554int si_suspend(struct radeon_device *rdev)
6623{ 6555{
6556 dce6_audio_fini(rdev);
6624 radeon_vm_manager_fini(rdev); 6557 radeon_vm_manager_fini(rdev);
6625 si_cp_enable(rdev, false); 6558 si_cp_enable(rdev, false);
6626 cayman_dma_stop(rdev); 6559 cayman_dma_stop(rdev);
6627 if (rdev->has_uvd) { 6560 if (rdev->has_uvd) {
6628 r600_uvd_rbc_stop(rdev); 6561 uvd_v1_0_fini(rdev);
6629 radeon_uvd_suspend(rdev); 6562 radeon_uvd_suspend(rdev);
6630 } 6563 }
6564 si_fini_pg(rdev);
6565 si_fini_cg(rdev);
6631 si_irq_suspend(rdev); 6566 si_irq_suspend(rdev);
6632 radeon_wb_disable(rdev); 6567 radeon_wb_disable(rdev);
6633 si_pcie_gart_disable(rdev); 6568 si_pcie_gart_disable(rdev);
@@ -6734,7 +6669,7 @@ int si_init(struct radeon_device *rdev)
6734 si_cp_fini(rdev); 6669 si_cp_fini(rdev);
6735 cayman_dma_fini(rdev); 6670 cayman_dma_fini(rdev);
6736 si_irq_fini(rdev); 6671 si_irq_fini(rdev);
6737 si_rlc_fini(rdev); 6672 sumo_rlc_fini(rdev);
6738 radeon_wb_fini(rdev); 6673 radeon_wb_fini(rdev);
6739 radeon_ib_pool_fini(rdev); 6674 radeon_ib_pool_fini(rdev);
6740 radeon_vm_manager_fini(rdev); 6675 radeon_vm_manager_fini(rdev);
@@ -6759,16 +6694,18 @@ void si_fini(struct radeon_device *rdev)
6759{ 6694{
6760 si_cp_fini(rdev); 6695 si_cp_fini(rdev);
6761 cayman_dma_fini(rdev); 6696 cayman_dma_fini(rdev);
6762 si_irq_fini(rdev);
6763 si_rlc_fini(rdev);
6764 si_fini_cg(rdev);
6765 si_fini_pg(rdev); 6697 si_fini_pg(rdev);
6698 si_fini_cg(rdev);
6699 si_irq_fini(rdev);
6700 sumo_rlc_fini(rdev);
6766 radeon_wb_fini(rdev); 6701 radeon_wb_fini(rdev);
6767 radeon_vm_manager_fini(rdev); 6702 radeon_vm_manager_fini(rdev);
6768 radeon_ib_pool_fini(rdev); 6703 radeon_ib_pool_fini(rdev);
6769 radeon_irq_kms_fini(rdev); 6704 radeon_irq_kms_fini(rdev);
6770 if (rdev->has_uvd) 6705 if (rdev->has_uvd) {
6706 uvd_v1_0_fini(rdev);
6771 radeon_uvd_fini(rdev); 6707 radeon_uvd_fini(rdev);
6708 }
6772 si_pcie_gart_fini(rdev); 6709 si_pcie_gart_fini(rdev);
6773 r600_vram_scratch_fini(rdev); 6710 r600_vram_scratch_fini(rdev);
6774 radeon_gem_fini(rdev); 6711 radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
new file mode 100644
index 000000000000..49909d23dfce
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -0,0 +1,235 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <drm/drmP.h>
25#include "radeon.h"
26#include "radeon_asic.h"
27#include "sid.h"
28
29u32 si_gpu_check_soft_reset(struct radeon_device *rdev);
30
31/**
32 * si_dma_is_lockup - Check if the DMA engine is locked up
33 *
34 * @rdev: radeon_device pointer
35 * @ring: radeon_ring structure holding ring information
36 *
37 * Check if the async DMA engine is locked up.
38 * Returns true if the engine appears to be locked up, false if not.
39 */
40bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
41{
42 u32 reset_mask = si_gpu_check_soft_reset(rdev);
43 u32 mask;
44
45 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
46 mask = RADEON_RESET_DMA;
47 else
48 mask = RADEON_RESET_DMA1;
49
50 if (!(reset_mask & mask)) {
51 radeon_ring_lockup_update(ring);
52 return false;
53 }
54 /* force ring activities */
55 radeon_ring_force_activity(rdev, ring);
56 return radeon_ring_test_lockup(rdev, ring);
57}
58
59/**
60 * si_dma_vm_set_page - update the page tables using the DMA
61 *
62 * @rdev: radeon_device pointer
63 * @ib: indirect buffer to fill with commands
64 * @pe: addr of the page entry
65 * @addr: dst addr to write into pe
66 * @count: number of page entries to update
67 * @incr: increase next addr by incr bytes
68 * @flags: access flags
69 *
70 * Update the page tables using the DMA (SI).
71 */
72void si_dma_vm_set_page(struct radeon_device *rdev,
73 struct radeon_ib *ib,
74 uint64_t pe,
75 uint64_t addr, unsigned count,
76 uint32_t incr, uint32_t flags)
77{
78 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
79 uint64_t value;
80 unsigned ndw;
81
82 if (flags & RADEON_VM_PAGE_SYSTEM) {
83 while (count) {
84 ndw = count * 2;
85 if (ndw > 0xFFFFE)
86 ndw = 0xFFFFE;
87
88 /* for non-physically contiguous pages (system) */
89 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
90 ib->ptr[ib->length_dw++] = pe;
91 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
92 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
93 if (flags & RADEON_VM_PAGE_SYSTEM) {
94 value = radeon_vm_map_gart(rdev, addr);
95 value &= 0xFFFFFFFFFFFFF000ULL;
96 } else if (flags & RADEON_VM_PAGE_VALID) {
97 value = addr;
98 } else {
99 value = 0;
100 }
101 addr += incr;
102 value |= r600_flags;
103 ib->ptr[ib->length_dw++] = value;
104 ib->ptr[ib->length_dw++] = upper_32_bits(value);
105 }
106 }
107 } else {
108 while (count) {
109 ndw = count * 2;
110 if (ndw > 0xFFFFE)
111 ndw = 0xFFFFE;
112
113 if (flags & RADEON_VM_PAGE_VALID)
114 value = addr;
115 else
116 value = 0;
117 /* for physically contiguous pages (vram) */
118 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
119 ib->ptr[ib->length_dw++] = pe; /* dst addr */
120 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
121 ib->ptr[ib->length_dw++] = r600_flags; /* mask */
122 ib->ptr[ib->length_dw++] = 0;
123 ib->ptr[ib->length_dw++] = value; /* value */
124 ib->ptr[ib->length_dw++] = upper_32_bits(value);
125 ib->ptr[ib->length_dw++] = incr; /* increment size */
126 ib->ptr[ib->length_dw++] = 0;
127 pe += ndw * 4;
128 addr += (ndw / 2) * incr;
129 count -= ndw / 2;
130 }
131 }
132 while (ib->length_dw & 0x7)
133 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
134}
135
136void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
137{
138 struct radeon_ring *ring = &rdev->ring[ridx];
139
140 if (vm == NULL)
141 return;
142
143 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
144 if (vm->id < 8) {
145 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
146 } else {
147 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
148 }
149 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
150
151 /* flush hdp cache */
152 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
153 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
154 radeon_ring_write(ring, 1);
155
156 /* bits 0-7 are the VM contexts0-7 */
157 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
158 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
159 radeon_ring_write(ring, 1 << vm->id);
160}
161
162/**
163 * si_copy_dma - copy pages using the DMA engine
164 *
165 * @rdev: radeon_device pointer
166 * @src_offset: src GPU address
167 * @dst_offset: dst GPU address
168 * @num_gpu_pages: number of GPU pages to xfer
169 * @fence: radeon fence object
170 *
171 * Copy GPU paging using the DMA engine (SI).
172 * Used by the radeon ttm implementation to move pages if
173 * registered as the asic copy callback.
174 */
175int si_copy_dma(struct radeon_device *rdev,
176 uint64_t src_offset, uint64_t dst_offset,
177 unsigned num_gpu_pages,
178 struct radeon_fence **fence)
179{
180 struct radeon_semaphore *sem = NULL;
181 int ring_index = rdev->asic->copy.dma_ring_index;
182 struct radeon_ring *ring = &rdev->ring[ring_index];
183 u32 size_in_bytes, cur_size_in_bytes;
184 int i, num_loops;
185 int r = 0;
186
187 r = radeon_semaphore_create(rdev, &sem);
188 if (r) {
189 DRM_ERROR("radeon: moving bo (%d).\n", r);
190 return r;
191 }
192
193 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
194 num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
195 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
196 if (r) {
197 DRM_ERROR("radeon: moving bo (%d).\n", r);
198 radeon_semaphore_free(rdev, &sem, NULL);
199 return r;
200 }
201
202 if (radeon_fence_need_sync(*fence, ring->idx)) {
203 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
204 ring->idx);
205 radeon_fence_note_sync(*fence, ring->idx);
206 } else {
207 radeon_semaphore_free(rdev, &sem, NULL);
208 }
209
210 for (i = 0; i < num_loops; i++) {
211 cur_size_in_bytes = size_in_bytes;
212 if (cur_size_in_bytes > 0xFFFFF)
213 cur_size_in_bytes = 0xFFFFF;
214 size_in_bytes -= cur_size_in_bytes;
215 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
216 radeon_ring_write(ring, dst_offset & 0xffffffff);
217 radeon_ring_write(ring, src_offset & 0xffffffff);
218 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
219 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
220 src_offset += cur_size_in_bytes;
221 dst_offset += cur_size_in_bytes;
222 }
223
224 r = radeon_fence_emit(rdev, fence, ring->idx);
225 if (r) {
226 radeon_ring_unlock_undo(rdev, ring);
227 return r;
228 }
229
230 radeon_ring_unlock_commit(rdev, ring);
231 radeon_semaphore_free(rdev, &sem, *fence);
232
233 return r;
234}
235
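For a sense of the arithmetic in si_copy_dma() above: each DMA_PACKET_COPY costs five dwords, and the num_loops * 5 + 11 reservation leaves headroom for the optional semaphore sync, the fence emission and ring padding. A standalone sketch (plain userspace C; assumes 4 KiB GPU pages, i.e. RADEON_GPU_PAGE_SHIFT == 12, and the 0xFFFFF per-packet byte limit visible in the loop):

#include <stdio.h>

#define GPU_PAGE_SHIFT 12
#define MAX_COPY_BYTES 0xfffff

int main(void)
{
    unsigned num_gpu_pages = 1024;  /* a 4 MiB transfer */
    unsigned size = num_gpu_pages << GPU_PAGE_SHIFT;
    /* open-coded DIV_ROUND_UP(size, MAX_COPY_BYTES) */
    unsigned num_loops = (size + MAX_COPY_BYTES - 1) / MAX_COPY_BYTES;

    printf("loops=%u ring dwords=%u\n", num_loops, num_loops * 5 + 11);
    return 0;   /* 4 MiB -> 5 loops -> 36 dwords requested */
}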
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 73aaa2e4c312..5be9b4e72350 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -37,8 +37,6 @@
 
 #define SMC_RAM_END                 0x20000
 
-#define DDR3_DRAM_ROWS              0x2000
-
 #define SCLK_MIN_DEEPSLEEP_FREQ     1350
 
 static const struct si_cac_config_reg cac_weights_tahiti[] =
@@ -1755,6 +1753,9 @@ static int si_calculate_sclk_params(struct radeon_device *rdev,
                                     u32 engine_clock,
                                     SISLANDS_SMC_SCLK_VALUE *sclk);
 
+extern void si_update_cg(struct radeon_device *rdev,
+                         u32 block, bool enable);
+
 static struct si_power_info *si_get_pi(struct radeon_device *rdev)
 {
     struct si_power_info *pi = rdev->pm.dpm.priv;
@@ -1767,8 +1768,9 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe
 {
     s64 kt, kv, leakage_w, i_leakage, vddc;
     s64 temperature, t_slope, t_intercept, av, bv, t_ref;
+    s64 tmp;
 
-    i_leakage = drm_int2fixp(ileakage / 100);
+    i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
     vddc = div64_s64(drm_int2fixp(v), 1000);
     temperature = div64_s64(drm_int2fixp(t), 1000);
 
@@ -1778,8 +1780,9 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe
     bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
     t_ref = drm_int2fixp(coeff->t_ref);
 
-    kt = drm_fixp_div(drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, temperature)),
-                      drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, t_ref)));
+    tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
+    kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
+    kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
     kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
 
     leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
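In floating point, the fixed-point sequence above computes the leakage model below; a sketch for readers only, since kernel code cannot use the FPU and must stay with the drm_fixp_* helpers (input scaling follows the div64_s64() divisors in the code):

#include <math.h>

static double leakage_watts(double i_leakage, double vddc, double temperature,
                            double t_slope, double t_intercept,
                            double av, double bv, double t_ref)
{
    double tmp = t_slope * vddc + t_intercept;
    double kt = exp(tmp * temperature) / exp(tmp * t_ref);
    double kv = av * exp(bv * vddc);

    return i_leakage * kt * kv * vddc;
}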
@@ -1931,6 +1934,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
             si_pi->cac_override = cac_override_pitcairn;
             si_pi->powertune_data = &powertune_data_pitcairn;
             si_pi->dte_data = dte_data_pitcairn;
+            break;
         }
     } else if (rdev->family == CHIP_VERDE) {
         si_pi->lcac_config = lcac_cape_verde;
@@ -1941,6 +1945,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
         case 0x683B:
         case 0x683F:
         case 0x6829:
+        case 0x6835:
             si_pi->cac_weights = cac_weights_cape_verde_pro;
             si_pi->dte_data = dte_data_cape_verde;
             break;
@@ -2901,7 +2906,8 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 {
     struct ni_ps *ps = ni_get_ps(rps);
     struct radeon_clock_and_voltage_limits *max_limits;
-    bool disable_mclk_switching;
+    bool disable_mclk_switching = false;
+    bool disable_sclk_switching = false;
     u32 mclk, sclk;
     u16 vddc, vddci;
     int i;
@@ -2909,8 +2915,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
     if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
         ni_dpm_vblank_too_short(rdev))
         disable_mclk_switching = true;
-    else
-        disable_mclk_switching = false;
+
+    if (rps->vclk || rps->dclk) {
+        disable_mclk_switching = true;
+        disable_sclk_switching = true;
+    }
 
     if (rdev->pm.dpm.ac_power)
         max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
@@ -2938,27 +2947,43 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 
     if (disable_mclk_switching) {
         mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
-        sclk = ps->performance_levels[0].sclk;
-        vddc = ps->performance_levels[0].vddc;
         vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
     } else {
-        sclk = ps->performance_levels[0].sclk;
         mclk = ps->performance_levels[0].mclk;
-        vddc = ps->performance_levels[0].vddc;
         vddci = ps->performance_levels[0].vddci;
     }
 
+    if (disable_sclk_switching) {
+        sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
+        vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
+    } else {
+        sclk = ps->performance_levels[0].sclk;
+        vddc = ps->performance_levels[0].vddc;
+    }
+
     /* adjusted low state */
     ps->performance_levels[0].sclk = sclk;
     ps->performance_levels[0].mclk = mclk;
     ps->performance_levels[0].vddc = vddc;
     ps->performance_levels[0].vddci = vddci;
 
-    for (i = 1; i < ps->performance_level_count; i++) {
-        if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
-            ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
-        if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
-            ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+    if (disable_sclk_switching) {
+        sclk = ps->performance_levels[0].sclk;
+        for (i = 1; i < ps->performance_level_count; i++) {
+            if (sclk < ps->performance_levels[i].sclk)
+                sclk = ps->performance_levels[i].sclk;
+        }
+        for (i = 0; i < ps->performance_level_count; i++) {
+            ps->performance_levels[i].sclk = sclk;
+            ps->performance_levels[i].vddc = vddc;
+        }
+    } else {
+        for (i = 1; i < ps->performance_level_count; i++) {
+            if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
+                ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
+            if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
+                ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+        }
     }
 
     if (disable_mclk_switching) {
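The state-adjust hunk above adds one rule: a power state that carries UVD clocks (vclk/dclk, i.e. video decode is active) pins both mclk and sclk so the decoder is never glitched by a clock switch. Condensed into an illustrative predicate, not part of the patch:

static bool si_needs_stable_mclk(struct radeon_device *rdev,
                                 struct radeon_ps *rps)
{
    return (rdev->pm.dpm.new_active_crtc_count > 1) ||
           ni_dpm_vblank_too_short(rdev) ||
           rps->vclk || rps->dclk;     /* new: UVD active */
}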
@@ -3237,10 +3262,10 @@ int si_dpm_force_performance_level(struct radeon_device *rdev,
 {
     struct radeon_ps *rps = rdev->pm.dpm.current_ps;
     struct ni_ps *ps = ni_get_ps(rps);
-    u32 levels;
+    u32 levels = ps->performance_level_count;
 
     if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
-        if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
+        if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
             return -EINVAL;
 
         if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
@@ -3249,14 +3274,13 @@ int si_dpm_force_performance_level(struct radeon_device *rdev,
         if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
             return -EINVAL;
 
-        levels = ps->performance_level_count - 1;
-        if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+        if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
             return -EINVAL;
     } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
         if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
             return -EINVAL;
 
-        if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
+        if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
             return -EINVAL;
     }
 
@@ -3620,8 +3644,12 @@ static void si_enable_display_gap(struct radeon_device *rdev)
 {
     u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
 
+    tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+    tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+            DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
+
     tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
-    tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
+    tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
             DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
     WREG32(CG_DISPLAY_GAP_CNTL, tmp);
 }
@@ -3638,7 +3666,7 @@ static void si_clear_vc(struct radeon_device *rdev)
     WREG32(CG_FTV, 0);
 }
 
-static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
+u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
 {
     u8 mc_para_index;
 
@@ -3651,7 +3679,7 @@ static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
     return mc_para_index;
 }
 
-static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
+u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
 {
     u8 mc_para_index;
 
@@ -3733,20 +3761,21 @@ static bool si_validate_phase_shedding_tables(struct radeon_device *rdev,
     return true;
 }
 
-static void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
-                                                     struct atom_voltage_table *voltage_table)
+void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
+                                              u32 max_voltage_steps,
+                                              struct atom_voltage_table *voltage_table)
 {
     unsigned int i, diff;
 
-    if (voltage_table->count <= SISLANDS_MAX_NO_VREG_STEPS)
+    if (voltage_table->count <= max_voltage_steps)
         return;
 
-    diff = voltage_table->count - SISLANDS_MAX_NO_VREG_STEPS;
+    diff = voltage_table->count - max_voltage_steps;
 
-    for (i= 0; i < SISLANDS_MAX_NO_VREG_STEPS; i++)
+    for (i= 0; i < max_voltage_steps; i++)
         voltage_table->entries[i] = voltage_table->entries[i + diff];
 
-    voltage_table->count = SISLANDS_MAX_NO_VREG_STEPS;
+    voltage_table->count = max_voltage_steps;
 }
 
 static int si_construct_voltage_tables(struct radeon_device *rdev)
@@ -3762,7 +3791,9 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
         return ret;
 
     if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
-        si_trim_voltage_table_to_fit_state_table(rdev, &eg_pi->vddc_voltage_table);
+        si_trim_voltage_table_to_fit_state_table(rdev,
+                                                 SISLANDS_MAX_NO_VREG_STEPS,
+                                                 &eg_pi->vddc_voltage_table);
 
     if (eg_pi->vddci_control) {
         ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
@@ -3771,7 +3802,9 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
             return ret;
 
         if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
-            si_trim_voltage_table_to_fit_state_table(rdev, &eg_pi->vddci_voltage_table);
+            si_trim_voltage_table_to_fit_state_table(rdev,
+                                                     SISLANDS_MAX_NO_VREG_STEPS,
+                                                     &eg_pi->vddci_voltage_table);
     }
 
     if (pi->mvdd_control) {
@@ -3789,7 +3822,9 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
         }
 
         if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
-            si_trim_voltage_table_to_fit_state_table(rdev, &si_pi->mvdd_voltage_table);
+            si_trim_voltage_table_to_fit_state_table(rdev,
+                                                     SISLANDS_MAX_NO_VREG_STEPS,
+                                                     &si_pi->mvdd_voltage_table);
     }
 
     if (si_pi->vddc_phase_shed_control) {
@@ -4036,16 +4071,15 @@ static int si_force_switch_to_arb_f0(struct radeon_device *rdev)
 static u32 si_calculate_memory_refresh_rate(struct radeon_device *rdev,
                                             u32 engine_clock)
 {
-    struct rv7xx_power_info *pi = rv770_get_pi(rdev);
     u32 dram_rows;
     u32 dram_refresh_rate;
     u32 mc_arb_rfsh_rate;
     u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
 
-    if (pi->mem_gddr5)
-        dram_rows = 1 << (tmp + 10);
+    if (tmp >= 4)
+        dram_rows = 16384;
     else
-        dram_rows = DDR3_DRAM_ROWS;
+        dram_rows = 1 << (tmp + 10);
 
     dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
     mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
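The refresh-rate hunk above stops guessing row count from the memory type and instead decodes NOOFROWS from MC_ARB_RAMCFG; the now-unused DDR3_DRAM_ROWS constant (0x2000 = 8192 rows) was dropped in the first hunk of this file. What the decode evaluates to, as a sketch (values read off the code, not from register documentation):

/* tmp 0..3 -> 1024, 2048, 4096, 8192 rows; tmp >= 4 is capped at 16384 */
static unsigned int si_dram_rows(unsigned int noofrows)
{
    return (noofrows >= 4) ? 16384 : (1 << (noofrows + 10));
}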
@@ -5728,6 +5762,13 @@ int si_dpm_enable(struct radeon_device *rdev)
     struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
     int ret;
 
+    si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+                        RADEON_CG_BLOCK_MC |
+                        RADEON_CG_BLOCK_SDMA |
+                        RADEON_CG_BLOCK_BIF |
+                        RADEON_CG_BLOCK_UVD |
+                        RADEON_CG_BLOCK_HDP), false);
+
     if (si_is_smc_running(rdev))
         return -EINVAL;
     if (pi->voltage_control)
@@ -5847,6 +5888,13 @@ int si_dpm_enable(struct radeon_device *rdev)
 
     si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
 
+    si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+                        RADEON_CG_BLOCK_MC |
+                        RADEON_CG_BLOCK_SDMA |
+                        RADEON_CG_BLOCK_BIF |
+                        RADEON_CG_BLOCK_UVD |
+                        RADEON_CG_BLOCK_HDP), true);
+
     ni_update_current_ps(rdev, boot_ps);
 
     return 0;
@@ -5857,6 +5905,13 @@ void si_dpm_disable(struct radeon_device *rdev)
     struct rv7xx_power_info *pi = rv770_get_pi(rdev);
     struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
 
+    si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+                        RADEON_CG_BLOCK_MC |
+                        RADEON_CG_BLOCK_SDMA |
+                        RADEON_CG_BLOCK_BIF |
+                        RADEON_CG_BLOCK_UVD |
+                        RADEON_CG_BLOCK_HDP), false);
+
     if (!si_is_smc_running(rdev))
         return;
     si_disable_ulv(rdev);
@@ -5921,6 +5976,13 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
     struct radeon_ps *old_ps = &eg_pi->current_rps;
     int ret;
 
+    si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+                        RADEON_CG_BLOCK_MC |
+                        RADEON_CG_BLOCK_SDMA |
+                        RADEON_CG_BLOCK_BIF |
+                        RADEON_CG_BLOCK_UVD |
+                        RADEON_CG_BLOCK_HDP), false);
+
     ret = si_disable_ulv(rdev);
     if (ret) {
         DRM_ERROR("si_disable_ulv failed\n");
@@ -6013,16 +6075,18 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
         return ret;
     }
 
-#if 0
-    /* XXX */
     ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
     if (ret) {
         DRM_ERROR("si_dpm_force_performance_level failed\n");
         return ret;
     }
-#else
-    rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
-#endif
+
+    si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+                        RADEON_CG_BLOCK_MC |
+                        RADEON_CG_BLOCK_SDMA |
+                        RADEON_CG_BLOCK_BIF |
+                        RADEON_CG_BLOCK_UVD |
+                        RADEON_CG_BLOCK_HDP), true);
 
     return 0;
 }
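si_dpm_enable(), si_dpm_disable() and si_dpm_set_power_state() now all bracket their work the same way: dynamic clock gating is switched off before the SMC is touched and switched back on afterwards. The pattern, reduced to a sketch (CG_BLOCKS is shorthand for the six RADEON_CG_BLOCK_* flags used in the patch; error handling elided):

#define CG_BLOCKS (RADEON_CG_BLOCK_GFX | RADEON_CG_BLOCK_MC | \
                   RADEON_CG_BLOCK_SDMA | RADEON_CG_BLOCK_BIF | \
                   RADEON_CG_BLOCK_UVD | RADEON_CG_BLOCK_HDP)

static int example_dpm_transition(struct radeon_device *rdev)
{
    si_update_cg(rdev, CG_BLOCKS, false);   /* quiesce clock gating */
    /* ... reprogram the SMC / switch the power state ... */
    si_update_cg(rdev, CG_BLOCKS, true);    /* restore clock gating */
    return 0;
}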
@@ -6213,6 +6277,7 @@ static int si_parse_power_table(struct radeon_device *rdev)
     rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
     rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
     for (i = 0; i < state_array->ucNumEntries; i++) {
+        u8 *idx;
         power_state = (union pplib_power_state *)power_state_offset;
         non_clock_array_index = power_state->v2.nonClockInfoIndex;
         non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
@@ -6229,14 +6294,16 @@ static int si_parse_power_table(struct radeon_device *rdev)
                                           non_clock_info,
                                           non_clock_info_array->ucEntrySize);
         k = 0;
+        idx = (u8 *)&power_state->v2.clockInfoIndex[0];
         for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
-            clock_array_index = power_state->v2.clockInfoIndex[j];
+            clock_array_index = idx[j];
             if (clock_array_index >= clock_info_array->ucNumEntries)
                 continue;
             if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
                 break;
             clock_info = (union pplib_clock_info *)
-                &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+                ((u8 *)&clock_info_array->clockInfo[0] +
+                 (clock_array_index * clock_info_array->ucEntrySize));
             si_parse_pplib_clock_info(rdev,
                                       &rdev->pm.dpm.ps[i], k,
                                       clock_info);
@@ -6254,9 +6321,6 @@ int si_dpm_init(struct radeon_device *rdev)
     struct evergreen_power_info *eg_pi;
     struct ni_power_info *ni_pi;
     struct si_power_info *si_pi;
-    int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
-    u16 data_offset, size;
-    u8 frev, crev;
     struct atom_clock_dividers dividers;
     int ret;
     u32 mask;
@@ -6347,16 +6411,7 @@ int si_dpm_init(struct radeon_device *rdev)
     si_pi->vddc_phase_shed_control =
         radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT);
 
-    if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
-                               &frev, &crev, &data_offset)) {
-        pi->sclk_ss = true;
-        pi->mclk_ss = true;
-        pi->dynamic_ss = true;
-    } else {
-        pi->sclk_ss = false;
-        pi->mclk_ss = false;
-        pi->dynamic_ss = true;
-    }
+    rv770_get_engine_memory_ss(rdev);
 
     pi->asi = RV770_ASI_DFLT;
     pi->pasi = CYPRESS_HASI_DFLT;
@@ -6367,8 +6422,7 @@ int si_dpm_init(struct radeon_device *rdev)
     eg_pi->sclk_deep_sleep = true;
     si_pi->sclk_deep_sleep_above_low = false;
 
-    if (pi->gfx_clock_gating &&
-        (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+    if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
         pi->thermal_protection = true;
     else
         pi->thermal_protection = false;
@@ -6395,6 +6449,12 @@ int si_dpm_init(struct radeon_device *rdev)
 
     si_initialize_powertune_defaults(rdev);
 
+    /* make sure dc limits are valid */
+    if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+        (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+        rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+            rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
     return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 2c8da27a929f..52d2ab6b67a0 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -282,6 +282,10 @@
 
 #define DMIF_ADDR_CALC                                  0xC00
 
+#define PIPE0_DMIF_BUFFER_CONTROL                       0x0ca0
+#       define DMIF_BUFFERS_ALLOCATED(x)                ((x) << 0)
+#       define DMIF_BUFFERS_ALLOCATED_COMPLETED         (1 << 4)
+
 #define SRBM_STATUS                                     0xE50
 #define         GRBM_RQ_PENDING                         (1 << 5)
 #define         VMC_BUSY                                (1 << 8)
@@ -581,6 +585,7 @@
 #define         CLKS_MASK                               (0xfff << 0)
 
 #define HDP_HOST_PATH_CNTL                              0x2C00
+#define         CLOCK_GATING_DIS                        (1 << 23)
 #define HDP_NONSURFACE_BASE                             0x2C04
 #define HDP_NONSURFACE_INFO                             0x2C08
 #define HDP_NONSURFACE_SIZE                             0x2C0C
@@ -588,6 +593,8 @@
 #define HDP_ADDR_CONFIG                                 0x2F48
 #define HDP_MISC_CNTL                                   0x2F4C
 #define         HDP_FLUSH_INVALIDATE_CACHE              (1 << 0)
+#define HDP_MEM_POWER_LS                                0x2F50
+#define         HDP_LS_ENABLE                           (1 << 0)
 
 #define ATC_MISC_CG                                     0x3350
 
@@ -635,6 +642,54 @@
 
 #define HDP_REG_COHERENCY_FLUSH_CNTL                    0x54A0
 
+/* DCE6 ELD audio interface */
+#define AZ_F0_CODEC_ENDPOINT_INDEX                      0x5E00
+#       define AZ_ENDPOINT_REG_INDEX(x)                 (((x) & 0xff) << 0)
+#       define AZ_ENDPOINT_REG_WRITE_EN                 (1 << 8)
+#define AZ_F0_CODEC_ENDPOINT_DATA                       0x5E04
+
+#define AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER         0x25
+#define         SPEAKER_ALLOCATION(x)                   (((x) & 0x7f) << 0)
+#define         SPEAKER_ALLOCATION_MASK                 (0x7f << 0)
+#define         SPEAKER_ALLOCATION_SHIFT                0
+#define         HDMI_CONNECTION                         (1 << 16)
+#define         DP_CONNECTION                           (1 << 17)
+
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0       0x28 /* LPCM */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1       0x29 /* AC3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2       0x2A /* MPEG1 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3       0x2B /* MP3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4       0x2C /* MPEG2 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5       0x2D /* AAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6       0x2E /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7       0x2F /* ATRAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8       0x30 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9       0x31 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10      0x32 /* DTS-HD */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11      0x33 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12      0x34 /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13      0x35 /* WMA Pro */
+#       define MAX_CHANNELS(x)                          (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                 (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                     (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)          (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL         0x54
+#       define AUDIO_ENABLED                            (1 << 31)
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT  0x56
+#define         PORT_CONNECTIVITY_MASK                  (3 << 30)
+#define         PORT_CONNECTIVITY_SHIFT                 30
+
 #define DC_LB_MEMORY_SPLIT                              0x6b0c
 #define         DC_LB_MEMORY_CONFIG(x)                  ((x) << 20)
 
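The AZ_F0_CODEC_PIN_CONTROL_* registers above are not directly mapped; they sit behind the AZ_F0_CODEC_ENDPOINT_INDEX/DATA pair. A write helper in the style of the endpoint accessors in dce6_afmt.c (the exact helper name there is an assumption of this sketch):

static void example_az_endpoint_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
    /* select the endpoint register, then write through the data port */
    WREG32(AZ_F0_CODEC_ENDPOINT_INDEX,
           AZ_ENDPOINT_REG_INDEX(reg) | AZ_ENDPOINT_REG_WRITE_EN);
    WREG32(AZ_F0_CODEC_ENDPOINT_DATA, v);
}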
@@ -755,6 +810,17 @@
 /* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
 #define CRTC_STATUS_FRAME_COUNT                         0x6e98
 
+#define AFMT_AUDIO_SRC_CONTROL                          0x713c
+#define         AFMT_AUDIO_SRC_SELECT(x)                (((x) & 7) << 0)
+/* AFMT_AUDIO_SRC_SELECT
+ * 0 = stream0
+ * 1 = stream1
+ * 2 = stream2
+ * 3 = stream3
+ * 4 = stream4
+ * 5 = stream5
+ */
+
 #define GRBM_CNTL                                       0x8000
 #define         GRBM_READ_TIMEOUT(x)                    ((x) << 0)
 
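Typical use of the new AFMT mux: route one of the six audio streams to an AFMT block. The per-block register offset and the stream-id bookkeeping are assumptions of this sketch, not shown in the diff:

static void example_select_audio_src(struct radeon_device *rdev,
                                     u32 block_offset, u32 stream_id)
{
    WREG32(AFMT_AUDIO_SRC_CONTROL + block_offset,
           AFMT_AUDIO_SRC_SELECT(stream_id));
}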
@@ -1295,6 +1361,7 @@
 /* PCIE registers idx/data 0x30/0x34 */
 #define PCIE_CNTL2                                        0x1c /* PCIE */
 #       define SLV_MEM_LS_EN                              (1 << 16)
+#       define SLV_MEM_AGGRESSIVE_LS_EN                   (1 << 17)
 #       define MST_MEM_LS_EN                              (1 << 18)
 #       define REPLAY_MEM_LS_EN                           (1 << 19)
 #define PCIE_LC_STATUS1                                   0x28 /* PCIE */
@@ -1644,6 +1711,10 @@
 #       define DMA_IDLE                                   (1 << 0)
 #define DMA_TILING_CONFIG                                 0xd0b8
 
+#define DMA_POWER_CNTL                                    0xd0bc
+#       define MEM_POWER_OVERRIDE                         (1 << 8)
+#define DMA_CLK_CTRL                                      0xd0c0
+
 #define DMA_PG                                            0xd0d4
 #       define PG_CNTL_ENABLE                             (1 << 0)
 #define DMA_PGFSM_CONFIG                                  0xd0d8
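The new light-sleep and clock-gating bits follow radeon's usual read-modify-write pattern; sketched here for HDP_MEM_POWER_LS (the driver-side toggle lives in si.c and is not shown in this excerpt; the helper name below is illustrative):

static void example_hdp_ls(struct radeon_device *rdev, bool enable)
{
    u32 tmp = RREG32(HDP_MEM_POWER_LS);

    if (enable)
        tmp |= HDP_LS_ENABLE;
    else
        tmp &= ~HDP_LS_ENABLE;
    WREG32(HDP_MEM_POWER_LS, tmp);
}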
diff --git a/drivers/gpu/drm/radeon/smu7.h b/drivers/gpu/drm/radeon/smu7.h
new file mode 100644
index 000000000000..75a380a15292
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7.h
@@ -0,0 +1,170 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef SMU7_H
25#define SMU7_H
26
27#pragma pack(push, 1)
28
29#define SMU7_CONTEXT_ID_SMC 1
30#define SMU7_CONTEXT_ID_VBIOS 2
31
32
33#define SMU7_CONTEXT_ID_SMC 1
34#define SMU7_CONTEXT_ID_VBIOS 2
35
36#define SMU7_MAX_LEVELS_VDDC 8
37#define SMU7_MAX_LEVELS_VDDCI 4
38#define SMU7_MAX_LEVELS_MVDD 4
39#define SMU7_MAX_LEVELS_VDDNB 8
40
41#define SMU7_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE // SCLK + SQ DPM + ULV
42#define SMU7_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS // MCLK Levels DPM
43#define SMU7_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS // LCLK Levels
44#define SMU7_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS // PCIe speed and number of lanes.
45#define SMU7_MAX_LEVELS_UVD 8 // VCLK/DCLK levels for UVD.
46#define SMU7_MAX_LEVELS_VCE 8 // ECLK levels for VCE.
47#define SMU7_MAX_LEVELS_ACP 8 // ACLK levels for ACP.
48#define SMU7_MAX_LEVELS_SAMU 8 // SAMCLK levels for SAMU.
49#define SMU7_MAX_ENTRIES_SMIO 32 // Number of entries in SMIO table.
50
51#define DPM_NO_LIMIT 0
52#define DPM_NO_UP 1
53#define DPM_GO_DOWN 2
54#define DPM_GO_UP 3
55
56#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0
57#define SMU7_FIRST_DPM_MEMORY_LEVEL 0
58
59#define GPIO_CLAMP_MODE_VRHOT 1
60#define GPIO_CLAMP_MODE_THERM 2
61#define GPIO_CLAMP_MODE_DC 4
62
63#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
64#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
65#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
66#define SCRATCH_B_CURR_PCIE_INDEX_MASK (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
67#define SCRATCH_B_TARG_UVD_INDEX_SHIFT 6
68#define SCRATCH_B_TARG_UVD_INDEX_MASK (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
69#define SCRATCH_B_CURR_UVD_INDEX_SHIFT 9
70#define SCRATCH_B_CURR_UVD_INDEX_MASK (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
71#define SCRATCH_B_TARG_VCE_INDEX_SHIFT 12
72#define SCRATCH_B_TARG_VCE_INDEX_MASK (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
73#define SCRATCH_B_CURR_VCE_INDEX_SHIFT 15
74#define SCRATCH_B_CURR_VCE_INDEX_MASK (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
75#define SCRATCH_B_TARG_ACP_INDEX_SHIFT 18
76#define SCRATCH_B_TARG_ACP_INDEX_MASK (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
77#define SCRATCH_B_CURR_ACP_INDEX_SHIFT 21
78#define SCRATCH_B_CURR_ACP_INDEX_MASK (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
79#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
80#define SCRATCH_B_TARG_SAMU_INDEX_MASK (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
81#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
82#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
83
84
85struct SMU7_PIDController
86{
87 uint32_t Ki;
88 int32_t LFWindupUL;
89 int32_t LFWindupLL;
90 uint32_t StatePrecision;
91 uint32_t LfPrecision;
92 uint32_t LfOffset;
93 uint32_t MaxState;
94 uint32_t MaxLfFraction;
95 uint32_t StateShift;
96};
97
98typedef struct SMU7_PIDController SMU7_PIDController;
99
100// -------------------------------------------------------------------------------------------------------------------------
101#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */
102
103#define SMU7_SCLK_DPM_CONFIG_MASK 0x01
104#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK 0x02
105#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK 0x04
106#define SMU7_MCLK_DPM_CONFIG_MASK 0x08
107#define SMU7_UVD_DPM_CONFIG_MASK 0x10
108#define SMU7_VCE_DPM_CONFIG_MASK 0x20
109#define SMU7_ACP_DPM_CONFIG_MASK 0x40
110#define SMU7_SAMU_DPM_CONFIG_MASK 0x80
111#define SMU7_PCIEGEN_DPM_CONFIG_MASK 0x100
112
113#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE 0x00000001
114#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE 0x00000002
115#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE 0x00000100
116#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE 0x00000200
117#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000
118#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000
119
120struct SMU7_Firmware_Header
121{
122 uint32_t Digest[5];
123 uint32_t Version;
124 uint32_t HeaderSize;
125 uint32_t Flags;
126 uint32_t EntryPoint;
127 uint32_t CodeSize;
128 uint32_t ImageSize;
129
130 uint32_t Rtos;
131 uint32_t SoftRegisters;
132 uint32_t DpmTable;
133 uint32_t FanTable;
134 uint32_t CacConfigTable;
135 uint32_t CacStatusTable;
136
137 uint32_t mcRegisterTable;
138
139 uint32_t mcArbDramTimingTable;
140
141 uint32_t PmFuseTable;
142 uint32_t Globals;
143 uint32_t Reserved[42];
144 uint32_t Signature;
145};
146
147typedef struct SMU7_Firmware_Header SMU7_Firmware_Header;
148
149#define SMU7_FIRMWARE_HEADER_LOCATION 0x20000
150
151enum DisplayConfig {
152 PowerDown = 1,
153 DP54x4,
154 DP54x2,
155 DP54x1,
156 DP27x4,
157 DP27x2,
158 DP27x1,
159 HDMI297,
160 HDMI162,
161 LVDS,
162 DP324x4,
163 DP324x2,
164 DP324x1
165};
166
167#pragma pack(pop)
168
169#endif
170
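Because of the #pragma pack(push, 1) above, SMU7_Firmware_Header is laid out with no padding: 5 + 6 + 10 + 42 + 1 = 64 uint32_t fields, 256 bytes, which must match the SMC firmware's fixed layout exactly. A standalone C11 sanity check (assumes smu7.h is on the include path):

#include <stdint.h>
#include "smu7.h"   /* the header added above */

_Static_assert(sizeof(SMU7_Firmware_Header) == 256,
               "SMC firmware header layout must match the firmware exactly");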
diff --git a/drivers/gpu/drm/radeon/smu7_discrete.h b/drivers/gpu/drm/radeon/smu7_discrete.h
new file mode 100644
index 000000000000..82f70c90a9ee
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7_discrete.h
@@ -0,0 +1,486 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef SMU7_DISCRETE_H
25#define SMU7_DISCRETE_H
26
27#include "smu7.h"
28
29#pragma pack(push, 1)
30
31#define SMU7_DTE_ITERATIONS 5
32#define SMU7_DTE_SOURCES 3
33#define SMU7_DTE_SINKS 1
34#define SMU7_NUM_CPU_TES 0
35#define SMU7_NUM_GPU_TES 1
36#define SMU7_NUM_NON_TES 2
37
38struct SMU7_SoftRegisters
39{
40 uint32_t RefClockFrequency;
41 uint32_t PmTimerP;
42 uint32_t FeatureEnables;
43 uint32_t PreVBlankGap;
44 uint32_t VBlankTimeout;
45 uint32_t TrainTimeGap;
46
47 uint32_t MvddSwitchTime;
48 uint32_t LongestAcpiTrainTime;
49 uint32_t AcpiDelay;
50 uint32_t G5TrainTime;
51 uint32_t DelayMpllPwron;
52 uint32_t VoltageChangeTimeout;
53 uint32_t HandshakeDisables;
54
55 uint8_t DisplayPhy1Config;
56 uint8_t DisplayPhy2Config;
57 uint8_t DisplayPhy3Config;
58 uint8_t DisplayPhy4Config;
59
60 uint8_t DisplayPhy5Config;
61 uint8_t DisplayPhy6Config;
62 uint8_t DisplayPhy7Config;
63 uint8_t DisplayPhy8Config;
64
65 uint32_t AverageGraphicsA;
66 uint32_t AverageMemoryA;
67 uint32_t AverageGioA;
68
69 uint8_t SClkDpmEnabledLevels;
70 uint8_t MClkDpmEnabledLevels;
71 uint8_t LClkDpmEnabledLevels;
72 uint8_t PCIeDpmEnabledLevels;
73
74 uint8_t UVDDpmEnabledLevels;
75 uint8_t SAMUDpmEnabledLevels;
76 uint8_t ACPDpmEnabledLevels;
77 uint8_t VCEDpmEnabledLevels;
78
79 uint32_t DRAM_LOG_ADDR_H;
80 uint32_t DRAM_LOG_ADDR_L;
81 uint32_t DRAM_LOG_PHY_ADDR_H;
82 uint32_t DRAM_LOG_PHY_ADDR_L;
83 uint32_t DRAM_LOG_BUFF_SIZE;
84 uint32_t UlvEnterC;
85 uint32_t UlvTime;
86 uint32_t Reserved[3];
87
88};
89
90typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
91
92struct SMU7_Discrete_VoltageLevel
93{
94 uint16_t Voltage;
95 uint16_t StdVoltageHiSidd;
96 uint16_t StdVoltageLoSidd;
97 uint8_t Smio;
98 uint8_t padding;
99};
100
101typedef struct SMU7_Discrete_VoltageLevel SMU7_Discrete_VoltageLevel;
102
103struct SMU7_Discrete_GraphicsLevel
104{
105 uint32_t Flags;
106 uint32_t MinVddc;
107 uint32_t MinVddcPhases;
108
109 uint32_t SclkFrequency;
110
111 uint8_t padding1[2];
112 uint16_t ActivityLevel;
113
114 uint32_t CgSpllFuncCntl3;
115 uint32_t CgSpllFuncCntl4;
116 uint32_t SpllSpreadSpectrum;
117 uint32_t SpllSpreadSpectrum2;
118 uint32_t CcPwrDynRm;
119 uint32_t CcPwrDynRm1;
120 uint8_t SclkDid;
121 uint8_t DisplayWatermark;
122 uint8_t EnabledForActivity;
123 uint8_t EnabledForThrottle;
124 uint8_t UpH;
125 uint8_t DownH;
126 uint8_t VoltageDownH;
127 uint8_t PowerThrottle;
128 uint8_t DeepSleepDivId;
129 uint8_t padding[3];
130};
131
132typedef struct SMU7_Discrete_GraphicsLevel SMU7_Discrete_GraphicsLevel;
133
134struct SMU7_Discrete_ACPILevel
135{
136 uint32_t Flags;
137 uint32_t MinVddc;
138 uint32_t MinVddcPhases;
139 uint32_t SclkFrequency;
140 uint8_t SclkDid;
141 uint8_t DisplayWatermark;
142 uint8_t DeepSleepDivId;
143 uint8_t padding;
144 uint32_t CgSpllFuncCntl;
145 uint32_t CgSpllFuncCntl2;
146 uint32_t CgSpllFuncCntl3;
147 uint32_t CgSpllFuncCntl4;
148 uint32_t SpllSpreadSpectrum;
149 uint32_t SpllSpreadSpectrum2;
150 uint32_t CcPwrDynRm;
151 uint32_t CcPwrDynRm1;
152};
153
154typedef struct SMU7_Discrete_ACPILevel SMU7_Discrete_ACPILevel;
155
156struct SMU7_Discrete_Ulv
157{
158 uint32_t CcPwrDynRm;
159 uint32_t CcPwrDynRm1;
160 uint16_t VddcOffset;
161 uint8_t VddcOffsetVid;
162 uint8_t VddcPhase;
163 uint32_t Reserved;
164};
165
166typedef struct SMU7_Discrete_Ulv SMU7_Discrete_Ulv;
167
168struct SMU7_Discrete_MemoryLevel
169{
170 uint32_t MinVddc;
171 uint32_t MinVddcPhases;
172 uint32_t MinVddci;
173 uint32_t MinMvdd;
174
175 uint32_t MclkFrequency;
176
177 uint8_t EdcReadEnable;
178 uint8_t EdcWriteEnable;
179 uint8_t RttEnable;
180 uint8_t StutterEnable;
181
182 uint8_t StrobeEnable;
183 uint8_t StrobeRatio;
184 uint8_t EnabledForThrottle;
185 uint8_t EnabledForActivity;
186
187 uint8_t UpH;
188 uint8_t DownH;
189 uint8_t VoltageDownH;
190 uint8_t padding;
191
192 uint16_t ActivityLevel;
193 uint8_t DisplayWatermark;
194 uint8_t padding1;
195
196 uint32_t MpllFuncCntl;
197 uint32_t MpllFuncCntl_1;
198 uint32_t MpllFuncCntl_2;
199 uint32_t MpllAdFuncCntl;
200 uint32_t MpllDqFuncCntl;
201 uint32_t MclkPwrmgtCntl;
202 uint32_t DllCntl;
203 uint32_t MpllSs1;
204 uint32_t MpllSs2;
205};
206
207typedef struct SMU7_Discrete_MemoryLevel SMU7_Discrete_MemoryLevel;
208
209struct SMU7_Discrete_LinkLevel
210{
211 uint8_t PcieGenSpeed;
212 uint8_t PcieLaneCount;
213 uint8_t EnabledForActivity;
214 uint8_t Padding;
215 uint32_t DownT;
216 uint32_t UpT;
217 uint32_t Reserved;
218};
219
220typedef struct SMU7_Discrete_LinkLevel SMU7_Discrete_LinkLevel;
221
222
223struct SMU7_Discrete_MCArbDramTimingTableEntry
224{
225 uint32_t McArbDramTiming;
226 uint32_t McArbDramTiming2;
227 uint8_t McArbBurstTime;
228 uint8_t padding[3];
229};
230
231typedef struct SMU7_Discrete_MCArbDramTimingTableEntry SMU7_Discrete_MCArbDramTimingTableEntry;
232
233struct SMU7_Discrete_MCArbDramTimingTable
234{
235 SMU7_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
236};
237
238typedef struct SMU7_Discrete_MCArbDramTimingTable SMU7_Discrete_MCArbDramTimingTable;
239
240struct SMU7_Discrete_UvdLevel
241{
242 uint32_t VclkFrequency;
243 uint32_t DclkFrequency;
244 uint16_t MinVddc;
245 uint8_t MinVddcPhases;
246 uint8_t VclkDivider;
247 uint8_t DclkDivider;
248 uint8_t padding[3];
249};
250
251typedef struct SMU7_Discrete_UvdLevel SMU7_Discrete_UvdLevel;
252
253struct SMU7_Discrete_ExtClkLevel
254{
255 uint32_t Frequency;
256 uint16_t MinVoltage;
257 uint8_t MinPhases;
258 uint8_t Divider;
259};
260
261typedef struct SMU7_Discrete_ExtClkLevel SMU7_Discrete_ExtClkLevel;
262
263struct SMU7_Discrete_StateInfo
264{
265 uint32_t SclkFrequency;
266 uint32_t MclkFrequency;
267 uint32_t VclkFrequency;
268 uint32_t DclkFrequency;
269 uint32_t SamclkFrequency;
270 uint32_t AclkFrequency;
271 uint32_t EclkFrequency;
272 uint16_t MvddVoltage;
273 uint16_t padding16;
274 uint8_t DisplayWatermark;
275 uint8_t McArbIndex;
276 uint8_t McRegIndex;
277 uint8_t SeqIndex;
278 uint8_t SclkDid;
279 int8_t SclkIndex;
280 int8_t MclkIndex;
281 uint8_t PCIeGen;
282
283};
284
285typedef struct SMU7_Discrete_StateInfo SMU7_Discrete_StateInfo;
286
287
288struct SMU7_Discrete_DpmTable
289{
290 SMU7_PIDController GraphicsPIDController;
291 SMU7_PIDController MemoryPIDController;
292 SMU7_PIDController LinkPIDController;
293
294 uint32_t SystemFlags;
295
296
297 uint32_t SmioMaskVddcVid;
298 uint32_t SmioMaskVddcPhase;
299 uint32_t SmioMaskVddciVid;
300 uint32_t SmioMaskMvddVid;
301
302 uint32_t VddcLevelCount;
303 uint32_t VddciLevelCount;
304 uint32_t MvddLevelCount;
305
306 SMU7_Discrete_VoltageLevel VddcLevel [SMU7_MAX_LEVELS_VDDC];
307// SMU7_Discrete_VoltageLevel VddcStandardReference [SMU7_MAX_LEVELS_VDDC];
308 SMU7_Discrete_VoltageLevel VddciLevel [SMU7_MAX_LEVELS_VDDCI];
309 SMU7_Discrete_VoltageLevel MvddLevel [SMU7_MAX_LEVELS_MVDD];
310
311 uint8_t GraphicsDpmLevelCount;
312 uint8_t MemoryDpmLevelCount;
313 uint8_t LinkLevelCount;
314 uint8_t UvdLevelCount;
315 uint8_t VceLevelCount;
316 uint8_t AcpLevelCount;
317 uint8_t SamuLevelCount;
318 uint8_t MasterDeepSleepControl;
319 uint32_t Reserved[5];
320// uint32_t SamuDefaultLevel;
321
322 SMU7_Discrete_GraphicsLevel GraphicsLevel [SMU7_MAX_LEVELS_GRAPHICS];
323 SMU7_Discrete_MemoryLevel MemoryACPILevel;
324 SMU7_Discrete_MemoryLevel MemoryLevel [SMU7_MAX_LEVELS_MEMORY];
325 SMU7_Discrete_LinkLevel LinkLevel [SMU7_MAX_LEVELS_LINK];
326 SMU7_Discrete_ACPILevel ACPILevel;
327 SMU7_Discrete_UvdLevel UvdLevel [SMU7_MAX_LEVELS_UVD];
328 SMU7_Discrete_ExtClkLevel VceLevel [SMU7_MAX_LEVELS_VCE];
329 SMU7_Discrete_ExtClkLevel AcpLevel [SMU7_MAX_LEVELS_ACP];
330 SMU7_Discrete_ExtClkLevel SamuLevel [SMU7_MAX_LEVELS_SAMU];
331 SMU7_Discrete_Ulv Ulv;
332
333 uint32_t SclkStepSize;
334 uint32_t Smio [SMU7_MAX_ENTRIES_SMIO];
335
336 uint8_t UvdBootLevel;
337 uint8_t VceBootLevel;
338 uint8_t AcpBootLevel;
339 uint8_t SamuBootLevel;
340
341 uint8_t UVDInterval;
342 uint8_t VCEInterval;
343 uint8_t ACPInterval;
344 uint8_t SAMUInterval;
345
346 uint8_t GraphicsBootLevel;
347 uint8_t GraphicsVoltageChangeEnable;
348 uint8_t GraphicsThermThrottleEnable;
349 uint8_t GraphicsInterval;
350
351 uint8_t VoltageInterval;
352 uint8_t ThermalInterval;
353 uint16_t TemperatureLimitHigh;
354
355 uint16_t TemperatureLimitLow;
356 uint8_t MemoryBootLevel;
357 uint8_t MemoryVoltageChangeEnable;
358
359 uint8_t MemoryInterval;
360 uint8_t MemoryThermThrottleEnable;
361 uint16_t VddcVddciDelta;
362
363 uint16_t VoltageResponseTime;
364 uint16_t PhaseResponseTime;
365
366 uint8_t PCIeBootLinkLevel;
367 uint8_t PCIeGenInterval;
368 uint8_t DTEInterval;
369 uint8_t DTEMode;
370
371 uint8_t SVI2Enable;
372 uint8_t VRHotGpio;
373 uint8_t AcDcGpio;
374 uint8_t ThermGpio;
375
376 uint16_t PPM_PkgPwrLimit;
377 uint16_t PPM_TemperatureLimit;
378
379 uint16_t DefaultTdp;
380 uint16_t TargetTdp;
381
382 uint16_t FpsHighT;
383 uint16_t FpsLowT;
384
385 uint16_t BAPMTI_R [SMU7_DTE_ITERATIONS][SMU7_DTE_SOURCES][SMU7_DTE_SINKS];
386 uint16_t BAPMTI_RC [SMU7_DTE_ITERATIONS][SMU7_DTE_SOURCES][SMU7_DTE_SINKS];
387
388 uint8_t DTEAmbientTempBase;
389 uint8_t DTETjOffset;
390 uint8_t GpuTjMax;
391 uint8_t GpuTjHyst;
392
393 uint16_t BootVddc;
394 uint16_t BootVddci;
395
396 uint16_t BootMVdd;
397 uint16_t padding;
398
399 uint32_t BAPM_TEMP_GRADIENT;
400
401 uint32_t LowSclkInterruptT;
402};
403
404typedef struct SMU7_Discrete_DpmTable SMU7_Discrete_DpmTable;
405
406#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE 16
407#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU7_MAX_LEVELS_MEMORY
408
409struct SMU7_Discrete_MCRegisterAddress
410{
411 uint16_t s0;
412 uint16_t s1;
413};
414
415typedef struct SMU7_Discrete_MCRegisterAddress SMU7_Discrete_MCRegisterAddress;
416
417struct SMU7_Discrete_MCRegisterSet
418{
419 uint32_t value[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
420};
421
422typedef struct SMU7_Discrete_MCRegisterSet SMU7_Discrete_MCRegisterSet;
423
424struct SMU7_Discrete_MCRegisters
425{
426 uint8_t last;
427 uint8_t reserved[3];
428 SMU7_Discrete_MCRegisterAddress address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
429 SMU7_Discrete_MCRegisterSet data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT];
430};
431
432typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters;
433
434struct SMU7_Discrete_PmFuses {
435 // dw0-dw1
436 uint8_t BapmVddCVidHiSidd[8];
437
438 // dw2-dw3
439 uint8_t BapmVddCVidLoSidd[8];
440
441 // dw4-dw5
442 uint8_t VddCVid[8];
443
444 // dw6
445 uint8_t SviLoadLineEn;
446 uint8_t SviLoadLineVddC;
447 uint8_t SviLoadLineTrimVddC;
448 uint8_t SviLoadLineOffsetVddC;
449
450 // dw7
451 uint16_t TDC_VDDC_PkgLimit;
452 uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
453 uint8_t TDC_MAWt;
454
455 // dw8
456 uint8_t TdcWaterfallCtl;
457 uint8_t LPMLTemperatureMin;
458 uint8_t LPMLTemperatureMax;
459 uint8_t Reserved;
460
461 // dw9-dw10
462 uint8_t BapmVddCVidHiSidd2[8];
463
464 // dw11-dw12
465 uint32_t Reserved6[2];
466
467 // dw13-dw16
468 uint8_t GnbLPML[16];
469
470 // dw17
471 uint8_t GnbLPMLMaxVid;
472 uint8_t GnbLPMLMinVid;
473 uint8_t Reserved1[2];
474
475 // dw18
476 uint16_t BapmVddCBaseLeakageHiSidd;
477 uint16_t BapmVddCBaseLeakageLoSidd;
478};
479
480typedef struct SMU7_Discrete_PmFuses SMU7_Discrete_PmFuses;
481
482
483#pragma pack(pop)
484
485#endif
486
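These packed tables are byte-copied into SMC RAM verbatim, which is why every field is fixed-width and the whole header is pack(1). A sketch of an upload, assuming a copy helper shaped like the *_copy_bytes_to_smc() functions found elsewhere in radeon (the CI-specific consumer of these headers is not part of this diff, so the helper below is hypothetical):

int example_copy_bytes_to_smc(struct radeon_device *rdev, u32 smc_addr,
                              const u8 *src, u32 bytes, u32 limit);

static int example_upload_dpm_table(struct radeon_device *rdev, u32 smc_addr,
                                    const SMU7_Discrete_DpmTable *table)
{
    /* pack(1) guarantees the in-memory bytes are the wire format */
    return example_copy_bytes_to_smc(rdev, smc_addr, (const u8 *)table,
                                     sizeof(*table),
                                     0x20000 /* SMC RAM end; ASIC-specific */);
}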
diff --git a/drivers/gpu/drm/radeon/smu7_fusion.h b/drivers/gpu/drm/radeon/smu7_fusion.h
new file mode 100644
index 000000000000..78ada9ffd508
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7_fusion.h
@@ -0,0 +1,300 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef SMU7_FUSION_H
25#define SMU7_FUSION_H
26
27#include "smu7.h"
28
29#pragma pack(push, 1)
30
31#define SMU7_DTE_ITERATIONS 5
32#define SMU7_DTE_SOURCES 5
33#define SMU7_DTE_SINKS 3
34#define SMU7_NUM_CPU_TES 2
35#define SMU7_NUM_GPU_TES 1
36#define SMU7_NUM_NON_TES 2
37
38// All 'soft registers' should be uint32_t.
39struct SMU7_SoftRegisters
40{
41 uint32_t RefClockFrequency;
42 uint32_t PmTimerP;
43 uint32_t FeatureEnables;
44 uint32_t HandshakeDisables;
45
46 uint8_t DisplayPhy1Config;
47 uint8_t DisplayPhy2Config;
48 uint8_t DisplayPhy3Config;
49 uint8_t DisplayPhy4Config;
50
51 uint8_t DisplayPhy5Config;
52 uint8_t DisplayPhy6Config;
53 uint8_t DisplayPhy7Config;
54 uint8_t DisplayPhy8Config;
55
56 uint32_t AverageGraphicsA;
57 uint32_t AverageMemoryA;
58 uint32_t AverageGioA;
59
60 uint8_t SClkDpmEnabledLevels;
61 uint8_t MClkDpmEnabledLevels;
62 uint8_t LClkDpmEnabledLevels;
63 uint8_t PCIeDpmEnabledLevels;
64
65 uint8_t UVDDpmEnabledLevels;
66 uint8_t SAMUDpmEnabledLevels;
67 uint8_t ACPDpmEnabledLevels;
68 uint8_t VCEDpmEnabledLevels;
69
70 uint32_t DRAM_LOG_ADDR_H;
71 uint32_t DRAM_LOG_ADDR_L;
72 uint32_t DRAM_LOG_PHY_ADDR_H;
73 uint32_t DRAM_LOG_PHY_ADDR_L;
74 uint32_t DRAM_LOG_BUFF_SIZE;
75 uint32_t UlvEnterC;
76 uint32_t UlvTime;
77 uint32_t Reserved[3];
78
79};
80
81typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
82
83struct SMU7_Fusion_GraphicsLevel
84{
85 uint32_t MinVddNb;
86
87 uint32_t SclkFrequency;
88
89 uint8_t Vid;
90 uint8_t VidOffset;
91 uint16_t AT;
92
93 uint8_t PowerThrottle;
94 uint8_t GnbSlow;
95 uint8_t ForceNbPs1;
96 uint8_t SclkDid;
97
98 uint8_t DisplayWatermark;
99 uint8_t EnabledForActivity;
100 uint8_t EnabledForThrottle;
101 uint8_t UpH;
102
103 uint8_t DownH;
104 uint8_t VoltageDownH;
105 uint8_t DeepSleepDivId;
106
107 uint8_t ClkBypassCntl;
108
109 uint32_t reserved;
110};
111
112typedef struct SMU7_Fusion_GraphicsLevel SMU7_Fusion_GraphicsLevel;
113
114struct SMU7_Fusion_GIOLevel
115{
116 uint8_t EnabledForActivity;
117 uint8_t LclkDid;
118 uint8_t Vid;
119 uint8_t VoltageDownH;
120
121 uint32_t MinVddNb;
122
123 uint16_t ResidencyCounter;
124 uint8_t UpH;
125 uint8_t DownH;
126
127 uint32_t LclkFrequency;
128
129 uint8_t ActivityLevel;
130 uint8_t EnabledForThrottle;
131
132 uint8_t ClkBypassCntl;
133
134 uint8_t padding;
135};
136
137typedef struct SMU7_Fusion_GIOLevel SMU7_Fusion_GIOLevel;
138
139// UVD VCLK/DCLK state (level) definition.
140struct SMU7_Fusion_UvdLevel
141{
142 uint32_t VclkFrequency;
143 uint32_t DclkFrequency;
144 uint16_t MinVddNb;
145 uint8_t VclkDivider;
146 uint8_t DclkDivider;
147
148 uint8_t VClkBypassCntl;
149 uint8_t DClkBypassCntl;
150
151 uint8_t padding[2];
152
153};
154
155typedef struct SMU7_Fusion_UvdLevel SMU7_Fusion_UvdLevel;
156
157// Clocks for other external blocks (VCE, ACP, SAMU).
158struct SMU7_Fusion_ExtClkLevel
159{
160 uint32_t Frequency;
161 uint16_t MinVoltage;
162 uint8_t Divider;
163 uint8_t ClkBypassCntl;
164
165 uint32_t Reserved;
166};
167typedef struct SMU7_Fusion_ExtClkLevel SMU7_Fusion_ExtClkLevel;
168
169struct SMU7_Fusion_ACPILevel
170{
171 uint32_t Flags;
172 uint32_t MinVddNb;
173 uint32_t SclkFrequency;
174 uint8_t SclkDid;
175 uint8_t GnbSlow;
176 uint8_t ForceNbPs1;
177 uint8_t DisplayWatermark;
178 uint8_t DeepSleepDivId;
179 uint8_t padding[3];
180};
181
182typedef struct SMU7_Fusion_ACPILevel SMU7_Fusion_ACPILevel;
183
184struct SMU7_Fusion_NbDpm
185{
186 uint8_t DpmXNbPsHi;
187 uint8_t DpmXNbPsLo;
188 uint8_t Dpm0PgNbPsHi;
189 uint8_t Dpm0PgNbPsLo;
190 uint8_t EnablePsi1;
191 uint8_t SkipDPM0;
192 uint8_t SkipPG;
193 uint8_t Hysteresis;
194 uint8_t EnableDpmPstatePoll;
195 uint8_t padding[3];
196};
197
198typedef struct SMU7_Fusion_NbDpm SMU7_Fusion_NbDpm;
199
200struct SMU7_Fusion_StateInfo
201{
202 uint32_t SclkFrequency;
203 uint32_t LclkFrequency;
204 uint32_t VclkFrequency;
205 uint32_t DclkFrequency;
206 uint32_t SamclkFrequency;
207 uint32_t AclkFrequency;
208 uint32_t EclkFrequency;
209 uint8_t DisplayWatermark;
210 uint8_t McArbIndex;
211 int8_t SclkIndex;
212 int8_t MclkIndex;
213};
214
215typedef struct SMU7_Fusion_StateInfo SMU7_Fusion_StateInfo;
216
217struct SMU7_Fusion_DpmTable
218{
219 uint32_t SystemFlags;
220
221 SMU7_PIDController GraphicsPIDController;
222 SMU7_PIDController GioPIDController;
223
224 uint8_t GraphicsDpmLevelCount;
225 uint8_t GIOLevelCount;
226 uint8_t UvdLevelCount;
227 uint8_t VceLevelCount;
228
229 uint8_t AcpLevelCount;
230 uint8_t SamuLevelCount;
231 uint16_t FpsHighT;
232
233 SMU7_Fusion_GraphicsLevel GraphicsLevel [SMU__NUM_SCLK_DPM_STATE];
234 SMU7_Fusion_ACPILevel ACPILevel;
235 SMU7_Fusion_UvdLevel UvdLevel [SMU7_MAX_LEVELS_UVD];
236 SMU7_Fusion_ExtClkLevel VceLevel [SMU7_MAX_LEVELS_VCE];
237 SMU7_Fusion_ExtClkLevel AcpLevel [SMU7_MAX_LEVELS_ACP];
238 SMU7_Fusion_ExtClkLevel SamuLevel [SMU7_MAX_LEVELS_SAMU];
239
240 uint8_t UvdBootLevel;
241 uint8_t VceBootLevel;
242 uint8_t AcpBootLevel;
243 uint8_t SamuBootLevel;
244 uint8_t UVDInterval;
245 uint8_t VCEInterval;
246 uint8_t ACPInterval;
247 uint8_t SAMUInterval;
248
249 uint8_t GraphicsBootLevel;
250 uint8_t GraphicsInterval;
251 uint8_t GraphicsThermThrottleEnable;
252 uint8_t GraphicsVoltageChangeEnable;
253
254 uint8_t GraphicsClkSlowEnable;
255 uint8_t GraphicsClkSlowDivider;
256 uint16_t FpsLowT;
257
258 uint32_t DisplayCac;
259 uint32_t LowSclkInterruptT;
260
261 uint32_t DRAM_LOG_ADDR_H;
262 uint32_t DRAM_LOG_ADDR_L;
263 uint32_t DRAM_LOG_PHY_ADDR_H;
264 uint32_t DRAM_LOG_PHY_ADDR_L;
265 uint32_t DRAM_LOG_BUFF_SIZE;
266
267};
268
269struct SMU7_Fusion_GIODpmTable
270{
271
272 SMU7_Fusion_GIOLevel GIOLevel [SMU7_MAX_LEVELS_GIO];
273
274 SMU7_PIDController GioPIDController;
275
276 uint32_t GIOLevelCount;
277
278 uint8_t Enable;
279 uint8_t GIOVoltageChangeEnable;
280 uint8_t GIOBootLevel;
281 uint8_t padding;
282 uint8_t padding1[2];
283 uint8_t TargetState;
284 uint8_t CurrenttState;
285 uint8_t ThrottleOnHtc;
286 uint8_t ThermThrottleStatus;
287 uint8_t ThermThrottleTempSelect;
288 uint8_t ThermThrottleEnable;
289 uint16_t TemperatureLimitHigh;
290 uint16_t TemperatureLimitLow;
291
292};
293
294typedef struct SMU7_Fusion_DpmTable SMU7_Fusion_DpmTable;
295typedef struct SMU7_Fusion_GIODpmTable SMU7_Fusion_GIODpmTable;
296
297#pragma pack(pop)
298
299#endif
300
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index c0a850319908..864761c0120e 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1483,6 +1483,7 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
1483 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); 1483 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
1484 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); 1484 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
1485 for (i = 0; i < state_array->ucNumEntries; i++) { 1485 for (i = 0; i < state_array->ucNumEntries; i++) {
1486 u8 *idx;
1486 power_state = (union pplib_power_state *)power_state_offset; 1487 power_state = (union pplib_power_state *)power_state_offset;
1487 non_clock_array_index = power_state->v2.nonClockInfoIndex; 1488 non_clock_array_index = power_state->v2.nonClockInfoIndex;
1488 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 1489 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
@@ -1496,12 +1497,15 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
1496 } 1497 }
1497 rdev->pm.dpm.ps[i].ps_priv = ps; 1498 rdev->pm.dpm.ps[i].ps_priv = ps;
1498 k = 0; 1499 k = 0;
1500 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
1499 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 1501 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
1500 clock_array_index = power_state->v2.clockInfoIndex[j]; 1502 clock_array_index = idx[j];
1501 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) 1503 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
1502 break; 1504 break;
1505
1503 clock_info = (union pplib_clock_info *) 1506 clock_info = (union pplib_clock_info *)
1504 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 1507 ((u8 *)&clock_info_array->clockInfo[0] +
1508 (clock_array_index * clock_info_array->ucEntrySize));
1505 sumo_parse_pplib_clock_info(rdev, 1509 sumo_parse_pplib_clock_info(rdev,
1506 &rdev->pm.dpm.ps[i], k, 1510 &rdev->pm.dpm.ps[i], k,
1507 clock_info); 1511 clock_info);
@@ -1530,6 +1534,20 @@ u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
1530 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; 1534 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
1531} 1535}
1532 1536
1537u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev,
1538 struct sumo_vid_mapping_table *vid_mapping_table,
1539 u32 vid_7bit)
1540{
1541 u32 i;
1542
1543 for (i = 0; i < vid_mapping_table->num_entries; i++) {
1544 if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
1545 return vid_mapping_table->entries[i].vid_2bit;
1546 }
1547
1548 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
1549}
1550
1533static u16 sumo_convert_voltage_index_to_value(struct radeon_device *rdev, 1551static u16 sumo_convert_voltage_index_to_value(struct radeon_device *rdev,
1534 u32 vid_2bit) 1552 u32 vid_2bit)
1535{ 1553{
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.h b/drivers/gpu/drm/radeon/sumo_dpm.h
index 07dda299c784..db1ea32a907b 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.h
+++ b/drivers/gpu/drm/radeon/sumo_dpm.h
@@ -202,6 +202,9 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
202u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev, 202u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
203 struct sumo_vid_mapping_table *vid_mapping_table, 203 struct sumo_vid_mapping_table *vid_mapping_table,
204 u32 vid_2bit); 204 u32 vid_2bit);
205u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev,
206 struct sumo_vid_mapping_table *vid_mapping_table,
207 u32 vid_7bit);
205u32 sumo_get_sleep_divider_from_id(u32 id); 208u32 sumo_get_sleep_divider_from_id(u32 id);
206u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev, 209u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
207 u32 sclk, 210 u32 sclk,
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index a1eb5f59939f..b07b7b8f1aff 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1675,6 +1675,7 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
1675 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); 1675 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
1676 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); 1676 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
1677 for (i = 0; i < state_array->ucNumEntries; i++) { 1677 for (i = 0; i < state_array->ucNumEntries; i++) {
1678 u8 *idx;
1678 power_state = (union pplib_power_state *)power_state_offset; 1679 power_state = (union pplib_power_state *)power_state_offset;
1679 non_clock_array_index = power_state->v2.nonClockInfoIndex; 1680 non_clock_array_index = power_state->v2.nonClockInfoIndex;
1680 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 1681 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
@@ -1688,14 +1689,16 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
1688 } 1689 }
1689 rdev->pm.dpm.ps[i].ps_priv = ps; 1690 rdev->pm.dpm.ps[i].ps_priv = ps;
1690 k = 0; 1691 k = 0;
1692 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
1691 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 1693 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
1692 clock_array_index = power_state->v2.clockInfoIndex[j]; 1694 clock_array_index = idx[j];
1693 if (clock_array_index >= clock_info_array->ucNumEntries) 1695 if (clock_array_index >= clock_info_array->ucNumEntries)
1694 continue; 1696 continue;
1695 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) 1697 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
1696 break; 1698 break;
1697 clock_info = (union pplib_clock_info *) 1699 clock_info = (union pplib_clock_info *)
1698 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 1700 ((u8 *)&clock_info_array->clockInfo[0] +
1701 (clock_array_index * clock_info_array->ucEntrySize));
1699 trinity_parse_pplib_clock_info(rdev, 1702 trinity_parse_pplib_clock_info(rdev,
1700 &rdev->pm.dpm.ps[i], k, 1703 &rdev->pm.dpm.ps[i], k,
1701 clock_info); 1704 clock_info);
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
new file mode 100644
index 000000000000..7266805d9786
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -0,0 +1,436 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Christian König <christian.koenig@amd.com>
23 */
24
25#include <drm/drmP.h>
26#include "radeon.h"
27#include "radeon_asic.h"
28#include "r600d.h"
29
30/**
31 * uvd_v1_0_get_rptr - get read pointer
32 *
33 * @rdev: radeon_device pointer
34 * @ring: radeon_ring pointer
35 *
36 * Returns the current hardware read pointer
37 */
38uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
39 struct radeon_ring *ring)
40{
41 return RREG32(UVD_RBC_RB_RPTR);
42}
43
44/**
45 * uvd_v1_0_get_wptr - get write pointer
46 *
47 * @rdev: radeon_device pointer
48 * @ring: radeon_ring pointer
49 *
50 * Returns the current hardware write pointer
51 */
52uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
53 struct radeon_ring *ring)
54{
55 return RREG32(UVD_RBC_RB_WPTR);
56}
57
58/**
59 * uvd_v1_0_set_wptr - set write pointer
60 *
61 * @rdev: radeon_device pointer
62 * @ring: radeon_ring pointer
63 *
64 * Commits the write pointer to the hardware
65 */
66void uvd_v1_0_set_wptr(struct radeon_device *rdev,
67 struct radeon_ring *ring)
68{
69 WREG32(UVD_RBC_RB_WPTR, ring->wptr);
70}
71
72/**
73 * uvd_v1_0_init - start and test UVD block
74 *
75 * @rdev: radeon_device pointer
76 *
77 * Initialize the hardware, boot up the VCPU and do some testing
78 */
79int uvd_v1_0_init(struct radeon_device *rdev)
80{
81 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
82 uint32_t tmp;
83 int r;
84
85 /* raise clocks while booting up the VCPU */
86 radeon_set_uvd_clocks(rdev, 53300, 40000);
87
88 r = uvd_v1_0_start(rdev);
89 if (r)
90 goto done;
91
92 ring->ready = true;
93 r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
94 if (r) {
95 ring->ready = false;
96 goto done;
97 }
98
99 r = radeon_ring_lock(rdev, ring, 10);
100 if (r) {
101 DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
102 goto done;
103 }
104
105 tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
106 radeon_ring_write(ring, tmp);
107 radeon_ring_write(ring, 0xFFFFF);
108
109 tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
110 radeon_ring_write(ring, tmp);
111 radeon_ring_write(ring, 0xFFFFF);
112
113 tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
114 radeon_ring_write(ring, tmp);
115 radeon_ring_write(ring, 0xFFFFF);
116
117 /* Clear timeout status bits */
118 radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
119 radeon_ring_write(ring, 0x8);
120
121 radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
122 radeon_ring_write(ring, 3);
123
124 radeon_ring_unlock_commit(rdev, ring);
125
126done:
127 /* lower clocks again */
128 radeon_set_uvd_clocks(rdev, 0, 0);
129
130 if (!r)
131 DRM_INFO("UVD initialized successfully.\n");
132
133 return r;
134}
135
136/**
137 * uvd_v1_0_fini - stop the hardware block
138 *
139 * @rdev: radeon_device pointer
140 *
141 * Stop the UVD block, mark ring as not ready any more
142 */
143void uvd_v1_0_fini(struct radeon_device *rdev)
144{
145 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
146
147 uvd_v1_0_stop(rdev);
148 ring->ready = false;
149}
150
151/**
152 * uvd_v1_0_start - start UVD block
153 *
154 * @rdev: radeon_device pointer
155 *
156 * Setup and start the UVD block
157 */
158int uvd_v1_0_start(struct radeon_device *rdev)
159{
160 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
161 uint32_t rb_bufsz;
162 int i, j, r;
163
164 /* disable byte swapping */
165 u32 lmi_swap_cntl = 0;
166 u32 mp_swap_cntl = 0;
167
168 /* disable clock gating */
169 WREG32(UVD_CGC_GATE, 0);
170
171 /* disable interupt */
172 WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
173
174 /* Stall UMC and register bus before resetting VCPU */
175 WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
176 WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
177 mdelay(1);
178
179 /* put LMI, VCPU, RBC etc... into reset */
180 WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
181 LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
182 CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
183 mdelay(5);
184
185 /* take UVD block out of reset */
186 WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
187 mdelay(5);
188
189 /* initialize UVD memory controller */
190 WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
191 (1 << 21) | (1 << 9) | (1 << 20));
192
193#ifdef __BIG_ENDIAN
194 /* swap (8 in 32) RB and IB */
195 lmi_swap_cntl = 0xa;
196 mp_swap_cntl = 0;
197#endif
198 WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
199 WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
200
201 WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
202 WREG32(UVD_MPC_SET_MUXA1, 0x0);
203 WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
204 WREG32(UVD_MPC_SET_MUXB1, 0x0);
205 WREG32(UVD_MPC_SET_ALU, 0);
206 WREG32(UVD_MPC_SET_MUX, 0x88);
207
208 /* take all subblocks out of reset, except VCPU */
209 WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
210 mdelay(5);
211
212 /* enable VCPU clock */
213 WREG32(UVD_VCPU_CNTL, 1 << 9);
214
215 /* enable UMC */
216 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
217
218 /* boot up the VCPU */
219 WREG32(UVD_SOFT_RESET, 0);
220 mdelay(10);
221
222 WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
223
224 for (i = 0; i < 10; ++i) {
225 uint32_t status;
226 for (j = 0; j < 100; ++j) {
227 status = RREG32(UVD_STATUS);
228 if (status & 2)
229 break;
230 mdelay(10);
231 }
232 r = 0;
233 if (status & 2)
234 break;
235
236 DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
237 WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
238 mdelay(10);
239 WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
240 mdelay(10);
241 r = -1;
242 }
243
244 if (r) {
245 DRM_ERROR("UVD not responding, giving up!!!\n");
246 return r;
247 }
248
249 /* enable interupt */
250 WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
251
252 /* force RBC into idle state */
253 WREG32(UVD_RBC_RB_CNTL, 0x11010101);
254
255 /* Set the write pointer delay */
256 WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
257
258 /* programm the 4GB memory segment for rptr and ring buffer */
259 WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
260 (0x7 << 16) | (0x1 << 31));
261
262 /* Initialize the ring buffer's read and write pointers */
263 WREG32(UVD_RBC_RB_RPTR, 0x0);
264
265 ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
266 WREG32(UVD_RBC_RB_WPTR, ring->wptr);
267
268 /* set the ring address */
269 WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
270
271 /* Set ring buffer size */
272 rb_bufsz = order_base_2(ring->ring_size);
273 rb_bufsz = (0x1 << 8) | rb_bufsz;
274 WREG32_P(UVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);
275
276 return 0;
277}
278
279/**
280 * uvd_v1_0_stop - stop UVD block
281 *
282 * @rdev: radeon_device pointer
283 *
284 * stop the UVD block
285 */
286void uvd_v1_0_stop(struct radeon_device *rdev)
287{
288 /* force RBC into idle state */
289 WREG32(UVD_RBC_RB_CNTL, 0x11010101);
290
291 /* Stall UMC and register bus before resetting VCPU */
292 WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
293 WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
294 mdelay(1);
295
296 /* put VCPU into reset */
297 WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
298 mdelay(5);
299
300 /* disable VCPU clock */
301 WREG32(UVD_VCPU_CNTL, 0x0);
302
303 /* Unstall UMC and register bus */
304 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
305 WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
306}
307
308/**
309 * uvd_v1_0_ring_test - register write test
310 *
311 * @rdev: radeon_device pointer
312 * @ring: radeon_ring pointer
313 *
314 * Test if we can successfully write to the context register
315 */
316int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
317{
318 uint32_t tmp = 0;
319 unsigned i;
320 int r;
321
322 WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
323 r = radeon_ring_lock(rdev, ring, 3);
324 if (r) {
325 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
326 ring->idx, r);
327 return r;
328 }
329 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
330 radeon_ring_write(ring, 0xDEADBEEF);
331 radeon_ring_unlock_commit(rdev, ring);
332 for (i = 0; i < rdev->usec_timeout; i++) {
333 tmp = RREG32(UVD_CONTEXT_ID);
334 if (tmp == 0xDEADBEEF)
335 break;
336 DRM_UDELAY(1);
337 }
338
339 if (i < rdev->usec_timeout) {
340 DRM_INFO("ring test on %d succeeded in %d usecs\n",
341 ring->idx, i);
342 } else {
343 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
344 ring->idx, tmp);
345 r = -EINVAL;
346 }
347 return r;
348}
349
350/**
351 * uvd_v1_0_semaphore_emit - emit semaphore command
352 *
353 * @rdev: radeon_device pointer
354 * @ring: radeon_ring pointer
355 * @semaphore: semaphore to emit commands for
356 * @emit_wait: true if we should emit a wait command
357 *
358 * Emit a semaphore command (either wait or signal) to the UVD ring.
359 */
360void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
361 struct radeon_ring *ring,
362 struct radeon_semaphore *semaphore,
363 bool emit_wait)
364{
365 uint64_t addr = semaphore->gpu_addr;
366
367 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
368 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
369
370 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
371 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
372
373 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
374 radeon_ring_write(ring, emit_wait ? 1 : 0);
375}
376
377/**
378 * uvd_v1_0_ib_execute - execute indirect buffer
379 *
380 * @rdev: radeon_device pointer
381 * @ib: indirect buffer to execute
382 *
383 * Write ring commands to execute the indirect buffer
384 */
385void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
386{
387 struct radeon_ring *ring = &rdev->ring[ib->ring];
388
389 radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
390 radeon_ring_write(ring, ib->gpu_addr);
391 radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
392 radeon_ring_write(ring, ib->length_dw);
393}
394
395/**
396 * uvd_v1_0_ib_test - test ib execution
397 *
398 * @rdev: radeon_device pointer
399 * @ring: radeon_ring pointer
400 *
401 * Test if we can successfully execute an IB
402 */
403int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
404{
405 struct radeon_fence *fence = NULL;
406 int r;
407
408 r = radeon_set_uvd_clocks(rdev, 53300, 40000);
409 if (r) {
410 DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
411 return r;
412 }
413
414 r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
415 if (r) {
416 DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
417 goto error;
418 }
419
420 r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
421 if (r) {
422 DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
423 goto error;
424 }
425
426 r = radeon_fence_wait(fence, false);
427 if (r) {
428 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
429 goto error;
430 }
431 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
432error:
433 radeon_fence_unref(&fence);
434 radeon_set_uvd_clocks(rdev, 0, 0);
435 return r;
436}
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
new file mode 100644
index 000000000000..b19ef4951085
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -0,0 +1,165 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Christian König <christian.koenig@amd.com>
23 */
24
25#include <linux/firmware.h>
26#include <drm/drmP.h>
27#include "radeon.h"
28#include "radeon_asic.h"
29#include "rv770d.h"
30
31/**
32 * uvd_v2_2_fence_emit - emit an fence & trap command
33 *
34 * @rdev: radeon_device pointer
35 * @fence: fence to emit
36 *
37 * Write a fence and a trap command to the ring.
38 */
39void uvd_v2_2_fence_emit(struct radeon_device *rdev,
40 struct radeon_fence *fence)
41{
42 struct radeon_ring *ring = &rdev->ring[fence->ring];
43 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
44
45 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
46 radeon_ring_write(ring, fence->seq);
47 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
48 radeon_ring_write(ring, addr & 0xffffffff);
49 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
50 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
51 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
52 radeon_ring_write(ring, 0);
53
54 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
55 radeon_ring_write(ring, 0);
56 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
57 radeon_ring_write(ring, 0);
58 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
59 radeon_ring_write(ring, 2);
60 return;
61}
62
63/**
64 * uvd_v2_2_resume - memory controller programming
65 *
66 * @rdev: radeon_device pointer
67 *
68 * Let the UVD memory controller know it's offsets
69 */
70int uvd_v2_2_resume(struct radeon_device *rdev)
71{
72 uint64_t addr;
73 uint32_t chip_id, size;
74 int r;
75
76 r = radeon_uvd_resume(rdev);
77 if (r)
78 return r;
79
80 /* programm the VCPU memory controller bits 0-27 */
81 addr = rdev->uvd.gpu_addr >> 3;
82 size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
83 WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
84 WREG32(UVD_VCPU_CACHE_SIZE0, size);
85
86 addr += size;
87 size = RADEON_UVD_STACK_SIZE >> 3;
88 WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
89 WREG32(UVD_VCPU_CACHE_SIZE1, size);
90
91 addr += size;
92 size = RADEON_UVD_HEAP_SIZE >> 3;
93 WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
94 WREG32(UVD_VCPU_CACHE_SIZE2, size);
95
96 /* bits 28-31 */
97 addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
98 WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
99
100 /* bits 32-39 */
101 addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
102 WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
103
104 /* tell firmware which hardware it is running on */
105 switch (rdev->family) {
106 default:
107 return -EINVAL;
108 case CHIP_RV710:
109 chip_id = 0x01000005;
110 break;
111 case CHIP_RV730:
112 chip_id = 0x01000006;
113 break;
114 case CHIP_RV740:
115 chip_id = 0x01000007;
116 break;
117 case CHIP_CYPRESS:
118 case CHIP_HEMLOCK:
119 chip_id = 0x01000008;
120 break;
121 case CHIP_JUNIPER:
122 chip_id = 0x01000009;
123 break;
124 case CHIP_REDWOOD:
125 chip_id = 0x0100000a;
126 break;
127 case CHIP_CEDAR:
128 chip_id = 0x0100000b;
129 break;
130 case CHIP_SUMO:
131 case CHIP_SUMO2:
132 chip_id = 0x0100000c;
133 break;
134 case CHIP_PALM:
135 chip_id = 0x0100000e;
136 break;
137 case CHIP_CAYMAN:
138 chip_id = 0x0100000f;
139 break;
140 case CHIP_BARTS:
141 chip_id = 0x01000010;
142 break;
143 case CHIP_TURKS:
144 chip_id = 0x01000011;
145 break;
146 case CHIP_CAICOS:
147 chip_id = 0x01000012;
148 break;
149 case CHIP_TAHITI:
150 chip_id = 0x01000014;
151 break;
152 case CHIP_VERDE:
153 chip_id = 0x01000015;
154 break;
155 case CHIP_PITCAIRN:
156 chip_id = 0x01000016;
157 break;
158 case CHIP_ARUBA:
159 chip_id = 0x01000017;
160 break;
161 }
162 WREG32(UVD_VCPU_CHIP_ID, chip_id);
163
164 return 0;
165}
diff --git a/drivers/gpu/drm/radeon/uvd_v3_1.c b/drivers/gpu/drm/radeon/uvd_v3_1.c
new file mode 100644
index 000000000000..5b6fa1f62d4e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v3_1.c
@@ -0,0 +1,55 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Christian König <christian.koenig@amd.com>
23 */
24
25#include <drm/drmP.h>
26#include "radeon.h"
27#include "radeon_asic.h"
28#include "nid.h"
29
30/**
31 * uvd_v3_1_semaphore_emit - emit semaphore command
32 *
33 * @rdev: radeon_device pointer
34 * @ring: radeon_ring pointer
35 * @semaphore: semaphore to emit commands for
36 * @emit_wait: true if we should emit a wait command
37 *
38 * Emit a semaphore command (either wait or signal) to the UVD ring.
39 */
40void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
41 struct radeon_ring *ring,
42 struct radeon_semaphore *semaphore,
43 bool emit_wait)
44{
45 uint64_t addr = semaphore->gpu_addr;
46
47 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
48 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
49
50 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
51 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
52
53 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
54 radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
55}
diff --git a/drivers/gpu/drm/radeon/uvd_v4_2.c b/drivers/gpu/drm/radeon/uvd_v4_2.c
new file mode 100644
index 000000000000..d04d5073eef2
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v4_2.c
@@ -0,0 +1,68 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Christian König <christian.koenig@amd.com>
23 */
24
25#include <linux/firmware.h>
26#include <drm/drmP.h>
27#include "radeon.h"
28#include "radeon_asic.h"
29#include "cikd.h"
30
31/**
32 * uvd_v4_2_resume - memory controller programming
33 *
34 * @rdev: radeon_device pointer
35 *
36 * Let the UVD memory controller know it's offsets
37 */
38int uvd_v4_2_resume(struct radeon_device *rdev)
39{
40 uint64_t addr;
41 uint32_t size;
42
43 /* programm the VCPU memory controller bits 0-27 */
44 addr = rdev->uvd.gpu_addr >> 3;
45 size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
46 WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
47 WREG32(UVD_VCPU_CACHE_SIZE0, size);
48
49 addr += size;
50 size = RADEON_UVD_STACK_SIZE >> 3;
51 WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
52 WREG32(UVD_VCPU_CACHE_SIZE1, size);
53
54 addr += size;
55 size = RADEON_UVD_HEAP_SIZE >> 3;
56 WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
57 WREG32(UVD_VCPU_CACHE_SIZE2, size);
58
59 /* bits 28-31 */
60 addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
61 WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
62
63 /* bits 32-39 */
64 addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
65 WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
66
67 return 0;
68}
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 72887df8dd76..c590cd9dca0b 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -7,3 +7,10 @@ config DRM_RCAR_DU
7 help 7 help
8 Choose this option if you have an R-Car chipset. 8 Choose this option if you have an R-Car chipset.
9 If M is selected the module will be called rcar-du-drm. 9 If M is selected the module will be called rcar-du-drm.
10
11config DRM_RCAR_LVDS
12 bool "R-Car DU LVDS Encoder Support"
13 depends on DRM_RCAR_DU
14 help
15 Enable support the R-Car Display Unit embedded LVDS encoders
16 (currently only on R8A7790).
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 7333c0094015..12b8d4477835 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -1,8 +1,12 @@
1rcar-du-drm-y := rcar_du_crtc.o \ 1rcar-du-drm-y := rcar_du_crtc.o \
2 rcar_du_drv.o \ 2 rcar_du_drv.o \
3 rcar_du_encoder.o \
4 rcar_du_group.o \
3 rcar_du_kms.o \ 5 rcar_du_kms.o \
4 rcar_du_lvds.o \ 6 rcar_du_lvdscon.o \
5 rcar_du_plane.o \ 7 rcar_du_plane.o \
6 rcar_du_vga.o 8 rcar_du_vgacon.o
7 9
8obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o 10rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o
11
12obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 24183fb93592..a9d24e4bf792 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -23,30 +23,26 @@
23#include "rcar_du_crtc.h" 23#include "rcar_du_crtc.h"
24#include "rcar_du_drv.h" 24#include "rcar_du_drv.h"
25#include "rcar_du_kms.h" 25#include "rcar_du_kms.h"
26#include "rcar_du_lvds.h"
27#include "rcar_du_plane.h" 26#include "rcar_du_plane.h"
28#include "rcar_du_regs.h" 27#include "rcar_du_regs.h"
29#include "rcar_du_vga.h"
30
31#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
32 28
33static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg) 29static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
34{ 30{
35 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; 31 struct rcar_du_device *rcdu = rcrtc->group->dev;
36 32
37 return rcar_du_read(rcdu, rcrtc->mmio_offset + reg); 33 return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
38} 34}
39 35
40static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data) 36static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
41{ 37{
42 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; 38 struct rcar_du_device *rcdu = rcrtc->group->dev;
43 39
44 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data); 40 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
45} 41}
46 42
47static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr) 43static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
48{ 44{
49 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; 45 struct rcar_du_device *rcdu = rcrtc->group->dev;
50 46
51 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, 47 rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
52 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr); 48 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
@@ -54,7 +50,7 @@ static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
54 50
55static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set) 51static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
56{ 52{
57 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; 53 struct rcar_du_device *rcdu = rcrtc->group->dev;
58 54
59 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, 55 rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
60 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set); 56 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
@@ -63,29 +59,48 @@ static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
63static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg, 59static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg,
64 u32 clr, u32 set) 60 u32 clr, u32 set)
65{ 61{
66 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; 62 struct rcar_du_device *rcdu = rcrtc->group->dev;
67 u32 value = rcar_du_read(rcdu, rcrtc->mmio_offset + reg); 63 u32 value = rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
68 64
69 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, (value & ~clr) | set); 65 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, (value & ~clr) | set);
70} 66}
71 67
68static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc)
69{
70 int ret;
71
72 ret = clk_prepare_enable(rcrtc->clock);
73 if (ret < 0)
74 return ret;
75
76 ret = rcar_du_group_get(rcrtc->group);
77 if (ret < 0)
78 clk_disable_unprepare(rcrtc->clock);
79
80 return ret;
81}
82
83static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
84{
85 rcar_du_group_put(rcrtc->group);
86 clk_disable_unprepare(rcrtc->clock);
87}
88
72static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) 89static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
73{ 90{
74 struct drm_crtc *crtc = &rcrtc->crtc; 91 const struct drm_display_mode *mode = &rcrtc->crtc.mode;
75 struct rcar_du_device *rcdu = crtc->dev->dev_private;
76 const struct drm_display_mode *mode = &crtc->mode;
77 unsigned long clk; 92 unsigned long clk;
78 u32 value; 93 u32 value;
79 u32 div; 94 u32 div;
80 95
81 /* Dot clock */ 96 /* Dot clock */
82 clk = clk_get_rate(rcdu->clock); 97 clk = clk_get_rate(rcrtc->clock);
83 div = DIV_ROUND_CLOSEST(clk, mode->clock * 1000); 98 div = DIV_ROUND_CLOSEST(clk, mode->clock * 1000);
84 div = clamp(div, 1U, 64U) - 1; 99 div = clamp(div, 1U, 64U) - 1;
85 100
86 rcar_du_write(rcdu, rcrtc->index ? ESCR2 : ESCR, 101 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR,
87 ESCR_DCLKSEL_CLKS | div); 102 ESCR_DCLKSEL_CLKS | div);
88 rcar_du_write(rcdu, rcrtc->index ? OTAR2 : OTAR, 0); 103 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);
89 104
90 /* Signal polarities */ 105 /* Signal polarities */
91 value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL) 106 value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
@@ -112,68 +127,25 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
112 rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay); 127 rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
113} 128}
114 129
115static void rcar_du_crtc_set_routing(struct rcar_du_crtc *rcrtc) 130void rcar_du_crtc_route_output(struct drm_crtc *crtc,
116{ 131 enum rcar_du_output output)
117 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
118 u32 dorcr = rcar_du_read(rcdu, DORCR);
119
120 dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
121
122 /* Set the DU1 pins sources. Select CRTC 0 if explicitly requested and
123 * CRTC 1 in all other cases to avoid cloning CRTC 0 to DU0 and DU1 by
124 * default.
125 */
126 if (rcrtc->outputs & (1 << 1) && rcrtc->index == 0)
127 dorcr |= DORCR_PG2D_DS1;
128 else
129 dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2;
130
131 rcar_du_write(rcdu, DORCR, dorcr);
132}
133
134static void __rcar_du_start_stop(struct rcar_du_device *rcdu, bool start)
135{
136 rcar_du_write(rcdu, DSYSR,
137 (rcar_du_read(rcdu, DSYSR) & ~(DSYSR_DRES | DSYSR_DEN)) |
138 (start ? DSYSR_DEN : DSYSR_DRES));
139}
140
141static void rcar_du_start_stop(struct rcar_du_device *rcdu, bool start)
142{
143 /* Many of the configuration bits are only updated when the display
144 * reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some
145 * of those bits could be pre-configured, but others (especially the
146 * bits related to plane assignment to display timing controllers) need
147 * to be modified at runtime.
148 *
149 * Restart the display controller if a start is requested. Sorry for the
150 * flicker. It should be possible to move most of the "DRES-update" bits
151 * setup to driver initialization time and minimize the number of cases
152 * when the display controller will have to be restarted.
153 */
154 if (start) {
155 if (rcdu->used_crtcs++ != 0)
156 __rcar_du_start_stop(rcdu, false);
157 __rcar_du_start_stop(rcdu, true);
158 } else {
159 if (--rcdu->used_crtcs == 0)
160 __rcar_du_start_stop(rcdu, false);
161 }
162}
163
164void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output)
165{ 132{
166 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 133 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
134 struct rcar_du_device *rcdu = rcrtc->group->dev;
167 135
168 /* Store the route from the CRTC output to the DU output. The DU will be 136 /* Store the route from the CRTC output to the DU output. The DU will be
169 * configured when starting the CRTC. 137 * configured when starting the CRTC.
170 */ 138 */
171 rcrtc->outputs |= 1 << output; 139 rcrtc->outputs |= BIT(output);
140
141 /* Store RGB routing to DPAD0 for R8A7790. */
142 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_DEFR8) &&
143 output == RCAR_DU_OUTPUT_DPAD0)
144 rcdu->dpad0_source = rcrtc->index;
172} 145}
173 146
174void rcar_du_crtc_update_planes(struct drm_crtc *crtc) 147void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
175{ 148{
176 struct rcar_du_device *rcdu = crtc->dev->dev_private;
177 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 149 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
178 struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES]; 150 struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
179 unsigned int num_planes = 0; 151 unsigned int num_planes = 0;
@@ -182,8 +154,8 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
182 u32 dptsr = 0; 154 u32 dptsr = 0;
183 u32 dspr = 0; 155 u32 dspr = 0;
184 156
185 for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) { 157 for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
186 struct rcar_du_plane *plane = &rcdu->planes.planes[i]; 158 struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
187 unsigned int j; 159 unsigned int j;
188 160
189 if (plane->crtc != &rcrtc->crtc || !plane->enabled) 161 if (plane->crtc != &rcrtc->crtc || !plane->enabled)
@@ -220,8 +192,8 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
220 /* Select display timing and dot clock generator 2 for planes associated 192 /* Select display timing and dot clock generator 2 for planes associated
221 * with superposition controller 2. 193 * with superposition controller 2.
222 */ 194 */
223 if (rcrtc->index) { 195 if (rcrtc->index % 2) {
224 u32 value = rcar_du_read(rcdu, DPTSR); 196 u32 value = rcar_du_group_read(rcrtc->group, DPTSR);
225 197
226 /* The DPTSR register is updated when the display controller is 198 /* The DPTSR register is updated when the display controller is
227 * stopped. We thus need to restart the DU. Once again, sorry 199 * stopped. We thus need to restart the DU. Once again, sorry
@@ -231,21 +203,19 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
231 * occur only if we need to break the pre-association. 203 * occur only if we need to break the pre-association.
232 */ 204 */
233 if (value != dptsr) { 205 if (value != dptsr) {
234 rcar_du_write(rcdu, DPTSR, dptsr); 206 rcar_du_group_write(rcrtc->group, DPTSR, dptsr);
235 if (rcdu->used_crtcs) { 207 if (rcrtc->group->used_crtcs)
236 __rcar_du_start_stop(rcdu, false); 208 rcar_du_group_restart(rcrtc->group);
237 __rcar_du_start_stop(rcdu, true);
238 }
239 } 209 }
240 } 210 }
241 211
242 rcar_du_write(rcdu, rcrtc->index ? DS2PR : DS1PR, dspr); 212 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
213 dspr);
243} 214}
244 215
245static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) 216static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
246{ 217{
247 struct drm_crtc *crtc = &rcrtc->crtc; 218 struct drm_crtc *crtc = &rcrtc->crtc;
248 struct rcar_du_device *rcdu = crtc->dev->dev_private;
249 unsigned int i; 219 unsigned int i;
250 220
251 if (rcrtc->started) 221 if (rcrtc->started)
@@ -260,16 +230,16 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
260 230
261 /* Configure display timings and output routing */ 231 /* Configure display timings and output routing */
262 rcar_du_crtc_set_display_timing(rcrtc); 232 rcar_du_crtc_set_display_timing(rcrtc);
263 rcar_du_crtc_set_routing(rcrtc); 233 rcar_du_group_set_routing(rcrtc->group);
264 234
265 mutex_lock(&rcdu->planes.lock); 235 mutex_lock(&rcrtc->group->planes.lock);
266 rcrtc->plane->enabled = true; 236 rcrtc->plane->enabled = true;
267 rcar_du_crtc_update_planes(crtc); 237 rcar_du_crtc_update_planes(crtc);
268 mutex_unlock(&rcdu->planes.lock); 238 mutex_unlock(&rcrtc->group->planes.lock);
269 239
270 /* Setup planes. */ 240 /* Setup planes. */
271 for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) { 241 for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
272 struct rcar_du_plane *plane = &rcdu->planes.planes[i]; 242 struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
273 243
274 if (plane->crtc != crtc || !plane->enabled) 244 if (plane->crtc != crtc || !plane->enabled)
275 continue; 245 continue;
@@ -283,7 +253,7 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
283 */ 253 */
284 rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_MASTER); 254 rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_MASTER);
285 255
286 rcar_du_start_stop(rcdu, true); 256 rcar_du_group_start_stop(rcrtc->group, true);
287 257
288 rcrtc->started = true; 258 rcrtc->started = true;
289} 259}
@@ -291,42 +261,37 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
291static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) 261static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
292{ 262{
293 struct drm_crtc *crtc = &rcrtc->crtc; 263 struct drm_crtc *crtc = &rcrtc->crtc;
294 struct rcar_du_device *rcdu = crtc->dev->dev_private;
295 264
296 if (!rcrtc->started) 265 if (!rcrtc->started)
297 return; 266 return;
298 267
299 mutex_lock(&rcdu->planes.lock); 268 mutex_lock(&rcrtc->group->planes.lock);
300 rcrtc->plane->enabled = false; 269 rcrtc->plane->enabled = false;
301 rcar_du_crtc_update_planes(crtc); 270 rcar_du_crtc_update_planes(crtc);
302 mutex_unlock(&rcdu->planes.lock); 271 mutex_unlock(&rcrtc->group->planes.lock);
303 272
304 /* Select switch sync mode. This stops display operation and configures 273 /* Select switch sync mode. This stops display operation and configures
305 * the HSYNC and VSYNC signals as inputs. 274 * the HSYNC and VSYNC signals as inputs.
306 */ 275 */
307 rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH); 276 rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH);
308 277
309 rcar_du_start_stop(rcdu, false); 278 rcar_du_group_start_stop(rcrtc->group, false);
310 279
311 rcrtc->started = false; 280 rcrtc->started = false;
312} 281}
313 282
314void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc) 283void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc)
315{ 284{
316 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
317
318 rcar_du_crtc_stop(rcrtc); 285 rcar_du_crtc_stop(rcrtc);
319 rcar_du_put(rcdu); 286 rcar_du_crtc_put(rcrtc);
320} 287}
321 288
322void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc) 289void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
323{ 290{
324 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
325
326 if (rcrtc->dpms != DRM_MODE_DPMS_ON) 291 if (rcrtc->dpms != DRM_MODE_DPMS_ON)
327 return; 292 return;
328 293
329 rcar_du_get(rcdu); 294 rcar_du_crtc_get(rcrtc);
330 rcar_du_crtc_start(rcrtc); 295 rcar_du_crtc_start(rcrtc);
331} 296}
332 297
@@ -340,18 +305,17 @@ static void rcar_du_crtc_update_base(struct rcar_du_crtc *rcrtc)
340 305
341static void rcar_du_crtc_dpms(struct drm_crtc *crtc, int mode) 306static void rcar_du_crtc_dpms(struct drm_crtc *crtc, int mode)
342{ 307{
343 struct rcar_du_device *rcdu = crtc->dev->dev_private;
344 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 308 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
345 309
346 if (rcrtc->dpms == mode) 310 if (rcrtc->dpms == mode)
347 return; 311 return;
348 312
349 if (mode == DRM_MODE_DPMS_ON) { 313 if (mode == DRM_MODE_DPMS_ON) {
350 rcar_du_get(rcdu); 314 rcar_du_crtc_get(rcrtc);
351 rcar_du_crtc_start(rcrtc); 315 rcar_du_crtc_start(rcrtc);
352 } else { 316 } else {
353 rcar_du_crtc_stop(rcrtc); 317 rcar_du_crtc_stop(rcrtc);
354 rcar_du_put(rcdu); 318 rcar_du_crtc_put(rcrtc);
355 } 319 }
356 320
357 rcrtc->dpms = mode; 321 rcrtc->dpms = mode;
@@ -367,13 +331,12 @@ static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
367 331
368static void rcar_du_crtc_mode_prepare(struct drm_crtc *crtc) 332static void rcar_du_crtc_mode_prepare(struct drm_crtc *crtc)
369{ 333{
370 struct rcar_du_device *rcdu = crtc->dev->dev_private;
371 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 334 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
372 335
373 /* We need to access the hardware during mode set, acquire a reference 336 /* We need to access the hardware during mode set, acquire a reference
374 * to the DU. 337 * to the CRTC.
375 */ 338 */
376 rcar_du_get(rcdu); 339 rcar_du_crtc_get(rcrtc);
377 340
378 /* Stop the CRTC and release the plane. Force the DPMS mode to off as a 341 /* Stop the CRTC and release the plane. Force the DPMS mode to off as a
379 * result. 342 * result.
@@ -390,8 +353,8 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
390 int x, int y, 353 int x, int y,
391 struct drm_framebuffer *old_fb) 354 struct drm_framebuffer *old_fb)
392{ 355{
393 struct rcar_du_device *rcdu = crtc->dev->dev_private;
394 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 356 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
357 struct rcar_du_device *rcdu = rcrtc->group->dev;
395 const struct rcar_du_format_info *format; 358 const struct rcar_du_format_info *format;
396 int ret; 359 int ret;
397 360
@@ -423,10 +386,10 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
423 386
424error: 387error:
425 /* There's no rollback/abort operation to clean up in case of error. We 388 /* There's no rollback/abort operation to clean up in case of error. We
426 * thus need to release the reference to the DU acquired in prepare() 389 * thus need to release the reference to the CRTC acquired in prepare()
427 * here. 390 * here.
428 */ 391 */
429 rcar_du_put(rcdu); 392 rcar_du_crtc_put(rcrtc);
430 return ret; 393 return ret;
431} 394}
432 395
@@ -514,9 +477,28 @@ static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
514 drm_vblank_put(dev, rcrtc->index); 477 drm_vblank_put(dev, rcrtc->index);
515} 478}
516 479
480static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
481{
482 struct rcar_du_crtc *rcrtc = arg;
483 irqreturn_t ret = IRQ_NONE;
484 u32 status;
485
486 status = rcar_du_crtc_read(rcrtc, DSSR);
487 rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
488
489 if (status & DSSR_VBK) {
490 drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
491 rcar_du_crtc_finish_page_flip(rcrtc);
492 ret = IRQ_HANDLED;
493 }
494
495 return ret;
496}
497
517static int rcar_du_crtc_page_flip(struct drm_crtc *crtc, 498static int rcar_du_crtc_page_flip(struct drm_crtc *crtc,
518 struct drm_framebuffer *fb, 499 struct drm_framebuffer *fb,
519 struct drm_pending_vblank_event *event) 500 struct drm_pending_vblank_event *event,
501 uint32_t page_flip_flags)
520{ 502{
521 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 503 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
522 struct drm_device *dev = rcrtc->crtc.dev; 504 struct drm_device *dev = rcrtc->crtc.dev;
@@ -549,16 +531,41 @@ static const struct drm_crtc_funcs crtc_funcs = {
549 .page_flip = rcar_du_crtc_page_flip, 531 .page_flip = rcar_du_crtc_page_flip,
550}; 532};
551 533
552int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index) 534int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
553{ 535{
536 static const unsigned int mmio_offsets[] = {
537 DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET
538 };
539
540 struct rcar_du_device *rcdu = rgrp->dev;
541 struct platform_device *pdev = to_platform_device(rcdu->dev);
554 struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index]; 542 struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index];
555 struct drm_crtc *crtc = &rcrtc->crtc; 543 struct drm_crtc *crtc = &rcrtc->crtc;
544 unsigned int irqflags;
545 char clk_name[5];
546 char *name;
547 int irq;
556 int ret; 548 int ret;
557 549
558 rcrtc->mmio_offset = index ? DISP2_REG_OFFSET : 0; 550 /* Get the CRTC clock. */
551 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
552 sprintf(clk_name, "du.%u", index);
553 name = clk_name;
554 } else {
555 name = NULL;
556 }
557
558 rcrtc->clock = devm_clk_get(rcdu->dev, name);
559 if (IS_ERR(rcrtc->clock)) {
560 dev_err(rcdu->dev, "no clock for CRTC %u\n", index);
561 return PTR_ERR(rcrtc->clock);
562 }
563
564 rcrtc->group = rgrp;
565 rcrtc->mmio_offset = mmio_offsets[index];
559 rcrtc->index = index; 566 rcrtc->index = index;
560 rcrtc->dpms = DRM_MODE_DPMS_OFF; 567 rcrtc->dpms = DRM_MODE_DPMS_OFF;
561 rcrtc->plane = &rcdu->planes.planes[index]; 568 rcrtc->plane = &rgrp->planes.planes[index % 2];
562 569
563 rcrtc->plane->crtc = crtc; 570 rcrtc->plane->crtc = crtc;
564 571
@@ -568,6 +575,28 @@ int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index)
568 575
569 drm_crtc_helper_add(crtc, &crtc_helper_funcs); 576 drm_crtc_helper_add(crtc, &crtc_helper_funcs);
570 577
578 /* Register the interrupt handler. */
579 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
580 irq = platform_get_irq(pdev, index);
581 irqflags = 0;
582 } else {
583 irq = platform_get_irq(pdev, 0);
584 irqflags = IRQF_SHARED;
585 }
586
587 if (irq < 0) {
588 dev_err(rcdu->dev, "no IRQ for CRTC %u\n", index);
589 return ret;
590 }
591
592 ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
593 dev_name(rcdu->dev), rcrtc);
594 if (ret < 0) {
595 dev_err(rcdu->dev,
596 "failed to register IRQ for CRTC %u\n", index);
597 return ret;
598 }
599
571 return 0; 600 return 0;
572} 601}
573 602
@@ -580,16 +609,3 @@ void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable)
580 rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE); 609 rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
581 } 610 }
582} 611}
583
584void rcar_du_crtc_irq(struct rcar_du_crtc *rcrtc)
585{
586 u32 status;
587
588 status = rcar_du_crtc_read(rcrtc, DSSR);
589 rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
590
591 if (status & DSSR_VBK) {
592 drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
593 rcar_du_crtc_finish_page_flip(rcrtc);
594 }
595}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 2a0365bcbd14..43e7575c700c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -15,16 +15,18 @@
15#define __RCAR_DU_CRTC_H__ 15#define __RCAR_DU_CRTC_H__
16 16
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <linux/platform_data/rcar-du.h>
18 19
19#include <drm/drmP.h> 20#include <drm/drmP.h>
20#include <drm/drm_crtc.h> 21#include <drm/drm_crtc.h>
21 22
22struct rcar_du_device; 23struct rcar_du_group;
23struct rcar_du_plane; 24struct rcar_du_plane;
24 25
25struct rcar_du_crtc { 26struct rcar_du_crtc {
26 struct drm_crtc crtc; 27 struct drm_crtc crtc;
27 28
29 struct clk *clock;
28 unsigned int mmio_offset; 30 unsigned int mmio_offset;
29 unsigned int index; 31 unsigned int index;
30 bool started; 32 bool started;
@@ -33,18 +35,21 @@ struct rcar_du_crtc {
33 unsigned int outputs; 35 unsigned int outputs;
34 int dpms; 36 int dpms;
35 37
38 struct rcar_du_group *group;
36 struct rcar_du_plane *plane; 39 struct rcar_du_plane *plane;
37}; 40};
38 41
39int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index); 42#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
43
44int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index);
40void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable); 45void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable);
41void rcar_du_crtc_irq(struct rcar_du_crtc *rcrtc);
42void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc, 46void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
43 struct drm_file *file); 47 struct drm_file *file);
44void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc); 48void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
45void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc); 49void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
46 50
47void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output); 51void rcar_du_crtc_route_output(struct drm_crtc *crtc,
52 enum rcar_du_output output);
48void rcar_du_crtc_update_planes(struct drm_crtc *crtc); 53void rcar_du_crtc_update_planes(struct drm_crtc *crtc);
49 54
50#endif /* __RCAR_DU_CRTC_H__ */ 55#endif /* __RCAR_DU_CRTC_H__ */
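
The new to_rcar_crtc() macro recovers the driver object from the embedded drm_crtc without any lookup table; a minimal usage sketch (the callback is illustrative, not part of the patch):

static void example_enable_vblank(struct drm_crtc *crtc)
{
	/* container_of() walks back from the drm_crtc member to the
	 * enclosing rcar_du_crtc that embeds it.
	 */
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	rcar_du_crtc_enable_vblank(rcrtc, true);
}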
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index dc0fe09b2ba1..0023f9719cf1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -21,6 +21,7 @@
21 21
22#include <drm/drmP.h> 22#include <drm/drmP.h>
23#include <drm/drm_crtc_helper.h> 23#include <drm/drm_crtc_helper.h>
24#include <drm/drm_fb_cma_helper.h>
24#include <drm/drm_gem_cma_helper.h> 25#include <drm/drm_gem_cma_helper.h>
25 26
26#include "rcar_du_crtc.h" 27#include "rcar_du_crtc.h"
@@ -29,74 +30,21 @@
29#include "rcar_du_regs.h" 30#include "rcar_du_regs.h"
30 31
31/* ----------------------------------------------------------------------------- 32/* -----------------------------------------------------------------------------
32 * Core device operations
33 */
34
35/*
36 * rcar_du_get - Acquire a reference to the DU
37 *
 38 * Acquiring a reference enables the device clock and sets up core registers. A
39 * reference must be held before accessing any hardware registers.
40 *
41 * This function must be called with the DRM mode_config lock held.
42 *
43 * Return 0 in case of success or a negative error code otherwise.
44 */
45int rcar_du_get(struct rcar_du_device *rcdu)
46{
47 int ret;
48
49 if (rcdu->use_count)
50 goto done;
51
52 /* Enable clocks before accessing the hardware. */
53 ret = clk_prepare_enable(rcdu->clock);
54 if (ret < 0)
55 return ret;
56
57 /* Enable extended features */
58 rcar_du_write(rcdu, DEFR, DEFR_CODE | DEFR_DEFE);
59 rcar_du_write(rcdu, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
60 rcar_du_write(rcdu, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
61 rcar_du_write(rcdu, DEFR4, DEFR4_CODE);
62 rcar_du_write(rcdu, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
63
 64	/* Use DS1PR and DS2PR to configure plane priorities and connect
 65	 * superposition 0 to DU0 pins. DU1 pins will be configured dynamically.
66 */
67 rcar_du_write(rcdu, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
68
69done:
70 rcdu->use_count++;
71 return 0;
72}
73
74/*
75 * rcar_du_put - Release a reference to the DU
76 *
77 * Releasing the last reference disables the device clock.
78 *
79 * This function must be called with the DRM mode_config lock held.
80 */
81void rcar_du_put(struct rcar_du_device *rcdu)
82{
83 if (--rcdu->use_count)
84 return;
85
86 clk_disable_unprepare(rcdu->clock);
87}
88
89/* -----------------------------------------------------------------------------
90 * DRM operations 33 * DRM operations
91 */ 34 */
92 35
93static int rcar_du_unload(struct drm_device *dev) 36static int rcar_du_unload(struct drm_device *dev)
94{ 37{
38 struct rcar_du_device *rcdu = dev->dev_private;
39
40 if (rcdu->fbdev)
41 drm_fbdev_cma_fini(rcdu->fbdev);
42
95 drm_kms_helper_poll_fini(dev); 43 drm_kms_helper_poll_fini(dev);
96 drm_mode_config_cleanup(dev); 44 drm_mode_config_cleanup(dev);
97 drm_vblank_cleanup(dev); 45 drm_vblank_cleanup(dev);
98 drm_irq_uninstall(dev);
99 46
47 dev->irq_enabled = 0;
100 dev->dev_private = NULL; 48 dev->dev_private = NULL;
101 49
102 return 0; 50 return 0;
@@ -107,7 +55,6 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
107 struct platform_device *pdev = dev->platformdev; 55 struct platform_device *pdev = dev->platformdev;
108 struct rcar_du_platform_data *pdata = pdev->dev.platform_data; 56 struct rcar_du_platform_data *pdata = pdev->dev.platform_data;
109 struct rcar_du_device *rcdu; 57 struct rcar_du_device *rcdu;
110 struct resource *ioarea;
111 struct resource *mem; 58 struct resource *mem;
112 int ret; 59 int ret;
113 60
@@ -124,35 +71,15 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
124 71
125 rcdu->dev = &pdev->dev; 72 rcdu->dev = &pdev->dev;
126 rcdu->pdata = pdata; 73 rcdu->pdata = pdata;
74 rcdu->info = (struct rcar_du_device_info *)pdev->id_entry->driver_data;
127 rcdu->ddev = dev; 75 rcdu->ddev = dev;
128 dev->dev_private = rcdu; 76 dev->dev_private = rcdu;
129 77
130 /* I/O resources and clocks */ 78 /* I/O resources */
131 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 79 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
132 if (mem == NULL) { 80 rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
133 dev_err(&pdev->dev, "failed to get memory resource\n"); 81 if (IS_ERR(rcdu->mmio))
134 return -EINVAL; 82 return PTR_ERR(rcdu->mmio);
135 }
136
137 ioarea = devm_request_mem_region(&pdev->dev, mem->start,
138 resource_size(mem), pdev->name);
139 if (ioarea == NULL) {
140 dev_err(&pdev->dev, "failed to request memory region\n");
141 return -EBUSY;
142 }
143
144 rcdu->mmio = devm_ioremap_nocache(&pdev->dev, ioarea->start,
145 resource_size(ioarea));
146 if (rcdu->mmio == NULL) {
147 dev_err(&pdev->dev, "failed to remap memory resource\n");
148 return -ENOMEM;
149 }
150
151 rcdu->clock = devm_clk_get(&pdev->dev, NULL);
152 if (IS_ERR(rcdu->clock)) {
153 dev_err(&pdev->dev, "failed to get clock\n");
154 return -ENOENT;
155 }
156 83
157 /* DRM/KMS objects */ 84 /* DRM/KMS objects */
158 ret = rcar_du_modeset_init(rcdu); 85 ret = rcar_du_modeset_init(rcdu);
@@ -161,18 +88,14 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
161 goto done; 88 goto done;
162 } 89 }
163 90
164 /* IRQ and vblank handling */ 91 /* vblank handling */
165 ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1); 92 ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
166 if (ret < 0) { 93 if (ret < 0) {
167 dev_err(&pdev->dev, "failed to initialize vblank\n"); 94 dev_err(&pdev->dev, "failed to initialize vblank\n");
168 goto done; 95 goto done;
169 } 96 }
170 97
171 ret = drm_irq_install(dev); 98 dev->irq_enabled = 1;
172 if (ret < 0) {
173 dev_err(&pdev->dev, "failed to install IRQ handler\n");
174 goto done;
175 }
176 99
177 platform_set_drvdata(pdev, rcdu); 100 platform_set_drvdata(pdev, rcdu);
178 101
@@ -188,20 +111,15 @@ static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file)
188 struct rcar_du_device *rcdu = dev->dev_private; 111 struct rcar_du_device *rcdu = dev->dev_private;
189 unsigned int i; 112 unsigned int i;
190 113
191 for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i) 114 for (i = 0; i < rcdu->num_crtcs; ++i)
192 rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file); 115 rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file);
193} 116}
194 117
195static irqreturn_t rcar_du_irq(int irq, void *arg) 118static void rcar_du_lastclose(struct drm_device *dev)
196{ 119{
197 struct drm_device *dev = arg;
198 struct rcar_du_device *rcdu = dev->dev_private; 120 struct rcar_du_device *rcdu = dev->dev_private;
199 unsigned int i;
200
201 for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i)
202 rcar_du_crtc_irq(&rcdu->crtcs[i]);
203 121
204 return IRQ_HANDLED; 122 drm_fbdev_cma_restore_mode(rcdu->fbdev);
205} 123}
206 124
207static int rcar_du_enable_vblank(struct drm_device *dev, int crtc) 125static int rcar_du_enable_vblank(struct drm_device *dev, int crtc)
@@ -230,18 +148,16 @@ static const struct file_operations rcar_du_fops = {
230#endif 148#endif
231 .poll = drm_poll, 149 .poll = drm_poll,
232 .read = drm_read, 150 .read = drm_read,
233 .fasync = drm_fasync,
234 .llseek = no_llseek, 151 .llseek = no_llseek,
235 .mmap = drm_gem_cma_mmap, 152 .mmap = drm_gem_cma_mmap,
236}; 153};
237 154
238static struct drm_driver rcar_du_driver = { 155static struct drm_driver rcar_du_driver = {
239 .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET 156 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
240 | DRIVER_PRIME,
241 .load = rcar_du_load, 157 .load = rcar_du_load,
242 .unload = rcar_du_unload, 158 .unload = rcar_du_unload,
243 .preclose = rcar_du_preclose, 159 .preclose = rcar_du_preclose,
244 .irq_handler = rcar_du_irq, 160 .lastclose = rcar_du_lastclose,
245 .get_vblank_counter = drm_vblank_count, 161 .get_vblank_counter = drm_vblank_count,
246 .enable_vblank = rcar_du_enable_vblank, 162 .enable_vblank = rcar_du_enable_vblank,
247 .disable_vblank = rcar_du_disable_vblank, 163 .disable_vblank = rcar_du_disable_vblank,
@@ -258,7 +174,7 @@ static struct drm_driver rcar_du_driver = {
258 .gem_prime_mmap = drm_gem_cma_prime_mmap, 174 .gem_prime_mmap = drm_gem_cma_prime_mmap,
259 .dumb_create = rcar_du_dumb_create, 175 .dumb_create = rcar_du_dumb_create,
260 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 176 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
261 .dumb_destroy = drm_gem_cma_dumb_destroy, 177 .dumb_destroy = drm_gem_dumb_destroy,
262 .fops = &rcar_du_fops, 178 .fops = &rcar_du_fops,
263 .name = "rcar-du", 179 .name = "rcar-du",
264 .desc = "Renesas R-Car Display Unit", 180 .desc = "Renesas R-Car Display Unit",
@@ -313,6 +229,57 @@ static int rcar_du_remove(struct platform_device *pdev)
313 return 0; 229 return 0;
314} 230}
315 231
232static const struct rcar_du_device_info rcar_du_r8a7779_info = {
233 .features = 0,
234 .num_crtcs = 2,
235 .routes = {
236 /* R8A7779 has two RGB outputs and one (currently unsupported)
237 * TCON output.
238 */
239 [RCAR_DU_OUTPUT_DPAD0] = {
240 .possible_crtcs = BIT(0),
241 .encoder_type = DRM_MODE_ENCODER_NONE,
242 },
243 [RCAR_DU_OUTPUT_DPAD1] = {
244 .possible_crtcs = BIT(1) | BIT(0),
245 .encoder_type = DRM_MODE_ENCODER_NONE,
246 },
247 },
248 .num_lvds = 0,
249};
250
251static const struct rcar_du_device_info rcar_du_r8a7790_info = {
252 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_ALIGN_128B
253 | RCAR_DU_FEATURE_DEFR8,
254 .num_crtcs = 3,
255 .routes = {
256 /* R8A7790 has one RGB output, two LVDS outputs and one
257 * (currently unsupported) TCON output.
258 */
259 [RCAR_DU_OUTPUT_DPAD0] = {
260 .possible_crtcs = BIT(2) | BIT(1) | BIT(0),
261 .encoder_type = DRM_MODE_ENCODER_NONE,
262 },
263 [RCAR_DU_OUTPUT_LVDS0] = {
264 .possible_crtcs = BIT(0),
265 .encoder_type = DRM_MODE_ENCODER_LVDS,
266 },
267 [RCAR_DU_OUTPUT_LVDS1] = {
268 .possible_crtcs = BIT(2) | BIT(1),
269 .encoder_type = DRM_MODE_ENCODER_LVDS,
270 },
271 },
272 .num_lvds = 2,
273};
274
275static const struct platform_device_id rcar_du_id_table[] = {
276 { "rcar-du-r8a7779", (kernel_ulong_t)&rcar_du_r8a7779_info },
277 { "rcar-du-r8a7790", (kernel_ulong_t)&rcar_du_r8a7790_info },
278 { }
279};
280
281MODULE_DEVICE_TABLE(platform, rcar_du_id_table);
282
316static struct platform_driver rcar_du_platform_driver = { 283static struct platform_driver rcar_du_platform_driver = {
317 .probe = rcar_du_probe, 284 .probe = rcar_du_probe,
318 .remove = rcar_du_remove, 285 .remove = rcar_du_remove,
@@ -321,6 +288,7 @@ static struct platform_driver rcar_du_platform_driver = {
321 .name = "rcar-du", 288 .name = "rcar-du",
322 .pm = &rcar_du_pm_ops, 289 .pm = &rcar_du_pm_ops,
323 }, 290 },
291 .id_table = rcar_du_id_table,
324}; 292};
325 293
326module_platform_driver(rcar_du_platform_driver); 294module_platform_driver(rcar_du_platform_driver);
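
With the id table in place the per-model rcar_du_device_info is selected by device name alone, through pdev->id_entry->driver_data in rcar_du_load(). A hedged sketch of the board-side registration; the platform data symbol is hypothetical and the memory/IRQ resources are omitted:

static struct platform_device rcar_du_board_device = {
	/* The name must match an entry in rcar_du_id_table so probe
	 * picks up the matching rcar_du_device_info.
	 */
	.name	= "rcar-du-r8a7790",
	.id	= -1,
	.dev	= {
		.platform_data = &rcar_du_board_pdata,	/* hypothetical */
	},
};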
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 193cc59d495c..65d2d636b002 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -15,43 +15,74 @@
15#define __RCAR_DU_DRV_H__ 15#define __RCAR_DU_DRV_H__
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/mutex.h>
19#include <linux/platform_data/rcar-du.h> 18#include <linux/platform_data/rcar-du.h>
20 19
21#include "rcar_du_crtc.h" 20#include "rcar_du_crtc.h"
22#include "rcar_du_plane.h" 21#include "rcar_du_group.h"
23 22
24struct clk; 23struct clk;
25struct device; 24struct device;
26struct drm_device; 25struct drm_device;
26struct drm_fbdev_cma;
27struct rcar_du_device;
28struct rcar_du_lvdsenc;
29
30#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */
31#define RCAR_DU_FEATURE_ALIGN_128B (1 << 1) /* Align pitches to 128 bytes */
32#define RCAR_DU_FEATURE_DEFR8 (1 << 2) /* Has DEFR8 register */
33
34/*
35 * struct rcar_du_output_routing - Output routing specification
36 * @possible_crtcs: bitmask of possible CRTCs for the output
37 * @encoder_type: DRM type of the internal encoder associated with the output
38 *
39 * The DU has 5 possible outputs (DPAD0/1, LVDS0/1, TCON). Output routing data
40 * specify the valid SoC outputs, which CRTCs can drive the output, and the type
41 * of in-SoC encoder for the output.
42 */
43struct rcar_du_output_routing {
44 unsigned int possible_crtcs;
45 unsigned int encoder_type;
46};
47
48/*
49 * struct rcar_du_device_info - DU model-specific information
50 * @features: device features (RCAR_DU_FEATURE_*)
51 * @num_crtcs: total number of CRTCs
52 * @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*)
53 * @num_lvds: number of internal LVDS encoders
54 */
55struct rcar_du_device_info {
56 unsigned int features;
57 unsigned int num_crtcs;
58 struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX];
59 unsigned int num_lvds;
60};
27 61
28struct rcar_du_device { 62struct rcar_du_device {
29 struct device *dev; 63 struct device *dev;
30 const struct rcar_du_platform_data *pdata; 64 const struct rcar_du_platform_data *pdata;
65 const struct rcar_du_device_info *info;
31 66
32 void __iomem *mmio; 67 void __iomem *mmio;
33 struct clk *clock;
34 unsigned int use_count;
35 68
36 struct drm_device *ddev; 69 struct drm_device *ddev;
70 struct drm_fbdev_cma *fbdev;
37 71
38 struct rcar_du_crtc crtcs[2]; 72 struct rcar_du_crtc crtcs[3];
39 unsigned int used_crtcs;
40 unsigned int num_crtcs; 73 unsigned int num_crtcs;
41 74
42 struct { 75 struct rcar_du_group groups[2];
43 struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES];
44 unsigned int free;
45 struct mutex lock;
46 76
47 struct drm_property *alpha; 77 unsigned int dpad0_source;
48 struct drm_property *colorkey; 78 struct rcar_du_lvdsenc *lvds[2];
49 struct drm_property *zpos;
50 } planes;
51}; 79};
52 80
53int rcar_du_get(struct rcar_du_device *rcdu); 81static inline bool rcar_du_has(struct rcar_du_device *rcdu,
54void rcar_du_put(struct rcar_du_device *rcdu); 82 unsigned int feature)
83{
84 return rcdu->info->features & feature;
85}
55 86
56static inline u32 rcar_du_read(struct rcar_du_device *rcdu, u32 reg) 87static inline u32 rcar_du_read(struct rcar_du_device *rcdu, u32 reg)
57{ 88{
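
The routes[] array doubles as a validity map: an output a given SoC lacks keeps possible_crtcs at zero. A fragment mirroring the check done in rcar_du_modeset_init() (the surrounding error handling is illustrative):

	const struct rcar_du_output_routing *route =
		&rcdu->info->routes[RCAR_DU_OUTPUT_LVDS0];

	/* possible_crtcs == 0 marks an output this model doesn't have. */
	if (route->possible_crtcs == 0)
		return -EINVAL;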
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
new file mode 100644
index 000000000000..3daa7a168dc6
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -0,0 +1,202 @@
1/*
2 * rcar_du_encoder.c -- R-Car Display Unit Encoder
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/export.h>
15
16#include <drm/drmP.h>
17#include <drm/drm_crtc.h>
18#include <drm/drm_crtc_helper.h>
19
20#include "rcar_du_drv.h"
21#include "rcar_du_encoder.h"
22#include "rcar_du_kms.h"
23#include "rcar_du_lvdscon.h"
24#include "rcar_du_lvdsenc.h"
25#include "rcar_du_vgacon.h"
26
27/* -----------------------------------------------------------------------------
28 * Common connector functions
29 */
30
31struct drm_encoder *
32rcar_du_connector_best_encoder(struct drm_connector *connector)
33{
34 struct rcar_du_connector *rcon = to_rcar_connector(connector);
35
36 return &rcon->encoder->encoder;
37}
38
39/* -----------------------------------------------------------------------------
40 * Encoder
41 */
42
43static void rcar_du_encoder_dpms(struct drm_encoder *encoder, int mode)
44{
45 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
46
47 if (renc->lvds)
48 rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc, mode);
49}
50
51static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder,
52 const struct drm_display_mode *mode,
53 struct drm_display_mode *adjusted_mode)
54{
55 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
56 const struct drm_display_mode *panel_mode;
57 struct drm_device *dev = encoder->dev;
58 struct drm_connector *connector;
59 bool found = false;
60
 61	/* DAC encoders currently have no restriction on the mode. */
62 if (encoder->encoder_type == DRM_MODE_ENCODER_DAC)
63 return true;
64
65 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
66 if (connector->encoder == encoder) {
67 found = true;
68 break;
69 }
70 }
71
72 if (!found) {
73 dev_dbg(dev->dev, "mode_fixup: no connector found\n");
74 return false;
75 }
76
77 if (list_empty(&connector->modes)) {
78 dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
79 return false;
80 }
81
82 panel_mode = list_first_entry(&connector->modes,
83 struct drm_display_mode, head);
84
85 /* We're not allowed to modify the resolution. */
86 if (mode->hdisplay != panel_mode->hdisplay ||
87 mode->vdisplay != panel_mode->vdisplay)
88 return false;
89
90 /* The flat panel mode is fixed, just copy it to the adjusted mode. */
91 drm_mode_copy(adjusted_mode, panel_mode);
92
93 /* The internal LVDS encoder has a clock frequency operating range of
94 * 30MHz to 150MHz. Clamp the clock accordingly.
95 */
96 if (renc->lvds)
97 adjusted_mode->clock = clamp(adjusted_mode->clock,
98 30000, 150000);
99
100 return true;
101}
102
103static void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder)
104{
105 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
106
107 if (renc->lvds)
108 rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc,
109 DRM_MODE_DPMS_OFF);
110}
111
112static void rcar_du_encoder_mode_commit(struct drm_encoder *encoder)
113{
114 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
115
116 if (renc->lvds)
117 rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc,
118 DRM_MODE_DPMS_ON);
119}
120
121static void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
122 struct drm_display_mode *mode,
123 struct drm_display_mode *adjusted_mode)
124{
125 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
126
127 rcar_du_crtc_route_output(encoder->crtc, renc->output);
128}
129
130static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
131 .dpms = rcar_du_encoder_dpms,
132 .mode_fixup = rcar_du_encoder_mode_fixup,
133 .prepare = rcar_du_encoder_mode_prepare,
134 .commit = rcar_du_encoder_mode_commit,
135 .mode_set = rcar_du_encoder_mode_set,
136};
137
138static const struct drm_encoder_funcs encoder_funcs = {
139 .destroy = drm_encoder_cleanup,
140};
141
142int rcar_du_encoder_init(struct rcar_du_device *rcdu,
143 enum rcar_du_encoder_type type,
144 enum rcar_du_output output,
145 const struct rcar_du_encoder_data *data)
146{
147 struct rcar_du_encoder *renc;
148 unsigned int encoder_type;
149 int ret;
150
151 renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
152 if (renc == NULL)
153 return -ENOMEM;
154
155 renc->output = output;
156
157 switch (output) {
158 case RCAR_DU_OUTPUT_LVDS0:
159 renc->lvds = rcdu->lvds[0];
160 break;
161
162 case RCAR_DU_OUTPUT_LVDS1:
163 renc->lvds = rcdu->lvds[1];
164 break;
165
166 default:
167 break;
168 }
169
170 switch (type) {
171 case RCAR_DU_ENCODER_VGA:
172 encoder_type = DRM_MODE_ENCODER_DAC;
173 break;
174 case RCAR_DU_ENCODER_LVDS:
175 encoder_type = DRM_MODE_ENCODER_LVDS;
176 break;
177 case RCAR_DU_ENCODER_NONE:
178 default:
179 /* No external encoder, use the internal encoder type. */
180 encoder_type = rcdu->info->routes[output].encoder_type;
181 break;
182 }
183
184 ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
185 encoder_type);
186 if (ret < 0)
187 return ret;
188
189 drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
190
191 switch (encoder_type) {
192 case DRM_MODE_ENCODER_LVDS:
193 return rcar_du_lvds_connector_init(rcdu, renc,
194 &data->connector.lvds.panel);
195
196 case DRM_MODE_ENCODER_DAC:
197 return rcar_du_vga_connector_init(rcdu, renc);
198
199 default:
200 return -EINVAL;
201 }
202}
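
The clamp at the end of rcar_du_encoder_mode_fixup() keeps the pixel clock inside the internal LVDS encoder's 30 MHz to 150 MHz window; a worked example with a hypothetical panel mode (clock values are in kHz, as in struct drm_display_mode):

	/* A panel requesting a 162 MHz pixel clock is pulled down to the
	 * upper bound; a 25 MHz mode would be raised to the lower bound.
	 */
	adjusted_mode->clock = clamp(adjusted_mode->clock, 30000, 150000);
	/* 162000 -> 150000, 25000 -> 30000 */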
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
new file mode 100644
index 000000000000..0e5a65e45d0e
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -0,0 +1,49 @@
1/*
2 * rcar_du_encoder.h -- R-Car Display Unit Encoder
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_ENCODER_H__
15#define __RCAR_DU_ENCODER_H__
16
17#include <linux/platform_data/rcar-du.h>
18
19#include <drm/drm_crtc.h>
20
21struct rcar_du_device;
22struct rcar_du_lvdsenc;
23
24struct rcar_du_encoder {
25 struct drm_encoder encoder;
26 enum rcar_du_output output;
27 struct rcar_du_lvdsenc *lvds;
28};
29
30#define to_rcar_encoder(e) \
31 container_of(e, struct rcar_du_encoder, encoder)
32
33struct rcar_du_connector {
34 struct drm_connector connector;
35 struct rcar_du_encoder *encoder;
36};
37
38#define to_rcar_connector(c) \
39 container_of(c, struct rcar_du_connector, connector)
40
41struct drm_encoder *
42rcar_du_connector_best_encoder(struct drm_connector *connector);
43
44int rcar_du_encoder_init(struct rcar_du_device *rcdu,
45 enum rcar_du_encoder_type type,
46 enum rcar_du_output output,
47 const struct rcar_du_encoder_data *data);
48
49#endif /* __RCAR_DU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
new file mode 100644
index 000000000000..eb53cd97e8c6
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -0,0 +1,187 @@
1/*
2 * rcar_du_group.c -- R-Car Display Unit Channels Pair
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14/*
 15 * The R8A7779 DU is split into per-CRTC resources (scan-out engine, blending
16 * unit, timings generator, ...) and device-global resources (start/stop
17 * control, planes, ...) shared between the two CRTCs.
18 *
19 * The R8A7790 introduced a third CRTC with its own set of global resources.
 20 * This would be modeled as two separate DU device instances if it weren't for
 21 * a handful of resources that are shared between the three CRTCs (mostly
22 * related to input and output routing). For this reason the R8A7790 DU must be
23 * modeled as a single device with three CRTCs, two sets of "semi-global"
24 * resources, and a few device-global resources.
25 *
 26 * The rcar_du_group object is a driver-specific object, without any real
27 * counterpart in the DU documentation, that models those semi-global resources.
28 */
29
30#include <linux/clk.h>
31#include <linux/io.h>
32
33#include "rcar_du_drv.h"
34#include "rcar_du_group.h"
35#include "rcar_du_regs.h"
36
37u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg)
38{
39 return rcar_du_read(rgrp->dev, rgrp->mmio_offset + reg);
40}
41
42void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data)
43{
44 rcar_du_write(rgrp->dev, rgrp->mmio_offset + reg, data);
45}
46
47static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp)
48{
49 u32 defr8 = DEFR8_CODE | DEFR8_DEFE8;
50
51 if (!rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_DEFR8))
52 return;
53
54 /* The DEFR8 register for the first group also controls RGB output
 55	 * routing to DPAD0.
56 */
57 if (rgrp->index == 0)
58 defr8 |= DEFR8_DRGBS_DU(rgrp->dev->dpad0_source);
59
60 rcar_du_group_write(rgrp, DEFR8, defr8);
61}
62
63static void rcar_du_group_setup(struct rcar_du_group *rgrp)
64{
65 /* Enable extended features */
66 rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE);
67 rcar_du_group_write(rgrp, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
68 rcar_du_group_write(rgrp, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
69 rcar_du_group_write(rgrp, DEFR4, DEFR4_CODE);
70 rcar_du_group_write(rgrp, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
71
72 rcar_du_group_setup_defr8(rgrp);
73
 74	/* Use DS1PR and DS2PR to configure plane priorities and connect
 75	 * superposition 0 to DU0 pins. DU1 pins will be configured dynamically.
76 */
77 rcar_du_group_write(rgrp, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
78}
79
80/*
81 * rcar_du_group_get - Acquire a reference to the DU channels group
82 *
 83 * Acquiring the first reference sets up core registers. A reference must be held
84 * before accessing any hardware registers.
85 *
86 * This function must be called with the DRM mode_config lock held.
87 *
88 * Return 0 in case of success or a negative error code otherwise.
89 */
90int rcar_du_group_get(struct rcar_du_group *rgrp)
91{
92 if (rgrp->use_count)
93 goto done;
94
95 rcar_du_group_setup(rgrp);
96
97done:
98 rgrp->use_count++;
99 return 0;
100}
101
102/*
103 * rcar_du_group_put - Release a reference to the DU
104 *
105 * This function must be called with the DRM mode_config lock held.
106 */
107void rcar_du_group_put(struct rcar_du_group *rgrp)
108{
109 --rgrp->use_count;
110}
111
112static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
113{
114 rcar_du_group_write(rgrp, DSYSR,
115 (rcar_du_group_read(rgrp, DSYSR) & ~(DSYSR_DRES | DSYSR_DEN)) |
116 (start ? DSYSR_DEN : DSYSR_DRES));
117}
118
119void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
120{
121 /* Many of the configuration bits are only updated when the display
122 * reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some
123 * of those bits could be pre-configured, but others (especially the
124 * bits related to plane assignment to display timing controllers) need
125 * to be modified at runtime.
126 *
127 * Restart the display controller if a start is requested. Sorry for the
128 * flicker. It should be possible to move most of the "DRES-update" bits
129 * setup to driver initialization time and minimize the number of cases
130 * when the display controller will have to be restarted.
131 */
132 if (start) {
133 if (rgrp->used_crtcs++ != 0)
134 __rcar_du_group_start_stop(rgrp, false);
135 __rcar_du_group_start_stop(rgrp, true);
136 } else {
137 if (--rgrp->used_crtcs == 0)
138 __rcar_du_group_start_stop(rgrp, false);
139 }
140}
141
142void rcar_du_group_restart(struct rcar_du_group *rgrp)
143{
144 __rcar_du_group_start_stop(rgrp, false);
145 __rcar_du_group_start_stop(rgrp, true);
146}
147
148static int rcar_du_set_dpad0_routing(struct rcar_du_device *rcdu)
149{
150 int ret;
151
152 /* RGB output routing to DPAD0 is configured in the DEFR8 register of
153 * the first group. As this function can be called with the DU0 and DU1
154 * CRTCs disabled, we need to enable the first group clock before
155 * accessing the register.
156 */
157 ret = clk_prepare_enable(rcdu->crtcs[0].clock);
158 if (ret < 0)
159 return ret;
160
161 rcar_du_group_setup_defr8(&rcdu->groups[0]);
162
163 clk_disable_unprepare(rcdu->crtcs[0].clock);
164
165 return 0;
166}
167
168int rcar_du_group_set_routing(struct rcar_du_group *rgrp)
169{
170 struct rcar_du_crtc *crtc0 = &rgrp->dev->crtcs[rgrp->index * 2];
171 u32 dorcr = rcar_du_group_read(rgrp, DORCR);
172
173 dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
174
 175	/* Set the DPAD1 pin sources. Select CRTC 0 if explicitly requested and
176 * CRTC 1 in all other cases to avoid cloning CRTC 0 to DPAD0 and DPAD1
177 * by default.
178 */
179 if (crtc0->outputs & BIT(RCAR_DU_OUTPUT_DPAD1))
180 dorcr |= DORCR_PG2D_DS1;
181 else
182 dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2;
183
184 rcar_du_group_write(rgrp, DORCR, dorcr);
185
186 return rcar_du_set_dpad0_routing(rgrp->dev);
187}
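
The index arithmetic used across this patch pairs CRTCs two per group; a sketch of the mapping together with the get/put bracketing expected around group register access (the function itself is illustrative):

static void example_group_access(struct rcar_du_device *rcdu,
				 unsigned int crtc_index)
{
	/* CRTCs 0 and 1 share group 0 and CRTC 2 lives in group 1; a
	 * CRTC takes plane crtc_index % 2 from its group's pool, matching
	 * rcar_du_crtc_create() and rcar_du_modeset_init().
	 */
	struct rcar_du_group *rgrp = &rcdu->groups[crtc_index / 2];

	/* The first reference runs rcar_du_group_setup(). */
	rcar_du_group_get(rgrp);
	rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE);
	rcar_du_group_put(rgrp);
}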
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h
new file mode 100644
index 000000000000..5025930972ec
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h
@@ -0,0 +1,50 @@
1/*
 2 * rcar_du_group.h -- R-Car Display Unit Planes and CRTCs Group
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_GROUP_H__
15#define __RCAR_DU_GROUP_H__
16
17#include "rcar_du_plane.h"
18
19struct rcar_du_device;
20
21/*
22 * struct rcar_du_group - CRTCs and planes group
23 * @dev: the DU device
24 * @mmio_offset: registers offset in the device memory map
25 * @index: group index
26 * @use_count: number of users of the group (rcar_du_group_(get|put))
27 * @used_crtcs: number of CRTCs currently in use
28 * @planes: planes handled by the group
29 */
30struct rcar_du_group {
31 struct rcar_du_device *dev;
32 unsigned int mmio_offset;
33 unsigned int index;
34
35 unsigned int use_count;
36 unsigned int used_crtcs;
37
38 struct rcar_du_planes planes;
39};
40
41u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg);
42void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data);
43
44int rcar_du_group_get(struct rcar_du_group *rgrp);
45void rcar_du_group_put(struct rcar_du_group *rgrp);
46void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start);
47void rcar_du_group_restart(struct rcar_du_group *rgrp);
48int rcar_du_group_set_routing(struct rcar_du_group *rgrp);
49
50#endif /* __RCAR_DU_GROUP_H__ */
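
A short trace of the used_crtcs accounting in rcar_du_group_start_stop(), walking through the code above rather than adding behavior:

	/* Start CRTC 0: used_crtcs 0 -> 1, DSYSR_DEN is set directly.
	 * Start CRTC 1: used_crtcs 1 -> 2, the group is reset first
	 *   (DSYSR_DRES, blanking both CRTCs) and then re-enabled, which
	 *   is the flicker the code comment apologizes for.
	 * Stop CRTC 1:  used_crtcs 2 -> 1, no register write.
	 * Stop CRTC 0:  used_crtcs 1 -> 0, DSYSR_DRES stops the group.
	 */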
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index d30c2e29bee2..b31ac080c4a7 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -19,10 +19,10 @@
19 19
20#include "rcar_du_crtc.h" 20#include "rcar_du_crtc.h"
21#include "rcar_du_drv.h" 21#include "rcar_du_drv.h"
22#include "rcar_du_encoder.h"
22#include "rcar_du_kms.h" 23#include "rcar_du_kms.h"
23#include "rcar_du_lvds.h" 24#include "rcar_du_lvdsenc.h"
24#include "rcar_du_regs.h" 25#include "rcar_du_regs.h"
25#include "rcar_du_vga.h"
26 26
27/* ----------------------------------------------------------------------------- 27/* -----------------------------------------------------------------------------
28 * Format helpers 28 * Format helpers
@@ -106,46 +106,24 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
106} 106}
107 107
108/* ----------------------------------------------------------------------------- 108/* -----------------------------------------------------------------------------
109 * Common connector and encoder functions
110 */
111
112struct drm_encoder *
113rcar_du_connector_best_encoder(struct drm_connector *connector)
114{
115 struct rcar_du_connector *rcon = to_rcar_connector(connector);
116
117 return &rcon->encoder->encoder;
118}
119
120void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder)
121{
122}
123
124void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
125 struct drm_display_mode *mode,
126 struct drm_display_mode *adjusted_mode)
127{
128 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
129
130 rcar_du_crtc_route_output(encoder->crtc, renc->output);
131}
132
133void rcar_du_encoder_mode_commit(struct drm_encoder *encoder)
134{
135}
136
137/* -----------------------------------------------------------------------------
138 * Frame buffer 109 * Frame buffer
139 */ 110 */
140 111
141int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev, 112int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
142 struct drm_mode_create_dumb *args) 113 struct drm_mode_create_dumb *args)
143{ 114{
115 struct rcar_du_device *rcdu = dev->dev_private;
144 unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); 116 unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
145 unsigned int align; 117 unsigned int align;
146 118
147	/* The pitch must be aligned to a 16-pixel boundary. */	119	/* The R8A7779 DU requires a 16-pixel pitch alignment as documented,
148	align = 16 * args->bpp / 8;	120	 * but the R8A7790 DU seems to require a 128-byte pitch alignment.
121 */
122 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
123 align = 128;
124 else
125 align = 16 * args->bpp / 8;
126
149 args->pitch = roundup(max(args->pitch, min_pitch), align); 127 args->pitch = roundup(max(args->pitch, min_pitch), align);
150 128
151 return drm_gem_cma_dumb_create(file, dev, args); 129 return drm_gem_cma_dumb_create(file, dev, args);
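
A worked example of the two alignment rules, using hypothetical dumb-buffer arguments (width 1000, 32 bpp) rather than values from the patch:

	/* min_pitch = 1000 * 32 / 8 = 4000 bytes
	 * R8A7779: align = 16 * 32 / 8 = 64  -> pitch = roundup(4000, 64)  = 4032
	 * R8A7790: align = 128              -> pitch = roundup(4000, 128) = 4096
	 */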
@@ -155,6 +133,7 @@ static struct drm_framebuffer *
155rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, 133rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
156 struct drm_mode_fb_cmd2 *mode_cmd) 134 struct drm_mode_fb_cmd2 *mode_cmd)
157{ 135{
136 struct rcar_du_device *rcdu = dev->dev_private;
158 const struct rcar_du_format_info *format; 137 const struct rcar_du_format_info *format;
159 unsigned int align; 138 unsigned int align;
160 139
@@ -165,7 +144,10 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
165 return ERR_PTR(-EINVAL); 144 return ERR_PTR(-EINVAL);
166 } 145 }
167 146
168 align = 16 * format->bpp / 8; 147 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
148 align = 128;
149 else
150 align = 16 * format->bpp / 8;
169 151
170 if (mode_cmd->pitches[0] & (align - 1) || 152 if (mode_cmd->pitches[0] & (align - 1) ||
171 mode_cmd->pitches[0] >= 8192) { 153 mode_cmd->pitches[0] >= 8192) {
@@ -185,81 +167,124 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
185 return drm_fb_cma_create(dev, file_priv, mode_cmd); 167 return drm_fb_cma_create(dev, file_priv, mode_cmd);
186} 168}
187 169
170static void rcar_du_output_poll_changed(struct drm_device *dev)
171{
172 struct rcar_du_device *rcdu = dev->dev_private;
173
174 drm_fbdev_cma_hotplug_event(rcdu->fbdev);
175}
176
188static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = { 177static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
189 .fb_create = rcar_du_fb_create, 178 .fb_create = rcar_du_fb_create,
179 .output_poll_changed = rcar_du_output_poll_changed,
190}; 180};
191 181
192int rcar_du_modeset_init(struct rcar_du_device *rcdu) 182int rcar_du_modeset_init(struct rcar_du_device *rcdu)
193{ 183{
184 static const unsigned int mmio_offsets[] = {
185 DU0_REG_OFFSET, DU2_REG_OFFSET
186 };
187
194 struct drm_device *dev = rcdu->ddev; 188 struct drm_device *dev = rcdu->ddev;
195 struct drm_encoder *encoder; 189 struct drm_encoder *encoder;
190 struct drm_fbdev_cma *fbdev;
191 unsigned int num_groups;
196 unsigned int i; 192 unsigned int i;
197 int ret; 193 int ret;
198 194
199 drm_mode_config_init(rcdu->ddev); 195 drm_mode_config_init(dev);
200 196
201 rcdu->ddev->mode_config.min_width = 0; 197 dev->mode_config.min_width = 0;
202 rcdu->ddev->mode_config.min_height = 0; 198 dev->mode_config.min_height = 0;
203 rcdu->ddev->mode_config.max_width = 4095; 199 dev->mode_config.max_width = 4095;
204 rcdu->ddev->mode_config.max_height = 2047; 200 dev->mode_config.max_height = 2047;
205 rcdu->ddev->mode_config.funcs = &rcar_du_mode_config_funcs; 201 dev->mode_config.funcs = &rcar_du_mode_config_funcs;
206 202
207 ret = rcar_du_plane_init(rcdu); 203 rcdu->num_crtcs = rcdu->info->num_crtcs;
208 if (ret < 0) 204
209 return ret; 205 /* Initialize the groups. */
206 num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);
207
208 for (i = 0; i < num_groups; ++i) {
209 struct rcar_du_group *rgrp = &rcdu->groups[i];
210
211 rgrp->dev = rcdu;
212 rgrp->mmio_offset = mmio_offsets[i];
213 rgrp->index = i;
214
215 ret = rcar_du_planes_init(rgrp);
216 if (ret < 0)
217 return ret;
218 }
219
220 /* Create the CRTCs. */
221 for (i = 0; i < rcdu->num_crtcs; ++i) {
222 struct rcar_du_group *rgrp = &rcdu->groups[i / 2];
210 223
211 for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i) { 224 ret = rcar_du_crtc_create(rgrp, i);
212 ret = rcar_du_crtc_create(rcdu, i);
213 if (ret < 0) 225 if (ret < 0)
214 return ret; 226 return ret;
215 } 227 }
216 228
217 rcdu->used_crtcs = 0; 229 /* Initialize the encoders. */
218 rcdu->num_crtcs = i; 230 ret = rcar_du_lvdsenc_init(rcdu);
231 if (ret < 0)
232 return ret;
219 233
220 for (i = 0; i < rcdu->pdata->num_encoders; ++i) { 234 for (i = 0; i < rcdu->pdata->num_encoders; ++i) {
221 const struct rcar_du_encoder_data *pdata = 235 const struct rcar_du_encoder_data *pdata =
222 &rcdu->pdata->encoders[i]; 236 &rcdu->pdata->encoders[i];
237 const struct rcar_du_output_routing *route =
238 &rcdu->info->routes[pdata->output];
239
240 if (pdata->type == RCAR_DU_ENCODER_UNUSED)
241 continue;
223 242
224 if (pdata->output >= ARRAY_SIZE(rcdu->crtcs)) { 243 if (pdata->output >= RCAR_DU_OUTPUT_MAX ||
244 route->possible_crtcs == 0) {
225 dev_warn(rcdu->dev, 245 dev_warn(rcdu->dev,
226			 "encoder %u references nonexistent output %u, skipping\n",	246
227 i, pdata->output); 247 i, pdata->output);
228 continue; 248 continue;
229 } 249 }
230 250
231 switch (pdata->encoder) { 251 rcar_du_encoder_init(rcdu, pdata->type, pdata->output, pdata);
232 case RCAR_DU_ENCODER_VGA:
233 rcar_du_vga_init(rcdu, &pdata->u.vga, pdata->output);
234 break;
235
236 case RCAR_DU_ENCODER_LVDS:
237 rcar_du_lvds_init(rcdu, &pdata->u.lvds, pdata->output);
238 break;
239
240 default:
241 break;
242 }
243 } 252 }
244 253
245 /* Set the possible CRTCs and possible clones. All encoders can be 254 /* Set the possible CRTCs and possible clones. There's always at least
246	 * driven by the CRTC associated with the output they're connected to,	255	 * one way for all encoders to clone each other, so set all bits in the
247 * as well as by CRTC 0. 256 * possible clones field.
248 */ 257 */
249 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 258 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
250 struct rcar_du_encoder *renc = to_rcar_encoder(encoder); 259 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
260 const struct rcar_du_output_routing *route =
261 &rcdu->info->routes[renc->output];
251 262
252 encoder->possible_crtcs = (1 << 0) | (1 << renc->output); 263 encoder->possible_crtcs = route->possible_crtcs;
253 encoder->possible_clones = 1 << 0; 264 encoder->possible_clones = (1 << rcdu->pdata->num_encoders) - 1;
254 } 265 }
255 266
256 ret = rcar_du_plane_register(rcdu); 267 /* Now that the CRTCs have been initialized register the planes. */
257 if (ret < 0) 268 for (i = 0; i < num_groups; ++i) {
258 return ret; 269 ret = rcar_du_planes_register(&rcdu->groups[i]);
270 if (ret < 0)
271 return ret;
272 }
273
274 drm_kms_helper_poll_init(dev);
275
276 drm_helper_disable_unused_functions(dev);
277
278 fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
279 dev->mode_config.num_connector);
280 if (IS_ERR(fbdev))
281 return PTR_ERR(fbdev);
259 282
260 drm_kms_helper_poll_init(rcdu->ddev); 283#ifndef CONFIG_FRAMEBUFFER_CONSOLE
284 drm_fbdev_cma_restore_mode(fbdev);
285#endif
261 286
262 drm_helper_disable_unused_functions(rcdu->ddev); 287 rcdu->fbdev = fbdev;
263 288
264 return 0; 289 return 0;
265} 290}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
index dba472263486..5750e6af5655 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
@@ -16,8 +16,9 @@
16 16
17#include <linux/types.h> 17#include <linux/types.h>
18 18
19#include <drm/drm_crtc.h> 19struct drm_file;
20 20struct drm_device;
21struct drm_mode_create_dumb;
21struct rcar_du_device; 22struct rcar_du_device;
22 23
23struct rcar_du_format_info { 24struct rcar_du_format_info {
@@ -28,32 +29,8 @@ struct rcar_du_format_info {
28 unsigned int edf; 29 unsigned int edf;
29}; 30};
30 31
31struct rcar_du_encoder {
32 struct drm_encoder encoder;
33 unsigned int output;
34};
35
36#define to_rcar_encoder(e) \
37 container_of(e, struct rcar_du_encoder, encoder)
38
39struct rcar_du_connector {
40 struct drm_connector connector;
41 struct rcar_du_encoder *encoder;
42};
43
44#define to_rcar_connector(c) \
45 container_of(c, struct rcar_du_connector, connector)
46
47const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc); 32const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc);
48 33
49struct drm_encoder *
50rcar_du_connector_best_encoder(struct drm_connector *connector);
51void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder);
52void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
53 struct drm_display_mode *mode,
54 struct drm_display_mode *adjusted_mode);
55void rcar_du_encoder_mode_commit(struct drm_encoder *encoder);
56
57int rcar_du_modeset_init(struct rcar_du_device *rcdu); 34int rcar_du_modeset_init(struct rcar_du_device *rcdu);
58 35
59int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev, 36int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvds.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 7aefe7267e1d..4f3ba93cd91d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * rcar_du_lvds.c -- R-Car Display Unit LVDS Encoder and Connector 2 * rcar_du_lvdscon.c -- R-Car Display Unit LVDS Connector
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013 Renesas Corporation
5 * 5 *
@@ -16,8 +16,9 @@
16#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
17 17
18#include "rcar_du_drv.h" 18#include "rcar_du_drv.h"
19#include "rcar_du_encoder.h"
19#include "rcar_du_kms.h" 20#include "rcar_du_kms.h"
20#include "rcar_du_lvds.h" 21#include "rcar_du_lvdscon.h"
21 22
22struct rcar_du_lvds_connector { 23struct rcar_du_lvds_connector {
23 struct rcar_du_connector connector; 24 struct rcar_du_connector connector;
@@ -28,13 +29,10 @@ struct rcar_du_lvds_connector {
28#define to_rcar_lvds_connector(c) \ 29#define to_rcar_lvds_connector(c) \
29 container_of(c, struct rcar_du_lvds_connector, connector.connector) 30 container_of(c, struct rcar_du_lvds_connector, connector.connector)
30 31
31/* -----------------------------------------------------------------------------
32 * Connector
33 */
34
35static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector) 32static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
36{ 33{
37 struct rcar_du_lvds_connector *lvdscon = to_rcar_lvds_connector(connector); 34 struct rcar_du_lvds_connector *lvdscon =
35 to_rcar_lvds_connector(connector);
38 struct drm_display_mode *mode; 36 struct drm_display_mode *mode;
39 37
40 mode = drm_mode_create(connector->dev); 38 mode = drm_mode_create(connector->dev);
@@ -90,9 +88,9 @@ static const struct drm_connector_funcs connector_funcs = {
90 .destroy = rcar_du_lvds_connector_destroy, 88 .destroy = rcar_du_lvds_connector_destroy,
91}; 89};
92 90
93static int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, 91int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
94 struct rcar_du_encoder *renc, 92 struct rcar_du_encoder *renc,
95 const struct rcar_du_panel_data *panel) 93 const struct rcar_du_panel_data *panel)
96{ 94{
97 struct rcar_du_lvds_connector *lvdscon; 95 struct rcar_du_lvds_connector *lvdscon;
98 struct drm_connector *connector; 96 struct drm_connector *connector;
@@ -131,86 +129,3 @@ static int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
131 129
132 return 0; 130 return 0;
133} 131}
134
135/* -----------------------------------------------------------------------------
136 * Encoder
137 */
138
139static void rcar_du_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
140{
141}
142
143static bool rcar_du_lvds_encoder_mode_fixup(struct drm_encoder *encoder,
144 const struct drm_display_mode *mode,
145 struct drm_display_mode *adjusted_mode)
146{
147 const struct drm_display_mode *panel_mode;
148 struct drm_device *dev = encoder->dev;
149 struct drm_connector *connector;
150 bool found = false;
151
152 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
153 if (connector->encoder == encoder) {
154 found = true;
155 break;
156 }
157 }
158
159 if (!found) {
160 dev_dbg(dev->dev, "mode_fixup: no connector found\n");
161 return false;
162 }
163
164 if (list_empty(&connector->modes)) {
165 dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
166 return false;
167 }
168
169 panel_mode = list_first_entry(&connector->modes,
170 struct drm_display_mode, head);
171
172 /* We're not allowed to modify the resolution. */
173 if (mode->hdisplay != panel_mode->hdisplay ||
174 mode->vdisplay != panel_mode->vdisplay)
175 return false;
176
177 /* The flat panel mode is fixed, just copy it to the adjusted mode. */
178 drm_mode_copy(adjusted_mode, panel_mode);
179
180 return true;
181}
182
183static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
184 .dpms = rcar_du_lvds_encoder_dpms,
185 .mode_fixup = rcar_du_lvds_encoder_mode_fixup,
186 .prepare = rcar_du_encoder_mode_prepare,
187 .commit = rcar_du_encoder_mode_commit,
188 .mode_set = rcar_du_encoder_mode_set,
189};
190
191static const struct drm_encoder_funcs encoder_funcs = {
192 .destroy = drm_encoder_cleanup,
193};
194
195int rcar_du_lvds_init(struct rcar_du_device *rcdu,
196 const struct rcar_du_encoder_lvds_data *data,
197 unsigned int output)
198{
199 struct rcar_du_encoder *renc;
200 int ret;
201
202 renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
203 if (renc == NULL)
204 return -ENOMEM;
205
206 renc->output = output;
207
208 ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
209 DRM_MODE_ENCODER_LVDS);
210 if (ret < 0)
211 return ret;
212
213 drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
214
215 return rcar_du_lvds_connector_init(rcdu, renc, &data->panel);
216}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvds.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
index b47f8328e103..bff8683699ca 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvds.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * rcar_du_lvds.h -- R-Car Display Unit LVDS Encoder and Connector 2 * rcar_du_lvdscon.h -- R-Car Display Unit LVDS Connector
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013 Renesas Corporation
5 * 5 *
@@ -11,14 +11,15 @@
11 * (at your option) any later version. 11 * (at your option) any later version.
12 */ 12 */
13 13
14#ifndef __RCAR_DU_LVDS_H__ 14#ifndef __RCAR_DU_LVDSCON_H__
15#define __RCAR_DU_LVDS_H__ 15#define __RCAR_DU_LVDSCON_H__
16 16
17struct rcar_du_device; 17struct rcar_du_device;
18struct rcar_du_encoder_lvds_data; 18struct rcar_du_encoder;
19struct rcar_du_panel_data;
19 20
20int rcar_du_lvds_init(struct rcar_du_device *rcdu, 21int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
21 const struct rcar_du_encoder_lvds_data *data, 22 struct rcar_du_encoder *renc,
22 unsigned int output); 23 const struct rcar_du_panel_data *panel);
23 24
24#endif /* __RCAR_DU_LVDS_H__ */ 25#endif /* __RCAR_DU_LVDSCON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
new file mode 100644
index 000000000000..a0f6a1781925
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -0,0 +1,196 @@
1/*
2 * rcar_du_lvdsenc.c -- R-Car Display Unit LVDS Encoder
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/io.h>
17#include <linux/platform_device.h>
18#include <linux/slab.h>
19
20#include "rcar_du_drv.h"
21#include "rcar_du_encoder.h"
22#include "rcar_du_lvdsenc.h"
23#include "rcar_lvds_regs.h"
24
25struct rcar_du_lvdsenc {
26 struct rcar_du_device *dev;
27
28 unsigned int index;
29 void __iomem *mmio;
30 struct clk *clock;
31 int dpms;
32
33 enum rcar_lvds_input input;
34};
35
36static void rcar_lvds_write(struct rcar_du_lvdsenc *lvds, u32 reg, u32 data)
37{
38 iowrite32(data, lvds->mmio + reg);
39}
40
41static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
42 struct rcar_du_crtc *rcrtc)
43{
44 const struct drm_display_mode *mode = &rcrtc->crtc.mode;
45 unsigned int freq = mode->clock;
46 u32 lvdcr0;
47 u32 pllcr;
48 int ret;
49
50 if (lvds->dpms == DRM_MODE_DPMS_ON)
51 return 0;
52
53 ret = clk_prepare_enable(lvds->clock);
54 if (ret < 0)
55 return ret;
56
57 /* PLL clock configuration */
58 if (freq <= 38000)
59 pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M;
60 else if (freq <= 60000)
61 pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M;
62 else if (freq <= 121000)
63 pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M;
64 else
65 pllcr = LVDPLLCR_PLLDLYCNT_150M;
66
67 rcar_lvds_write(lvds, LVDPLLCR, pllcr);
68
69 /* Hardcode the channels and control signals routing for now.
70 *
71 * HSYNC -> CTRL0
72 * VSYNC -> CTRL1
73 * DISP -> CTRL2
74 * 0 -> CTRL3
75 *
76 * Channels 1 and 3 are switched on ES1.
77 */
78 rcar_lvds_write(lvds, LVDCTRCR, LVDCTRCR_CTR3SEL_ZERO |
79 LVDCTRCR_CTR2SEL_DISP | LVDCTRCR_CTR1SEL_VSYNC |
80 LVDCTRCR_CTR0SEL_HSYNC);
81 rcar_lvds_write(lvds, LVDCHCR,
82 LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3) |
83 LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1));
84
85 /* Select the input, hardcode mode 0, enable LVDS operation and turn
86 * bias circuitry on.
87 */
88 lvdcr0 = LVDCR0_BEN | LVDCR0_LVEN;
89 if (rcrtc->index == 2)
90 lvdcr0 |= LVDCR0_DUSEL;
91 rcar_lvds_write(lvds, LVDCR0, lvdcr0);
92
93 /* Turn all the channels on. */
94 rcar_lvds_write(lvds, LVDCR1, LVDCR1_CHSTBY(3) | LVDCR1_CHSTBY(2) |
95 LVDCR1_CHSTBY(1) | LVDCR1_CHSTBY(0) | LVDCR1_CLKSTBY);
96
97 /* Turn the PLL on, wait for the startup delay, and turn the output
98 * on.
99 */
100 lvdcr0 |= LVDCR0_PLLEN;
101 rcar_lvds_write(lvds, LVDCR0, lvdcr0);
102
103 usleep_range(100, 150);
104
105 lvdcr0 |= LVDCR0_LVRES;
106 rcar_lvds_write(lvds, LVDCR0, lvdcr0);
107
108 lvds->dpms = DRM_MODE_DPMS_ON;
109 return 0;
110}
111
112static void rcar_du_lvdsenc_stop(struct rcar_du_lvdsenc *lvds)
113{
114 if (lvds->dpms == DRM_MODE_DPMS_OFF)
115 return;
116
117 rcar_lvds_write(lvds, LVDCR0, 0);
118 rcar_lvds_write(lvds, LVDCR1, 0);
119
120 clk_disable_unprepare(lvds->clock);
121
122 lvds->dpms = DRM_MODE_DPMS_OFF;
123}
124
125int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
126 struct drm_crtc *crtc, int mode)
127{
128 if (mode == DRM_MODE_DPMS_OFF) {
129 rcar_du_lvdsenc_stop(lvds);
130 return 0;
131 } else if (crtc) {
132 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
133 return rcar_du_lvdsenc_start(lvds, rcrtc);
134 } else
135 return -EINVAL;
136}
137
138static int rcar_du_lvdsenc_get_resources(struct rcar_du_lvdsenc *lvds,
139 struct platform_device *pdev)
140{
141 struct resource *mem;
142 char name[7];
143
144 sprintf(name, "lvds.%u", lvds->index);
145
146 mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
147 if (mem == NULL) {
148 dev_err(&pdev->dev, "failed to get memory resource for %s\n",
149 name);
150 return -EINVAL;
151 }
152
153 lvds->mmio = devm_ioremap_resource(&pdev->dev, mem);
154	if (IS_ERR(lvds->mmio)) {
155		dev_err(&pdev->dev, "failed to remap memory resource for %s\n",
156			name);
157		return PTR_ERR(lvds->mmio);
158 }
159
160 lvds->clock = devm_clk_get(&pdev->dev, name);
161 if (IS_ERR(lvds->clock)) {
162 dev_err(&pdev->dev, "failed to get clock for %s\n", name);
163 return PTR_ERR(lvds->clock);
164 }
165
166 return 0;
167}
168
169int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
170{
171 struct platform_device *pdev = to_platform_device(rcdu->dev);
172 struct rcar_du_lvdsenc *lvds;
173 unsigned int i;
174 int ret;
175
176 for (i = 0; i < rcdu->info->num_lvds; ++i) {
177 lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
178 if (lvds == NULL) {
179 dev_err(&pdev->dev, "failed to allocate private data\n");
180 return -ENOMEM;
181 }
182
183 lvds->dev = rcdu;
184 lvds->index = i;
185 lvds->input = i ? RCAR_LVDS_INPUT_DU1 : RCAR_LVDS_INPUT_DU0;
186 lvds->dpms = DRM_MODE_DPMS_OFF;
187
188 ret = rcar_du_lvdsenc_get_resources(lvds, pdev);
189 if (ret < 0)
190 return ret;
191
192 rcdu->lvds[i] = lvds;
193 }
194
195 return 0;
196}
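
The PLL setup selects a delay-count band from the mode clock in kHz; the same cascade as in rcar_du_lvdsenc_start(), restated as a standalone helper for clarity (the helper itself is illustrative):

static u32 example_lvds_pllcr(unsigned int freq_khz)
{
	/* Bands: up to 38, 60 and 121 MHz; anything above falls into the
	 * 150 MHz band, the ceiling enforced by the encoder's mode_fixup().
	 */
	if (freq_khz <= 38000)
		return LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M;
	if (freq_khz <= 60000)
		return LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M;
	if (freq_khz <= 121000)
		return LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M;
	return LVDPLLCR_PLLDLYCNT_150M;
}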
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
new file mode 100644
index 000000000000..7051c6de19ae
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
@@ -0,0 +1,46 @@
1/*
2 * rcar_du_lvdsenc.h -- R-Car Display Unit LVDS Encoder
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_LVDSENC_H__
15#define __RCAR_DU_LVDSENC_H__
16
17#include <linux/io.h>
18#include <linux/module.h>
19#include <linux/platform_data/rcar-du.h>
20
 21struct drm_crtc;
22struct rcar_du_lvdsenc;
23
24enum rcar_lvds_input {
25 RCAR_LVDS_INPUT_DU0,
26 RCAR_LVDS_INPUT_DU1,
27 RCAR_LVDS_INPUT_DU2,
28};
29
30#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
31int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu);
32int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
33 struct drm_crtc *crtc, int mode);
34#else
35static inline int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
36{
37 return 0;
38}
39static inline int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
40 struct drm_crtc *crtc, int mode)
41{
42 return 0;
43}
44#endif
45
46#endif /* __RCAR_DU_LVDSENC_H__ */
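
The IS_ENABLED() stubs keep call sites free of #ifdef: with CONFIG_DRM_RCAR_LVDS disabled the inlines compile to no-ops returning 0, so the call in rcar_du_modeset_init() works unchanged (fragment mirroring that call):

	ret = rcar_du_lvdsenc_init(rcdu);	/* no-op stub when LVDS is disabled */
	if (ret < 0)
		return ret;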
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index a65f81ddf51d..53000644733f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -36,90 +36,95 @@ static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
36 return container_of(plane, struct rcar_du_kms_plane, plane)->hwplane; 36 return container_of(plane, struct rcar_du_kms_plane, plane)->hwplane;
37} 37}
38 38
39static u32 rcar_du_plane_read(struct rcar_du_device *rcdu, 39static u32 rcar_du_plane_read(struct rcar_du_group *rgrp,
40 unsigned int index, u32 reg) 40 unsigned int index, u32 reg)
41{ 41{
42 return rcar_du_read(rcdu, index * PLANE_OFF + reg); 42 return rcar_du_read(rgrp->dev,
43 rgrp->mmio_offset + index * PLANE_OFF + reg);
43} 44}
44 45
45static void rcar_du_plane_write(struct rcar_du_device *rcdu, 46static void rcar_du_plane_write(struct rcar_du_group *rgrp,
46 unsigned int index, u32 reg, u32 data) 47 unsigned int index, u32 reg, u32 data)
47{ 48{
48 rcar_du_write(rcdu, index * PLANE_OFF + reg, data); 49 rcar_du_write(rgrp->dev, rgrp->mmio_offset + index * PLANE_OFF + reg,
50 data);
49} 51}
50 52
51int rcar_du_plane_reserve(struct rcar_du_plane *plane, 53int rcar_du_plane_reserve(struct rcar_du_plane *plane,
52 const struct rcar_du_format_info *format) 54 const struct rcar_du_format_info *format)
53{ 55{
54 struct rcar_du_device *rcdu = plane->dev; 56 struct rcar_du_group *rgrp = plane->group;
55 unsigned int i; 57 unsigned int i;
56 int ret = -EBUSY; 58 int ret = -EBUSY;
57 59
58 mutex_lock(&rcdu->planes.lock); 60 mutex_lock(&rgrp->planes.lock);
59 61
60 for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) { 62 for (i = 0; i < ARRAY_SIZE(rgrp->planes.planes); ++i) {
61 if (!(rcdu->planes.free & (1 << i))) 63 if (!(rgrp->planes.free & (1 << i)))
62 continue; 64 continue;
63 65
64 if (format->planes == 1 || 66 if (format->planes == 1 ||
65 rcdu->planes.free & (1 << ((i + 1) % 8))) 67 rgrp->planes.free & (1 << ((i + 1) % 8)))
66 break; 68 break;
67 } 69 }
68 70
69 if (i == ARRAY_SIZE(rcdu->planes.planes)) 71 if (i == ARRAY_SIZE(rgrp->planes.planes))
70 goto done; 72 goto done;
71 73
72 rcdu->planes.free &= ~(1 << i); 74 rgrp->planes.free &= ~(1 << i);
73 if (format->planes == 2) 75 if (format->planes == 2)
74 rcdu->planes.free &= ~(1 << ((i + 1) % 8)); 76 rgrp->planes.free &= ~(1 << ((i + 1) % 8));
75 77
76 plane->hwindex = i; 78 plane->hwindex = i;
77 79
78 ret = 0; 80 ret = 0;
79 81
80done: 82done:
81 mutex_unlock(&rcdu->planes.lock); 83 mutex_unlock(&rgrp->planes.lock);
82 return ret; 84 return ret;
83} 85}
84 86
85void rcar_du_plane_release(struct rcar_du_plane *plane) 87void rcar_du_plane_release(struct rcar_du_plane *plane)
86{ 88{
87 struct rcar_du_device *rcdu = plane->dev; 89 struct rcar_du_group *rgrp = plane->group;
88 90
89 if (plane->hwindex == -1) 91 if (plane->hwindex == -1)
90 return; 92 return;
91 93
92 mutex_lock(&rcdu->planes.lock); 94 mutex_lock(&rgrp->planes.lock);
93 rcdu->planes.free |= 1 << plane->hwindex; 95 rgrp->planes.free |= 1 << plane->hwindex;
94 if (plane->format->planes == 2) 96 if (plane->format->planes == 2)
95 rcdu->planes.free |= 1 << ((plane->hwindex + 1) % 8); 97 rgrp->planes.free |= 1 << ((plane->hwindex + 1) % 8);
96 mutex_unlock(&rcdu->planes.lock); 98 mutex_unlock(&rgrp->planes.lock);
97 99
98 plane->hwindex = -1; 100 plane->hwindex = -1;
99} 101}
100 102
101void rcar_du_plane_update_base(struct rcar_du_plane *plane) 103void rcar_du_plane_update_base(struct rcar_du_plane *plane)
102{ 104{
103 struct rcar_du_device *rcdu = plane->dev; 105 struct rcar_du_group *rgrp = plane->group;
104 unsigned int index = plane->hwindex; 106 unsigned int index = plane->hwindex;
105 107
106 /* According to the datasheet the Y position is expressed in raster line 108 /* The Y position is expressed in raster line units and must be doubled
107 * units. However, 32bpp formats seem to require a doubled Y position 109 * for 32bpp formats, according to the R8A7790 datasheet. No mention of
108 * value. Similarly, for the second plane, NV12 and NV21 formats seem to 110 * doubling the Y position is found in the R8A7779 datasheet, but the
111 * rule seems to apply there as well.
112 *
113 * Similarly, for the second plane, NV12 and NV21 formats seem to
109 * require a halved Y position value. 114 * require a halved Y position value.
110 */ 115 */
111 rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x); 116 rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
112 rcar_du_plane_write(rcdu, index, PnSPYR, plane->src_y * 117 rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
113 (plane->format->bpp == 32 ? 2 : 1)); 118 (plane->format->bpp == 32 ? 2 : 1));
114 rcar_du_plane_write(rcdu, index, PnDSA0R, plane->dma[0]); 119 rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[0]);
115 120
116 if (plane->format->planes == 2) { 121 if (plane->format->planes == 2) {
117 index = (index + 1) % 8; 122 index = (index + 1) % 8;
118 123
119 rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x); 124 rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
120 rcar_du_plane_write(rcdu, index, PnSPYR, plane->src_y * 125 rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
121 (plane->format->bpp == 16 ? 2 : 1) / 2); 126 (plane->format->bpp == 16 ? 2 : 1) / 2);
122 rcar_du_plane_write(rcdu, index, PnDSA0R, plane->dma[1]); 127 rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[1]);
123 } 128 }
124} 129}
125 130
@@ -140,7 +145,7 @@ void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
140static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane, 145static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
141 unsigned int index) 146 unsigned int index)
142{ 147{
143 struct rcar_du_device *rcdu = plane->dev; 148 struct rcar_du_group *rgrp = plane->group;
144 u32 colorkey; 149 u32 colorkey;
145 u32 pnmr; 150 u32 pnmr;
146 151
@@ -154,9 +159,9 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
154 * enable alpha-blending regardless of the X bit value. 159 * enable alpha-blending regardless of the X bit value.
155 */ 160 */
156 if (plane->format->fourcc != DRM_FORMAT_XRGB1555) 161 if (plane->format->fourcc != DRM_FORMAT_XRGB1555)
157 rcar_du_plane_write(rcdu, index, PnALPHAR, PnALPHAR_ABIT_0); 162 rcar_du_plane_write(rgrp, index, PnALPHAR, PnALPHAR_ABIT_0);
158 else 163 else
159 rcar_du_plane_write(rcdu, index, PnALPHAR, 164 rcar_du_plane_write(rgrp, index, PnALPHAR,
160 PnALPHAR_ABIT_X | plane->alpha); 165 PnALPHAR_ABIT_X | plane->alpha);
161 166
162 pnmr = PnMR_BM_MD | plane->format->pnmr; 167 pnmr = PnMR_BM_MD | plane->format->pnmr;
@@ -172,14 +177,14 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
172 if (plane->format->fourcc == DRM_FORMAT_YUYV) 177 if (plane->format->fourcc == DRM_FORMAT_YUYV)
173 pnmr |= PnMR_YCDF_YUYV; 178 pnmr |= PnMR_YCDF_YUYV;
174 179
175 rcar_du_plane_write(rcdu, index, PnMR, pnmr); 180 rcar_du_plane_write(rgrp, index, PnMR, pnmr);
176 181
177 switch (plane->format->fourcc) { 182 switch (plane->format->fourcc) {
178 case DRM_FORMAT_RGB565: 183 case DRM_FORMAT_RGB565:
179 colorkey = ((plane->colorkey & 0xf80000) >> 8) 184 colorkey = ((plane->colorkey & 0xf80000) >> 8)
180 | ((plane->colorkey & 0x00fc00) >> 5) 185 | ((plane->colorkey & 0x00fc00) >> 5)
181 | ((plane->colorkey & 0x0000f8) >> 3); 186 | ((plane->colorkey & 0x0000f8) >> 3);
182 rcar_du_plane_write(rcdu, index, PnTC2R, colorkey); 187 rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
183 break; 188 break;
184 189
185 case DRM_FORMAT_ARGB1555: 190 case DRM_FORMAT_ARGB1555:
@@ -187,12 +192,12 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
187 colorkey = ((plane->colorkey & 0xf80000) >> 9) 192 colorkey = ((plane->colorkey & 0xf80000) >> 9)
188 | ((plane->colorkey & 0x00f800) >> 6) 193 | ((plane->colorkey & 0x00f800) >> 6)
189 | ((plane->colorkey & 0x0000f8) >> 3); 194 | ((plane->colorkey & 0x0000f8) >> 3);
190 rcar_du_plane_write(rcdu, index, PnTC2R, colorkey); 195 rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
191 break; 196 break;
192 197
193 case DRM_FORMAT_XRGB8888: 198 case DRM_FORMAT_XRGB8888:
194 case DRM_FORMAT_ARGB8888: 199 case DRM_FORMAT_ARGB8888:
195 rcar_du_plane_write(rcdu, index, PnTC3R, 200 rcar_du_plane_write(rgrp, index, PnTC3R,
196 PnTC3R_CODE | (plane->colorkey & 0xffffff)); 201 PnTC3R_CODE | (plane->colorkey & 0xffffff));
197 break; 202 break;
198 } 203 }
@@ -201,7 +206,7 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
201static void __rcar_du_plane_setup(struct rcar_du_plane *plane, 206static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
202 unsigned int index) 207 unsigned int index)
203{ 208{
204 struct rcar_du_device *rcdu = plane->dev; 209 struct rcar_du_group *rgrp = plane->group;
205 u32 ddcr2 = PnDDCR2_CODE; 210 u32 ddcr2 = PnDDCR2_CODE;
206 u32 ddcr4; 211 u32 ddcr4;
207 u32 mwr; 212 u32 mwr;
@@ -211,7 +216,7 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
211 * The data format is selected by the DDDF field in PnMR and the EDF 216 * The data format is selected by the DDDF field in PnMR and the EDF
212 * field in DDCR4. 217 * field in DDCR4.
213 */ 218 */
214 ddcr4 = rcar_du_plane_read(rcdu, index, PnDDCR4); 219 ddcr4 = rcar_du_plane_read(rgrp, index, PnDDCR4);
215 ddcr4 &= ~PnDDCR4_EDF_MASK; 220 ddcr4 &= ~PnDDCR4_EDF_MASK;
216 ddcr4 |= plane->format->edf | PnDDCR4_CODE; 221 ddcr4 |= plane->format->edf | PnDDCR4_CODE;
217 222
@@ -232,8 +237,8 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
232 } 237 }
233 } 238 }
234 239
235 rcar_du_plane_write(rcdu, index, PnDDCR2, ddcr2); 240 rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2);
236 rcar_du_plane_write(rcdu, index, PnDDCR4, ddcr4); 241 rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4);
237 242
238 /* Memory pitch (expressed in pixels) */ 243 /* Memory pitch (expressed in pixels) */
239 if (plane->format->planes == 2) 244 if (plane->format->planes == 2)
@@ -241,19 +246,19 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
241 else 246 else
242 mwr = plane->pitch * 8 / plane->format->bpp; 247 mwr = plane->pitch * 8 / plane->format->bpp;
243 248
244 rcar_du_plane_write(rcdu, index, PnMWR, mwr); 249 rcar_du_plane_write(rgrp, index, PnMWR, mwr);
245 250
246 /* Destination position and size */ 251 /* Destination position and size */
247 rcar_du_plane_write(rcdu, index, PnDSXR, plane->width); 252 rcar_du_plane_write(rgrp, index, PnDSXR, plane->width);
248 rcar_du_plane_write(rcdu, index, PnDSYR, plane->height); 253 rcar_du_plane_write(rgrp, index, PnDSYR, plane->height);
249 rcar_du_plane_write(rcdu, index, PnDPXR, plane->dst_x); 254 rcar_du_plane_write(rgrp, index, PnDPXR, plane->dst_x);
250 rcar_du_plane_write(rcdu, index, PnDPYR, plane->dst_y); 255 rcar_du_plane_write(rgrp, index, PnDPYR, plane->dst_y);
251 256
252 /* Wrap-around and blinking, disabled */ 257 /* Wrap-around and blinking, disabled */
253 rcar_du_plane_write(rcdu, index, PnWASPR, 0); 258 rcar_du_plane_write(rgrp, index, PnWASPR, 0);
254 rcar_du_plane_write(rcdu, index, PnWAMWR, 4095); 259 rcar_du_plane_write(rgrp, index, PnWAMWR, 4095);
255 rcar_du_plane_write(rcdu, index, PnBTR, 0); 260 rcar_du_plane_write(rgrp, index, PnBTR, 0);
256 rcar_du_plane_write(rcdu, index, PnMLR, 0); 261 rcar_du_plane_write(rgrp, index, PnMLR, 0);
257} 262}
258 263
259void rcar_du_plane_setup(struct rcar_du_plane *plane) 264void rcar_du_plane_setup(struct rcar_du_plane *plane)
@@ -273,7 +278,7 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
273 uint32_t src_w, uint32_t src_h) 278 uint32_t src_w, uint32_t src_h)
274{ 279{
275 struct rcar_du_plane *rplane = to_rcar_plane(plane); 280 struct rcar_du_plane *rplane = to_rcar_plane(plane);
276 struct rcar_du_device *rcdu = plane->dev->dev_private; 281 struct rcar_du_device *rcdu = rplane->group->dev;
277 const struct rcar_du_format_info *format; 282 const struct rcar_du_format_info *format;
278 unsigned int nplanes; 283 unsigned int nplanes;
279 int ret; 284 int ret;
@@ -316,26 +321,25 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
316 rcar_du_plane_compute_base(rplane, fb); 321 rcar_du_plane_compute_base(rplane, fb);
317 rcar_du_plane_setup(rplane); 322 rcar_du_plane_setup(rplane);
318 323
319 mutex_lock(&rcdu->planes.lock); 324 mutex_lock(&rplane->group->planes.lock);
320 rplane->enabled = true; 325 rplane->enabled = true;
321 rcar_du_crtc_update_planes(rplane->crtc); 326 rcar_du_crtc_update_planes(rplane->crtc);
322 mutex_unlock(&rcdu->planes.lock); 327 mutex_unlock(&rplane->group->planes.lock);
323 328
324 return 0; 329 return 0;
325} 330}
326 331
327static int rcar_du_plane_disable(struct drm_plane *plane) 332static int rcar_du_plane_disable(struct drm_plane *plane)
328{ 333{
329 struct rcar_du_device *rcdu = plane->dev->dev_private;
330 struct rcar_du_plane *rplane = to_rcar_plane(plane); 334 struct rcar_du_plane *rplane = to_rcar_plane(plane);
331 335
332 if (!rplane->enabled) 336 if (!rplane->enabled)
333 return 0; 337 return 0;
334 338
335 mutex_lock(&rcdu->planes.lock); 339 mutex_lock(&rplane->group->planes.lock);
336 rplane->enabled = false; 340 rplane->enabled = false;
337 rcar_du_crtc_update_planes(rplane->crtc); 341 rcar_du_crtc_update_planes(rplane->crtc);
338 mutex_unlock(&rcdu->planes.lock); 342 mutex_unlock(&rplane->group->planes.lock);
339 343
340 rcar_du_plane_release(rplane); 344 rcar_du_plane_release(rplane);
341 345
@@ -377,9 +381,7 @@ static void rcar_du_plane_set_colorkey(struct rcar_du_plane *plane,
377static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane, 381static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane,
378 unsigned int zpos) 382 unsigned int zpos)
379{ 383{
380 struct rcar_du_device *rcdu = plane->dev; 384 mutex_lock(&plane->group->planes.lock);
381
382 mutex_lock(&rcdu->planes.lock);
383 if (plane->zpos == zpos) 385 if (plane->zpos == zpos)
384 goto done; 386 goto done;
385 387
@@ -390,21 +392,21 @@ static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane,
390 rcar_du_crtc_update_planes(plane->crtc); 392 rcar_du_crtc_update_planes(plane->crtc);
391 393
392done: 394done:
393 mutex_unlock(&rcdu->planes.lock); 395 mutex_unlock(&plane->group->planes.lock);
394} 396}
395 397
396static int rcar_du_plane_set_property(struct drm_plane *plane, 398static int rcar_du_plane_set_property(struct drm_plane *plane,
397 struct drm_property *property, 399 struct drm_property *property,
398 uint64_t value) 400 uint64_t value)
399{ 401{
400 struct rcar_du_device *rcdu = plane->dev->dev_private;
401 struct rcar_du_plane *rplane = to_rcar_plane(plane); 402 struct rcar_du_plane *rplane = to_rcar_plane(plane);
403 struct rcar_du_group *rgrp = rplane->group;
402 404
403 if (property == rcdu->planes.alpha) 405 if (property == rgrp->planes.alpha)
404 rcar_du_plane_set_alpha(rplane, value); 406 rcar_du_plane_set_alpha(rplane, value);
405 else if (property == rcdu->planes.colorkey) 407 else if (property == rgrp->planes.colorkey)
406 rcar_du_plane_set_colorkey(rplane, value); 408 rcar_du_plane_set_colorkey(rplane, value);
407 else if (property == rcdu->planes.zpos) 409 else if (property == rgrp->planes.zpos)
408 rcar_du_plane_set_zpos(rplane, value); 410 rcar_du_plane_set_zpos(rplane, value);
409 else 411 else
410 return -EINVAL; 412 return -EINVAL;
@@ -432,37 +434,39 @@ static const uint32_t formats[] = {
432 DRM_FORMAT_NV16, 434 DRM_FORMAT_NV16,
433}; 435};
434 436
435int rcar_du_plane_init(struct rcar_du_device *rcdu) 437int rcar_du_planes_init(struct rcar_du_group *rgrp)
436{ 438{
439 struct rcar_du_planes *planes = &rgrp->planes;
440 struct rcar_du_device *rcdu = rgrp->dev;
437 unsigned int i; 441 unsigned int i;
438 442
439 mutex_init(&rcdu->planes.lock); 443 mutex_init(&planes->lock);
440 rcdu->planes.free = 0xff; 444 planes->free = 0xff;
441 445
442 rcdu->planes.alpha = 446 planes->alpha =
443 drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255); 447 drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255);
444 if (rcdu->planes.alpha == NULL) 448 if (planes->alpha == NULL)
445 return -ENOMEM; 449 return -ENOMEM;
446 450
447 /* The color key is expressed as an RGB888 triplet stored in a 32-bit 451 /* The color key is expressed as an RGB888 triplet stored in a 32-bit
448 * integer in XRGB8888 format. Bit 24 is used as a flag to disable (0) 452 * integer in XRGB8888 format. Bit 24 is used as a flag to disable (0)
449 * or enable source color keying (1). 453 * or enable source color keying (1).
450 */ 454 */
451 rcdu->planes.colorkey = 455 planes->colorkey =
452 drm_property_create_range(rcdu->ddev, 0, "colorkey", 456 drm_property_create_range(rcdu->ddev, 0, "colorkey",
453 0, 0x01ffffff); 457 0, 0x01ffffff);
454 if (rcdu->planes.colorkey == NULL) 458 if (planes->colorkey == NULL)
455 return -ENOMEM; 459 return -ENOMEM;
456 460
457 rcdu->planes.zpos = 461 planes->zpos =
458 drm_property_create_range(rcdu->ddev, 0, "zpos", 1, 7); 462 drm_property_create_range(rcdu->ddev, 0, "zpos", 1, 7);
459 if (rcdu->planes.zpos == NULL) 463 if (planes->zpos == NULL)
460 return -ENOMEM; 464 return -ENOMEM;
461 465
462 for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) { 466 for (i = 0; i < ARRAY_SIZE(planes->planes); ++i) {
463 struct rcar_du_plane *plane = &rcdu->planes.planes[i]; 467 struct rcar_du_plane *plane = &planes->planes[i];
464 468
465 plane->dev = rcdu; 469 plane->group = rgrp;
466 plane->hwindex = -1; 470 plane->hwindex = -1;
467 plane->alpha = 255; 471 plane->alpha = 255;
468 plane->colorkey = RCAR_DU_COLORKEY_NONE; 472 plane->colorkey = RCAR_DU_COLORKEY_NONE;
@@ -472,11 +476,16 @@ int rcar_du_plane_init(struct rcar_du_device *rcdu)
472 return 0; 476 return 0;
473} 477}
474 478
475int rcar_du_plane_register(struct rcar_du_device *rcdu) 479int rcar_du_planes_register(struct rcar_du_group *rgrp)
476{ 480{
481 struct rcar_du_planes *planes = &rgrp->planes;
482 struct rcar_du_device *rcdu = rgrp->dev;
483 unsigned int crtcs;
477 unsigned int i; 484 unsigned int i;
478 int ret; 485 int ret;
479 486
487 crtcs = ((1 << rcdu->num_crtcs) - 1) & (3 << (2 * rgrp->index));
488
480 for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) { 489 for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) {
481 struct rcar_du_kms_plane *plane; 490 struct rcar_du_kms_plane *plane;
482 491
@@ -484,23 +493,22 @@ int rcar_du_plane_register(struct rcar_du_device *rcdu)
484 if (plane == NULL) 493 if (plane == NULL)
485 return -ENOMEM; 494 return -ENOMEM;
486 495
487 plane->hwplane = &rcdu->planes.planes[i + 2]; 496 plane->hwplane = &planes->planes[i + 2];
488 plane->hwplane->zpos = 1; 497 plane->hwplane->zpos = 1;
489 498
490 ret = drm_plane_init(rcdu->ddev, &plane->plane, 499 ret = drm_plane_init(rcdu->ddev, &plane->plane, crtcs,
491 (1 << rcdu->num_crtcs) - 1,
492 &rcar_du_plane_funcs, formats, 500 &rcar_du_plane_funcs, formats,
493 ARRAY_SIZE(formats), false); 501 ARRAY_SIZE(formats), false);
494 if (ret < 0) 502 if (ret < 0)
495 return ret; 503 return ret;
496 504
497 drm_object_attach_property(&plane->plane.base, 505 drm_object_attach_property(&plane->plane.base,
498 rcdu->planes.alpha, 255); 506 planes->alpha, 255);
499 drm_object_attach_property(&plane->plane.base, 507 drm_object_attach_property(&plane->plane.base,
500 rcdu->planes.colorkey, 508 planes->colorkey,
501 RCAR_DU_COLORKEY_NONE); 509 RCAR_DU_COLORKEY_NONE);
502 drm_object_attach_property(&plane->plane.base, 510 drm_object_attach_property(&plane->plane.base,
503 rcdu->planes.zpos, 1); 511 planes->zpos, 1);
504 } 512 }
505 513
506 return 0; 514 return 0;
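The colorkey property's encoding is spelled out in the comment above: an RGB888 triplet in the low 24 bits of an XRGB8888-style word, with bit 24 enabling source color keying. For RGB565 framebuffers, setup_mode() reduces the triplet to 5/6/5 bits as shown in the switch. A small standalone check of that packing, with values chosen purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Colorkey layout used by the driver: RGB888 in the low 24 bits,
     * bit 24 as the "source color keying enabled" flag. */
    #define COLORKEY_EN	(1u << 24)

    /* Same XRGB8888 -> RGB565 reduction as rcar_du_plane_setup_mode():
     * keep the top 5/6/5 bits of R/G/B and pack them into 16 bits.
     * The masks also strip the enable flag in bit 24. */
    static uint32_t colorkey_to_rgb565(uint32_t key)
    {
    	return ((key & 0xf80000) >> 8)
    	     | ((key & 0x00fc00) >> 5)
    	     | ((key & 0x0000f8) >> 3);
    }

    int main(void)
    {
    	uint32_t key = COLORKEY_EN | 0xff8040; /* enabled, R=ff G=80 B=40 */

    	/* R 0xff -> 0x1f (5 bits), G 0x80 -> 0x20 (6 bits),
    	 * B 0x40 -> 0x08 (5 bits); prints 0xfc08. */
    	printf("rgb565 key = 0x%04x\n", colorkey_to_rgb565(key));
    	return 0;
    }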
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index 5397dba2fe57..f94f9ce84998 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -14,10 +14,13 @@
14#ifndef __RCAR_DU_PLANE_H__ 14#ifndef __RCAR_DU_PLANE_H__
15#define __RCAR_DU_PLANE_H__ 15#define __RCAR_DU_PLANE_H__
16 16
17struct drm_crtc; 17#include <linux/mutex.h>
18struct drm_framebuffer; 18
19struct rcar_du_device; 19#include <drm/drmP.h>
20#include <drm/drm_crtc.h>
21
20struct rcar_du_format_info; 22struct rcar_du_format_info;
23struct rcar_du_group;
21 24
22/* The RCAR DU has 8 hardware planes, shared between KMS planes and CRTCs. As 25/* The RCAR DU has 8 hardware planes, shared between KMS planes and CRTCs. As
23 * using KMS planes requires at least one of the CRTCs being enabled, no more 26 * using KMS planes requires at least one of the CRTCs being enabled, no more
@@ -30,7 +33,7 @@ struct rcar_du_format_info;
30#define RCAR_DU_NUM_SW_PLANES 9 33#define RCAR_DU_NUM_SW_PLANES 9
31 34
32struct rcar_du_plane { 35struct rcar_du_plane {
33 struct rcar_du_device *dev; 36 struct rcar_du_group *group;
34 struct drm_crtc *crtc; 37 struct drm_crtc *crtc;
35 38
36 bool enabled; 39 bool enabled;
@@ -54,8 +57,19 @@ struct rcar_du_plane {
54 unsigned int dst_y; 57 unsigned int dst_y;
55}; 58};
56 59
57int rcar_du_plane_init(struct rcar_du_device *rcdu); 60struct rcar_du_planes {
58int rcar_du_plane_register(struct rcar_du_device *rcdu); 61 struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES];
62 unsigned int free;
63 struct mutex lock;
64
65 struct drm_property *alpha;
66 struct drm_property *colorkey;
67 struct drm_property *zpos;
68};
69
70int rcar_du_planes_init(struct rcar_du_group *rgrp);
71int rcar_du_planes_register(struct rcar_du_group *rgrp);
72
59void rcar_du_plane_setup(struct rcar_du_plane *plane); 73void rcar_du_plane_setup(struct rcar_du_plane *plane);
60void rcar_du_plane_update_base(struct rcar_du_plane *plane); 74void rcar_du_plane_update_base(struct rcar_du_plane *plane);
61void rcar_du_plane_compute_base(struct rcar_du_plane *plane, 75void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
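The free bitmask in the new struct rcar_du_planes is what rcar_du_plane_reserve() walks: one bit per hardware plane, and two-plane (semiplanar) formats must additionally claim the wrapping neighbour (i + 1) % 8 so the pair stays adjacent. A self-contained sketch of the same allocation logic, taken out of the kernel so it can be compiled and tested directly (free_mask here plays the role of planes.free):

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_HW_PLANES 8

    /* Scan for a free hardware plane the way rcar_du_plane_reserve() does;
     * for two-plane formats the wrapping neighbour must be free as well.
     * Returns the index and clears the claimed bits, or -1 on failure. */
    static int reserve_plane(uint8_t *free_mask, unsigned int nplanes)
    {
    	unsigned int i;

    	for (i = 0; i < NUM_HW_PLANES; ++i) {
    		if (!(*free_mask & (1u << i)))
    			continue;
    		if (nplanes == 1 || (*free_mask & (1u << ((i + 1) % 8))))
    			break;
    	}

    	if (i == NUM_HW_PLANES)
    		return -1;

    	*free_mask &= ~(1u << i);
    	if (nplanes == 2)
    		*free_mask &= ~(1u << ((i + 1) % 8));
    	return (int)i;
    }

    int main(void)
    {
    	uint8_t free_mask = 0xff; /* all eight planes, as in planes_init() */

    	printf("1-plane format -> plane %d, free now 0x%02x\n",
    	       reserve_plane(&free_mask, 1), free_mask); /* 0, 0xfe */
    	printf("2-plane format -> plane %d, free now 0x%02x\n",
    	       reserve_plane(&free_mask, 2), free_mask); /* 1 (+2), 0xf8 */
    	return 0;
    }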
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index 69f21f19b51c..73f7347f740b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -13,14 +13,15 @@
13#ifndef __RCAR_DU_REGS_H__ 13#ifndef __RCAR_DU_REGS_H__
14#define __RCAR_DU_REGS_H__ 14#define __RCAR_DU_REGS_H__
15 15
16#define DISP2_REG_OFFSET 0x30000 16#define DU0_REG_OFFSET 0x00000
17#define DU1_REG_OFFSET 0x30000
18#define DU2_REG_OFFSET 0x40000
17 19
18/* ----------------------------------------------------------------------------- 20/* -----------------------------------------------------------------------------
19 * Display Control Registers 21 * Display Control Registers
20 */ 22 */
21 23
22#define DSYSR 0x00000 /* display 1 */ 24#define DSYSR 0x00000 /* display 1 */
23#define D2SYSR 0x30000 /* display 2 */
24#define DSYSR_ILTS (1 << 29) 25#define DSYSR_ILTS (1 << 29)
25#define DSYSR_DSEC (1 << 20) 26#define DSYSR_DSEC (1 << 20)
26#define DSYSR_IUPD (1 << 16) 27#define DSYSR_IUPD (1 << 16)
@@ -35,7 +36,6 @@
35#define DSYSR_SCM_INT_VIDEO (3 << 4) 36#define DSYSR_SCM_INT_VIDEO (3 << 4)
36 37
37#define DSMR 0x00004 38#define DSMR 0x00004
38#define D2SMR 0x30004
39#define DSMR_VSPM (1 << 28) 39#define DSMR_VSPM (1 << 28)
40#define DSMR_ODPM (1 << 27) 40#define DSMR_ODPM (1 << 27)
41#define DSMR_DIPM_DISP (0 << 25) 41#define DSMR_DIPM_DISP (0 << 25)
@@ -60,7 +60,6 @@
60#define DSMR_CSY_MASK (3 << 6) 60#define DSMR_CSY_MASK (3 << 6)
61 61
62#define DSSR 0x00008 62#define DSSR 0x00008
63#define D2SSR 0x30008
64#define DSSR_VC1FB_DSA0 (0 << 30) 63#define DSSR_VC1FB_DSA0 (0 << 30)
65#define DSSR_VC1FB_DSA1 (1 << 30) 64#define DSSR_VC1FB_DSA1 (1 << 30)
66#define DSSR_VC1FB_DSA2 (2 << 30) 65#define DSSR_VC1FB_DSA2 (2 << 30)
@@ -80,7 +79,6 @@
80#define DSSR_ADC(n) (1 << ((n)-1)) 79#define DSSR_ADC(n) (1 << ((n)-1))
81 80
82#define DSRCR 0x0000c 81#define DSRCR 0x0000c
83#define D2SRCR 0x3000c
84#define DSRCR_TVCL (1 << 15) 82#define DSRCR_TVCL (1 << 15)
85#define DSRCR_FRCL (1 << 14) 83#define DSRCR_FRCL (1 << 14)
86#define DSRCR_VBCL (1 << 11) 84#define DSRCR_VBCL (1 << 11)
@@ -90,7 +88,6 @@
90#define DSRCR_MASK 0x0000cbff 88#define DSRCR_MASK 0x0000cbff
91 89
92#define DIER 0x00010 90#define DIER 0x00010
93#define D2IER 0x30010
94#define DIER_TVE (1 << 15) 91#define DIER_TVE (1 << 15)
95#define DIER_FRE (1 << 14) 92#define DIER_FRE (1 << 14)
96#define DIER_VBE (1 << 11) 93#define DIER_VBE (1 << 11)
@@ -114,7 +111,6 @@
114#define DPPR_BPP32 (DPPR_BPP32_P1 | DPPR_BPP32_P2) /* plane1 & 2 */ 111#define DPPR_BPP32 (DPPR_BPP32_P1 | DPPR_BPP32_P2) /* plane1 & 2 */
115 112
116#define DEFR 0x00020 113#define DEFR 0x00020
117#define D2EFR 0x30020
118#define DEFR_CODE (0x7773 << 16) 114#define DEFR_CODE (0x7773 << 16)
119#define DEFR_EXSL (1 << 12) 115#define DEFR_EXSL (1 << 12)
120#define DEFR_EXVL (1 << 11) 116#define DEFR_EXVL (1 << 11)
@@ -137,12 +133,10 @@
137#define DCPCR_DCE (1 << 0) 133#define DCPCR_DCE (1 << 0)
138 134
139#define DEFR2 0x00034 135#define DEFR2 0x00034
140#define D2EFR2 0x30034
141#define DEFR2_CODE (0x7775 << 16) 136#define DEFR2_CODE (0x7775 << 16)
142#define DEFR2_DEFE2G (1 << 0) 137#define DEFR2_DEFE2G (1 << 0)
143 138
144#define DEFR3 0x00038 139#define DEFR3 0x00038
145#define D2EFR3 0x30038
146#define DEFR3_CODE (0x7776 << 16) 140#define DEFR3_CODE (0x7776 << 16)
147#define DEFR3_EVDA (1 << 14) 141#define DEFR3_EVDA (1 << 14)
148#define DEFR3_EVDM_1 (1 << 12) 142#define DEFR3_EVDM_1 (1 << 12)
@@ -153,7 +147,6 @@
153#define DEFR3_DEFE3 (1 << 0) 147#define DEFR3_DEFE3 (1 << 0)
154 148
155#define DEFR4 0x0003c 149#define DEFR4 0x0003c
156#define D2EFR4 0x3003c
157#define DEFR4_CODE (0x7777 << 16) 150#define DEFR4_CODE (0x7777 << 16)
158#define DEFR4_LRUO (1 << 5) 151#define DEFR4_LRUO (1 << 5)
159#define DEFR4_SPCE (1 << 4) 152#define DEFR4_SPCE (1 << 4)
@@ -205,6 +198,68 @@
205#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE2) 198#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE2)
206 199
207/* ----------------------------------------------------------------------------- 200/* -----------------------------------------------------------------------------
201 * R8A7790-only Control Registers
202 */
203
204#define DD1SSR 0x20008
205#define DD1SSR_TVR (1 << 15)
206#define DD1SSR_FRM (1 << 14)
207#define DD1SSR_BUF (1 << 12)
208#define DD1SSR_VBK (1 << 11)
209#define DD1SSR_RINT (1 << 9)
210#define DD1SSR_HBK (1 << 8)
211#define DD1SSR_ADC(n) (1 << ((n)-1))
212
213#define DD1SRCR 0x2000c
214#define DD1SRCR_TVR (1 << 15)
215#define DD1SRCR_FRM (1 << 14)
216#define DD1SRCR_BUF (1 << 12)
217#define DD1SRCR_VBK (1 << 11)
218#define DD1SRCR_RINT (1 << 9)
219#define DD1SRCR_HBK (1 << 8)
220#define DD1SRCR_ADC(n) (1 << ((n)-1))
221
222#define DD1IER 0x20010
223#define DD1IER_TVR (1 << 15)
224#define DD1IER_FRM (1 << 14)
225#define DD1IER_BUF (1 << 12)
226#define DD1IER_VBK (1 << 11)
227#define DD1IER_RINT (1 << 9)
228#define DD1IER_HBK (1 << 8)
229#define DD1IER_ADC(n) (1 << ((n)-1))
230
231#define DEFR8 0x20020
232#define DEFR8_CODE (0x7790 << 16)
233#define DEFR8_VSCS (1 << 6)
234#define DEFR8_DRGBS_DU(n) ((n) << 4)
235#define DEFR8_DRGBS_MASK (3 << 4)
236#define DEFR8_DEFE8 (1 << 0)
237
238#define DOFLR 0x20024
239#define DOFLR_CODE (0x7790 << 16)
240#define DOFLR_HSYCFL1 (1 << 13)
241#define DOFLR_VSYCFL1 (1 << 12)
242#define DOFLR_ODDFL1 (1 << 11)
243#define DOFLR_DISPFL1 (1 << 10)
244#define DOFLR_CDEFL1 (1 << 9)
245#define DOFLR_RGBFL1 (1 << 8)
246#define DOFLR_HSYCFL0 (1 << 5)
247#define DOFLR_VSYCFL0 (1 << 4)
248#define DOFLR_ODDFL0 (1 << 3)
249#define DOFLR_DISPFL0 (1 << 2)
250#define DOFLR_CDEFL0 (1 << 1)
251#define DOFLR_RGBFL0 (1 << 0)
252
253#define DIDSR 0x20028
254#define DIDSR_CODE (0x7790 << 16)
255#define DIDSR_LCDS_DCLKIN(n) (0 << (8 + (n) * 2))
256#define DIDSR_LCDS_LVDS0(n) (2 << (8 + (n) * 2))
257#define DIDSR_LCDS_LVDS1(n) (3 << (8 + (n) * 2))
258#define DIDSR_LCDS_MASK(n) (3 << (8 + (n) * 2))
259#define DIDSR_PCDS_CLK(n, clk) (clk << ((n) * 2))
260#define DIDSR_PCDS_MASK(n) (3 << ((n) * 2))
261
262/* -----------------------------------------------------------------------------
208 * Display Timing Generation Registers 263 * Display Timing Generation Registers
209 */ 264 */
210 265
@@ -349,21 +404,34 @@
349#define APnMR_BM_AD (2 << 4) /* Auto Display Change Mode */ 404#define APnMR_BM_AD (2 << 4) /* Auto Display Change Mode */
350 405
351#define APnMWR 0x0a104 406#define APnMWR 0x0a104
407
408#define APnDSXR 0x0a110
409#define APnDSYR 0x0a114
410#define APnDPXR 0x0a118
411#define APnDPYR 0x0a11c
412
352#define APnDSA0R 0x0a120 413#define APnDSA0R 0x0a120
353#define APnDSA1R 0x0a124 414#define APnDSA1R 0x0a124
354#define APnDSA2R 0x0a128 415#define APnDSA2R 0x0a128
416
417#define APnSPXR 0x0a130
418#define APnSPYR 0x0a134
419#define APnWASPR 0x0a138
420#define APnWAMWR 0x0a13c
421
422#define APnBTR 0x0a140
423
355#define APnMLR 0x0a150 424#define APnMLR 0x0a150
425#define APnSWAPR 0x0a180
356 426
357/* ----------------------------------------------------------------------------- 427/* -----------------------------------------------------------------------------
358 * Display Capture Registers 428 * Display Capture Registers
359 */ 429 */
360 430
431#define DCMR 0x0c100
361#define DCMWR 0x0c104 432#define DCMWR 0x0c104
362#define DC2MWR 0x0c204
363#define DCSAR 0x0c120 433#define DCSAR 0x0c120
364#define DC2SAR 0x0c220
365#define DCMLR 0x0c150 434#define DCMLR 0x0c150
366#define DC2MLR 0x0c250
367 435
368/* ----------------------------------------------------------------------------- 436/* -----------------------------------------------------------------------------
369 * Color Palette Registers 437 * Color Palette Registers
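With the per-channel D2*/DC2* register copies gone, a register is now addressed as group base plus plane stride plus register offset, exactly as the reworked rcar_du_plane_read()/write() compute it. A quick illustration of that arithmetic; note that the PLANE_OFF and PnMR values used here are assumptions for the sketch, since the real constants are defined elsewhere in rcar_du_regs.h and do not appear in these hunks:

    #include <stdint.h>
    #include <stdio.h>

    #define DU0_REG_OFFSET	0x00000
    #define DU1_REG_OFFSET	0x30000
    #define DU2_REG_OFFSET	0x40000

    #define DSMR		0x00004	/* one name instead of DSMR/D2SMR */

    /* Hypothetical values; the real definitions live in rcar_du_regs.h. */
    #define PLANE_OFF	0x00100
    #define PnMR		0x00000

    /* Mirror of rcar_du_plane_read()'s address computation:
     * group base + plane stride * hardware index + register offset. */
    static uint32_t plane_reg_offset(uint32_t group_base, unsigned int index,
    				 uint32_t reg)
    {
    	return group_base + index * PLANE_OFF + reg;
    }

    int main(void)
    {
    	/* DSMR of the second DU channel: what used to be the D2SMR copy. */
    	printf("DU1 DSMR     at 0x%05x\n", DU1_REG_OFFSET + DSMR);
    	/* PnMR of hardware plane 3 in the DU2 group. */
    	printf("DU2 p3 PnMR  at 0x%05x\n",
    	       plane_reg_offset(DU2_REG_OFFSET, 3, PnMR));
    	return 0;
    }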
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vga.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 327289ec380d..41d563adfeaa 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vga.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * rcar_du_vga.c -- R-Car Display Unit VGA DAC and Connector 2 * rcar_du_vgacon.c -- R-Car Display Unit VGA Connector
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013 Renesas Corporation
5 * 5 *
@@ -16,12 +16,9 @@
16#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
17 17
18#include "rcar_du_drv.h" 18#include "rcar_du_drv.h"
19#include "rcar_du_encoder.h"
19#include "rcar_du_kms.h" 20#include "rcar_du_kms.h"
20#include "rcar_du_vga.h" 21#include "rcar_du_vgacon.h"
21
22/* -----------------------------------------------------------------------------
23 * Connector
24 */
25 22
26static int rcar_du_vga_connector_get_modes(struct drm_connector *connector) 23static int rcar_du_vga_connector_get_modes(struct drm_connector *connector)
27{ 24{
@@ -49,7 +46,7 @@ static void rcar_du_vga_connector_destroy(struct drm_connector *connector)
49static enum drm_connector_status 46static enum drm_connector_status
50rcar_du_vga_connector_detect(struct drm_connector *connector, bool force) 47rcar_du_vga_connector_detect(struct drm_connector *connector, bool force)
51{ 48{
52 return connector_status_unknown; 49 return connector_status_connected;
53} 50}
54 51
55static const struct drm_connector_funcs connector_funcs = { 52static const struct drm_connector_funcs connector_funcs = {
@@ -59,8 +56,8 @@ static const struct drm_connector_funcs connector_funcs = {
59 .destroy = rcar_du_vga_connector_destroy, 56 .destroy = rcar_du_vga_connector_destroy,
60}; 57};
61 58
62static int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, 59int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
63 struct rcar_du_encoder *renc) 60 struct rcar_du_encoder *renc)
64{ 61{
65 struct rcar_du_connector *rcon; 62 struct rcar_du_connector *rcon;
66 struct drm_connector *connector; 63 struct drm_connector *connector;
@@ -97,53 +94,3 @@ static int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
97 94
98 return 0; 95 return 0;
99} 96}
100
101/* -----------------------------------------------------------------------------
102 * Encoder
103 */
104
105static void rcar_du_vga_encoder_dpms(struct drm_encoder *encoder, int mode)
106{
107}
108
109static bool rcar_du_vga_encoder_mode_fixup(struct drm_encoder *encoder,
110 const struct drm_display_mode *mode,
111 struct drm_display_mode *adjusted_mode)
112{
113 return true;
114}
115
116static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
117 .dpms = rcar_du_vga_encoder_dpms,
118 .mode_fixup = rcar_du_vga_encoder_mode_fixup,
119 .prepare = rcar_du_encoder_mode_prepare,
120 .commit = rcar_du_encoder_mode_commit,
121 .mode_set = rcar_du_encoder_mode_set,
122};
123
124static const struct drm_encoder_funcs encoder_funcs = {
125 .destroy = drm_encoder_cleanup,
126};
127
128int rcar_du_vga_init(struct rcar_du_device *rcdu,
129 const struct rcar_du_encoder_vga_data *data,
130 unsigned int output)
131{
132 struct rcar_du_encoder *renc;
133 int ret;
134
135 renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
136 if (renc == NULL)
137 return -ENOMEM;
138
139 renc->output = output;
140
141 ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
142 DRM_MODE_ENCODER_DAC);
143 if (ret < 0)
144 return ret;
145
146 drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
147
148 return rcar_du_vga_connector_init(rcdu, renc);
149}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vga.h b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h
index 66b4d2d7190d..b12b0cf7f117 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vga.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * rcar_du_vga.h -- R-Car Display Unit VGA DAC and Connector 2 * rcar_du_vgacon.h -- R-Car Display Unit VGA Connector
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013 Renesas Corporation
5 * 5 *
@@ -11,14 +11,13 @@
11 * (at your option) any later version. 11 * (at your option) any later version.
12 */ 12 */
13 13
14#ifndef __RCAR_DU_VGA_H__ 14#ifndef __RCAR_DU_VGACON_H__
15#define __RCAR_DU_VGA_H__ 15#define __RCAR_DU_VGACON_H__
16 16
17struct rcar_du_device; 17struct rcar_du_device;
18struct rcar_du_encoder_vga_data; 18struct rcar_du_encoder;
19 19
20int rcar_du_vga_init(struct rcar_du_device *rcdu, 20int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
21 const struct rcar_du_encoder_vga_data *data, 21 struct rcar_du_encoder *renc);
22 unsigned int output);
23 22
24#endif /* __RCAR_DU_VGA_H__ */ 23#endif /* __RCAR_DU_VGACON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
new file mode 100644
index 000000000000..77cf9289ab65
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
@@ -0,0 +1,69 @@
1/*
2 * rcar_lvds_regs.h -- R-Car LVDS Interface Registers Definitions
3 *
4 * Copyright (C) 2013 Renesas Electronics Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation.
11 */
12
13#ifndef __RCAR_LVDS_REGS_H__
14#define __RCAR_LVDS_REGS_H__
15
16#define LVDCR0 0x0000
17#define LVDCR0_DUSEL (1 << 15)
18#define LVDCR0_DMD (1 << 12)
19#define LVDCR0_LVMD_MASK (0xf << 8)
20#define LVDCR0_LVMD_SHIFT 8
21#define LVDCR0_PLLEN (1 << 4)
22#define LVDCR0_BEN (1 << 2)
23#define LVDCR0_LVEN (1 << 1)
24#define LVDCR0_LVRES (1 << 0)
25
26#define LVDCR1 0x0004
27#define LVDCR1_CKSEL (1 << 15)
28#define LVDCR1_CHSTBY(n) (3 << (2 + (n) * 2))
29#define LVDCR1_CLKSTBY (3 << 0)
30
31#define LVDPLLCR 0x0008
32#define LVDPLLCR_CEEN (1 << 14)
33#define LVDPLLCR_FBEN (1 << 13)
34#define LVDPLLCR_COSEL (1 << 12)
35#define LVDPLLCR_PLLDLYCNT_150M (0x1bf << 0)
36#define LVDPLLCR_PLLDLYCNT_121M (0x22c << 0)
37#define LVDPLLCR_PLLDLYCNT_60M (0x77b << 0)
38#define LVDPLLCR_PLLDLYCNT_38M (0x69a << 0)
39#define LVDPLLCR_PLLDLYCNT_MASK (0x7ff << 0)
40
41#define LVDCTRCR 0x000c
42#define LVDCTRCR_CTR3SEL_ZERO (0 << 12)
43#define LVDCTRCR_CTR3SEL_ODD (1 << 12)
44#define LVDCTRCR_CTR3SEL_CDE (2 << 12)
45#define LVDCTRCR_CTR3SEL_MASK (7 << 12)
46#define LVDCTRCR_CTR2SEL_DISP (0 << 8)
47#define LVDCTRCR_CTR2SEL_ODD (1 << 8)
48#define LVDCTRCR_CTR2SEL_CDE (2 << 8)
49#define LVDCTRCR_CTR2SEL_HSYNC (3 << 8)
50#define LVDCTRCR_CTR2SEL_VSYNC (4 << 8)
51#define LVDCTRCR_CTR2SEL_MASK (7 << 8)
52#define LVDCTRCR_CTR1SEL_VSYNC (0 << 4)
53#define LVDCTRCR_CTR1SEL_DISP (1 << 4)
54#define LVDCTRCR_CTR1SEL_ODD (2 << 4)
55#define LVDCTRCR_CTR1SEL_CDE (3 << 4)
56#define LVDCTRCR_CTR1SEL_HSYNC (4 << 4)
57#define LVDCTRCR_CTR1SEL_MASK (7 << 4)
58#define LVDCTRCR_CTR0SEL_HSYNC (0 << 0)
59#define LVDCTRCR_CTR0SEL_VSYNC (1 << 0)
60#define LVDCTRCR_CTR0SEL_DISP (2 << 0)
61#define LVDCTRCR_CTR0SEL_ODD (3 << 0)
62#define LVDCTRCR_CTR0SEL_CDE (4 << 0)
63#define LVDCTRCR_CTR0SEL_MASK (7 << 0)
64
65#define LVDCHCR 0x0010
66#define LVDCHCR_CHSEL_CH(n, c) ((((c) - (n)) & 3) << ((n) * 4))
67#define LVDCHCR_CHSEL_MASK(n) (3 << ((n) * 4))
68
69#endif /* __RCAR_LVDS_REGS_H__ */
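LVDCHCR_CHSEL_CH(n, c) above encodes, for each output channel n, which input channel c it carries, as the 2-bit value (c - n) mod 4 stored at bits [4n+1:4n]. A standalone check of the macro: identity routing encodes as zero, and swapping two channels sets only the corresponding fields.

    #include <stdio.h>

    /* Copied from rcar_lvds_regs.h above. */
    #define LVDCHCR_CHSEL_CH(n, c)	((((c) - (n)) & 3) << ((n) * 4))
    #define LVDCHCR_CHSEL_MASK(n)	(3 << ((n) * 4))

    int main(void)
    {
    	unsigned int n, lvdchcr = 0;

    	/* Identity routing: channel n -> channel n encodes as 0. */
    	for (n = 0; n < 4; n++)
    		lvdchcr |= LVDCHCR_CHSEL_CH(n, n);
    	printf("identity routing:  0x%04x\n", lvdchcr);	/* 0x0000 */

    	/* Swap channels 1 and 2: (2-1)&3 = 1 in field 1,
    	 * (1-2)&3 = 3 in field 2; prints 0x0310. */
    	printf("swap 1<->2 fields: 0x%04x\n",
    	       LVDCHCR_CHSEL_CH(1, 2) | LVDCHCR_CHSEL_CH(2, 1));
    	return 0;
    }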
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index bd6b2cf508d5..b17d0710871a 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -1072,7 +1072,7 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
1072 drm_idlelock_release(&file_priv->master->lock); 1072 drm_idlelock_release(&file_priv->master->lock);
1073} 1073}
1074 1074
1075struct drm_ioctl_desc savage_ioctls[] = { 1075const struct drm_ioctl_desc savage_ioctls[] = {
1076 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1076 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1077 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), 1077 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
1078 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), 1078 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 71b2081e7835..3c030216e888 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -42,7 +42,6 @@ static const struct file_operations savage_driver_fops = {
42 .unlocked_ioctl = drm_ioctl, 42 .unlocked_ioctl = drm_ioctl,
43 .mmap = drm_mmap, 43 .mmap = drm_mmap,
44 .poll = drm_poll, 44 .poll = drm_poll,
45 .fasync = drm_fasync,
46#ifdef CONFIG_COMPAT 45#ifdef CONFIG_COMPAT
47 .compat_ioctl = drm_compat_ioctl, 46 .compat_ioctl = drm_compat_ioctl,
48#endif 47#endif
@@ -51,7 +50,7 @@ static const struct file_operations savage_driver_fops = {
51 50
52static struct drm_driver driver = { 51static struct drm_driver driver = {
53 .driver_features = 52 .driver_features =
54 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA, 53 DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
55 .dev_priv_size = sizeof(drm_savage_buf_priv_t), 54 .dev_priv_size = sizeof(drm_savage_buf_priv_t),
56 .load = savage_driver_load, 55 .load = savage_driver_load,
57 .firstopen = savage_driver_firstopen, 56 .firstopen = savage_driver_firstopen,
diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h
index c05082a59f6f..335f8fcf1041 100644
--- a/drivers/gpu/drm/savage/savage_drv.h
+++ b/drivers/gpu/drm/savage/savage_drv.h
@@ -104,7 +104,7 @@ enum savage_family {
104 S3_LAST 104 S3_LAST
105}; 105};
106 106
107extern struct drm_ioctl_desc savage_ioctls[]; 107extern const struct drm_ioctl_desc savage_ioctls[];
108extern int savage_max_ioctl; 108extern int savage_max_ioctl;
109 109
110#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) 110#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 99e2034e49cc..54bad98e9477 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -465,7 +465,8 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
465 465
466static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc, 466static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
467 struct drm_framebuffer *fb, 467 struct drm_framebuffer *fb,
468 struct drm_pending_vblank_event *event) 468 struct drm_pending_vblank_event *event,
469 uint32_t page_flip_flags)
469{ 470{
470 struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc); 471 struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
471 struct drm_device *dev = scrtc->crtc.dev; 472 struct drm_device *dev = scrtc->crtc.dev;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 5f83f9a3ef59..015551866b4a 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -257,7 +257,6 @@ static const struct file_operations shmob_drm_fops = {
257#endif 257#endif
258 .poll = drm_poll, 258 .poll = drm_poll,
259 .read = drm_read, 259 .read = drm_read,
260 .fasync = drm_fasync,
261 .llseek = no_llseek, 260 .llseek = no_llseek,
262 .mmap = drm_gem_cma_mmap, 261 .mmap = drm_gem_cma_mmap,
263}; 262};
@@ -285,7 +284,7 @@ static struct drm_driver shmob_drm_driver = {
285 .gem_prime_mmap = drm_gem_cma_prime_mmap, 284 .gem_prime_mmap = drm_gem_cma_prime_mmap,
286 .dumb_create = drm_gem_cma_dumb_create, 285 .dumb_create = drm_gem_cma_dumb_create,
287 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 286 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
288 .dumb_destroy = drm_gem_cma_dumb_destroy, 287 .dumb_destroy = drm_gem_dumb_destroy,
289 .fops = &shmob_drm_fops, 288 .fops = &shmob_drm_fops,
290 .name = "shmob-drm", 289 .name = "shmob-drm",
291 .desc = "Renesas SH Mobile DRM", 290 .desc = "Renesas SH Mobile DRM",
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 5a5325e6b759..4383b74a3aa4 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -72,7 +72,6 @@ static const struct file_operations sis_driver_fops = {
72 .unlocked_ioctl = drm_ioctl, 72 .unlocked_ioctl = drm_ioctl,
73 .mmap = drm_mmap, 73 .mmap = drm_mmap,
74 .poll = drm_poll, 74 .poll = drm_poll,
75 .fasync = drm_fasync,
76#ifdef CONFIG_COMPAT 75#ifdef CONFIG_COMPAT
77 .compat_ioctl = drm_compat_ioctl, 76 .compat_ioctl = drm_compat_ioctl,
78#endif 77#endif
@@ -103,7 +102,7 @@ void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
103} 102}
104 103
105static struct drm_driver driver = { 104static struct drm_driver driver = {
106 .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR, 105 .driver_features = DRIVER_USE_AGP,
107 .load = sis_driver_load, 106 .load = sis_driver_load,
108 .unload = sis_driver_unload, 107 .unload = sis_driver_unload,
109 .open = sis_driver_open, 108 .open = sis_driver_open,
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
index 13b527bb83be..c31c0253054d 100644
--- a/drivers/gpu/drm/sis/sis_drv.h
+++ b/drivers/gpu/drm/sis/sis_drv.h
@@ -70,7 +70,7 @@ extern void sis_reclaim_buffers_locked(struct drm_device *dev,
70 struct drm_file *file_priv); 70 struct drm_file *file_priv);
71extern void sis_lastclose(struct drm_device *dev); 71extern void sis_lastclose(struct drm_device *dev);
72 72
73extern struct drm_ioctl_desc sis_ioctls[]; 73extern const struct drm_ioctl_desc sis_ioctls[];
74extern int sis_max_ioctl; 74extern int sis_max_ioctl;
75 75
76#endif 76#endif
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 9a43d98e5003..01857d836350 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -109,7 +109,8 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
109 if (pool == AGP_TYPE) { 109 if (pool == AGP_TYPE) {
110 retval = drm_mm_insert_node(&dev_priv->agp_mm, 110 retval = drm_mm_insert_node(&dev_priv->agp_mm,
111 &item->mm_node, 111 &item->mm_node,
112 mem->size, 0); 112 mem->size, 0,
113 DRM_MM_SEARCH_DEFAULT);
113 offset = item->mm_node.start; 114 offset = item->mm_node.start;
114 } else { 115 } else {
115#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE) 116#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
@@ -121,7 +122,8 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
121#else 122#else
122 retval = drm_mm_insert_node(&dev_priv->vram_mm, 123 retval = drm_mm_insert_node(&dev_priv->vram_mm,
123 &item->mm_node, 124 &item->mm_node,
124 mem->size, 0); 125 mem->size, 0,
126 DRM_MM_SEARCH_DEFAULT);
125 offset = item->mm_node.start; 127 offset = item->mm_node.start;
126#endif 128#endif
127 } 129 }
@@ -348,7 +350,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
348 return; 350 return;
349} 351}
350 352
351struct drm_ioctl_desc sis_ioctls[] = { 353const struct drm_ioctl_desc sis_ioctls[] = {
352 DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH), 354 DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
353 DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH), 355 DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
354 DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), 356 DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index ddfa743459d0..3492ca5c46d3 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -48,7 +48,6 @@ static const struct file_operations tdfx_driver_fops = {
48 .unlocked_ioctl = drm_ioctl, 48 .unlocked_ioctl = drm_ioctl,
49 .mmap = drm_mmap, 49 .mmap = drm_mmap,
50 .poll = drm_poll, 50 .poll = drm_poll,
51 .fasync = drm_fasync,
52#ifdef CONFIG_COMPAT 51#ifdef CONFIG_COMPAT
53 .compat_ioctl = drm_compat_ioctl, 52 .compat_ioctl = drm_compat_ioctl,
54#endif 53#endif
@@ -56,7 +55,6 @@ static const struct file_operations tdfx_driver_fops = {
56}; 55};
57 56
58static struct drm_driver driver = { 57static struct drm_driver driver = {
59 .driver_features = DRIVER_USE_MTRR,
60 .fops = &tdfx_driver_fops, 58 .fops = &tdfx_driver_fops,
61 .name = DRIVER_NAME, 59 .name = DRIVER_NAME,
62 .desc = DRIVER_DESC, 60 .desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 7418dcd986d3..d36efc13b16f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -15,7 +15,7 @@
15 * this program. If not, see <http://www.gnu.org/licenses/>. 15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include <linux/kfifo.h> 18#include "drm_flip_work.h"
19 19
20#include "tilcdc_drv.h" 20#include "tilcdc_drv.h"
21#include "tilcdc_regs.h" 21#include "tilcdc_regs.h"
@@ -35,21 +35,18 @@ struct tilcdc_crtc {
35 struct drm_framebuffer *scanout[2]; 35 struct drm_framebuffer *scanout[2];
36 36
37 /* for deferred fb unref's: */ 37 /* for deferred fb unref's: */
38 DECLARE_KFIFO_PTR(unref_fifo, struct drm_framebuffer *); 38 struct drm_flip_work unref_work;
39 struct work_struct work;
40}; 39};
41#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base) 40#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
42 41
43static void unref_worker(struct work_struct *work) 42static void unref_worker(struct drm_flip_work *work, void *val)
44{ 43{
45 struct tilcdc_crtc *tilcdc_crtc = 44 struct tilcdc_crtc *tilcdc_crtc =
46 container_of(work, struct tilcdc_crtc, work); 45 container_of(work, struct tilcdc_crtc, unref_work);
47 struct drm_device *dev = tilcdc_crtc->base.dev; 46 struct drm_device *dev = tilcdc_crtc->base.dev;
48 struct drm_framebuffer *fb;
49 47
50 mutex_lock(&dev->mode_config.mutex); 48 mutex_lock(&dev->mode_config.mutex);
51 while (kfifo_get(&tilcdc_crtc->unref_fifo, &fb)) 49 drm_framebuffer_unreference(val);
52 drm_framebuffer_unreference(fb);
53 mutex_unlock(&dev->mode_config.mutex); 50 mutex_unlock(&dev->mode_config.mutex);
54} 51}
55 52
@@ -68,19 +65,14 @@ static void set_scanout(struct drm_crtc *crtc, int n)
68 }; 65 };
69 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 66 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
70 struct drm_device *dev = crtc->dev; 67 struct drm_device *dev = crtc->dev;
68 struct tilcdc_drm_private *priv = dev->dev_private;
71 69
72 pm_runtime_get_sync(dev->dev); 70 pm_runtime_get_sync(dev->dev);
73 tilcdc_write(dev, base_reg[n], tilcdc_crtc->start); 71 tilcdc_write(dev, base_reg[n], tilcdc_crtc->start);
74 tilcdc_write(dev, ceil_reg[n], tilcdc_crtc->end); 72 tilcdc_write(dev, ceil_reg[n], tilcdc_crtc->end);
75 if (tilcdc_crtc->scanout[n]) { 73 if (tilcdc_crtc->scanout[n]) {
76 if (kfifo_put(&tilcdc_crtc->unref_fifo, 74 drm_flip_work_queue(&tilcdc_crtc->unref_work, tilcdc_crtc->scanout[n]);
77 (const struct drm_framebuffer **)&tilcdc_crtc->scanout[n])) { 75 drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
78 struct tilcdc_drm_private *priv = dev->dev_private;
79 queue_work(priv->wq, &tilcdc_crtc->work);
80 } else {
81 dev_err(dev->dev, "unref fifo full!\n");
82 drm_framebuffer_unreference(tilcdc_crtc->scanout[n]);
83 }
84 } 76 }
85 tilcdc_crtc->scanout[n] = crtc->fb; 77 tilcdc_crtc->scanout[n] = crtc->fb;
86 drm_framebuffer_reference(tilcdc_crtc->scanout[n]); 78 drm_framebuffer_reference(tilcdc_crtc->scanout[n]);
@@ -149,14 +141,15 @@ static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
149 WARN_ON(tilcdc_crtc->dpms == DRM_MODE_DPMS_ON); 141 WARN_ON(tilcdc_crtc->dpms == DRM_MODE_DPMS_ON);
150 142
151 drm_crtc_cleanup(crtc); 143 drm_crtc_cleanup(crtc);
152 WARN_ON(!kfifo_is_empty(&tilcdc_crtc->unref_fifo)); 144 drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
153 kfifo_free(&tilcdc_crtc->unref_fifo); 145
154 kfree(tilcdc_crtc); 146 kfree(tilcdc_crtc);
155} 147}
156 148
157static int tilcdc_crtc_page_flip(struct drm_crtc *crtc, 149static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
158 struct drm_framebuffer *fb, 150 struct drm_framebuffer *fb,
159 struct drm_pending_vblank_event *event) 151 struct drm_pending_vblank_event *event,
152 uint32_t page_flip_flags)
160{ 153{
161 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 154 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
162 struct drm_device *dev = crtc->dev; 155 struct drm_device *dev = crtc->dev;
@@ -379,7 +372,12 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
379 else 372 else
380 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE); 373 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
381 374
382 if (mode->flags & DRM_MODE_FLAG_NHSYNC) 375 /*
376 * use value from adjusted_mode here as this might have been
377 * changed as part of the fixup for slave encoders to solve the
378 * issue where tilcdc timings are not VESA compliant
379 */
380 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
383 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC); 381 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
384 else 382 else
385 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC); 383 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
@@ -666,14 +664,13 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
666 tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF; 664 tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF;
667 init_waitqueue_head(&tilcdc_crtc->frame_done_wq); 665 init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
668 666
669 ret = kfifo_alloc(&tilcdc_crtc->unref_fifo, 16, GFP_KERNEL); 667 ret = drm_flip_work_init(&tilcdc_crtc->unref_work, 16,
668 "unref", unref_worker);
670 if (ret) { 669 if (ret) {
671 dev_err(dev->dev, "could not allocate unref FIFO\n"); 670 dev_err(dev->dev, "could not allocate unref FIFO\n");
672 goto fail; 671 goto fail;
673 } 672 }
674 673
675 INIT_WORK(&tilcdc_crtc->work, unref_worker);
676
677 ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs); 674 ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
678 if (ret < 0) 675 if (ret < 0)
679 goto fail; 676 goto fail;
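The tilcdc conversion above swaps an open-coded kfifo plus work_struct for the drm_flip_work helper. Gathering the four calls from the hunks into lifecycle order makes the pattern easier to see; this is a recap of the code above, not a standalone module.

    /* Callback: invoked once per queued pointer, on the workqueue passed
     * to drm_flip_work_commit(); val is the queued framebuffer. */
    static void unref_worker(struct drm_flip_work *work, void *val)
    {
    	drm_framebuffer_unreference(val);
    }

    /* crtc_create(): allocate a 16-entry backlog and name the work. */
    ret = drm_flip_work_init(&tilcdc_crtc->unref_work, 16,
    			 "unref", unref_worker);

    /* set_scanout(): queueing is cheap; commit hands the accumulated
     * entries to the driver workqueue in one go. */
    drm_flip_work_queue(&tilcdc_crtc->unref_work, tilcdc_crtc->scanout[n]);
    drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);

    /* crtc_destroy(): release the backlog once no commit can arrive. */
    drm_flip_work_cleanup(&tilcdc_crtc->unref_work);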
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 40b71da5a214..116da199b942 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -497,7 +497,6 @@ static const struct file_operations fops = {
497#endif 497#endif
498 .poll = drm_poll, 498 .poll = drm_poll,
499 .read = drm_read, 499 .read = drm_read,
500 .fasync = drm_fasync,
501 .llseek = no_llseek, 500 .llseek = no_llseek,
502 .mmap = drm_gem_cma_mmap, 501 .mmap = drm_gem_cma_mmap,
503}; 502};
@@ -519,7 +518,7 @@ static struct drm_driver tilcdc_driver = {
519 .gem_vm_ops = &drm_gem_cma_vm_ops, 518 .gem_vm_ops = &drm_gem_cma_vm_ops,
520 .dumb_create = drm_gem_cma_dumb_create, 519 .dumb_create = drm_gem_cma_dumb_create,
521 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 520 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
522 .dumb_destroy = drm_gem_cma_dumb_destroy, 521 .dumb_destroy = drm_gem_dumb_destroy,
523#ifdef CONFIG_DEBUG_FS 522#ifdef CONFIG_DEBUG_FS
524 .debugfs_init = tilcdc_debugfs_init, 523 .debugfs_init = tilcdc_debugfs_init,
525 .debugfs_cleanup = tilcdc_debugfs_cleanup, 524 .debugfs_cleanup = tilcdc_debugfs_cleanup,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
index dfffaf014022..23b3203d8241 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -73,13 +73,38 @@ static void slave_encoder_prepare(struct drm_encoder *encoder)
73 tilcdc_crtc_set_panel_info(encoder->crtc, &slave_info); 73 tilcdc_crtc_set_panel_info(encoder->crtc, &slave_info);
74} 74}
75 75
76static bool slave_encoder_fixup(struct drm_encoder *encoder,
77 const struct drm_display_mode *mode,
78 struct drm_display_mode *adjusted_mode)
79{
80 /*
 81 * tilcdc does not generate VESA-compliant sync but aligns
 82 * VS on the second edge of HS instead of first edge.
 83 * We use adjusted_mode to fix up the sync by aligning both rising
 84 * edges and add an HSKEW offset to let the slave encoder fix it up.
85 */
86 adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
87 adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
88
89 if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
90 adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
91 adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
92 } else {
93 adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
94 adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
95 }
96
97 return drm_i2c_encoder_mode_fixup(encoder, mode, adjusted_mode);
98}
99
100
76static const struct drm_encoder_funcs slave_encoder_funcs = { 101static const struct drm_encoder_funcs slave_encoder_funcs = {
77 .destroy = slave_encoder_destroy, 102 .destroy = slave_encoder_destroy,
78}; 103};
79 104
80static const struct drm_encoder_helper_funcs slave_encoder_helper_funcs = { 105static const struct drm_encoder_helper_funcs slave_encoder_helper_funcs = {
81 .dpms = drm_i2c_encoder_dpms, 106 .dpms = drm_i2c_encoder_dpms,
82 .mode_fixup = drm_i2c_encoder_mode_fixup, 107 .mode_fixup = slave_encoder_fixup,
83 .prepare = slave_encoder_prepare, 108 .prepare = slave_encoder_prepare,
84 .commit = drm_i2c_encoder_commit, 109 .commit = drm_i2c_encoder_commit,
85 .mode_set = drm_i2c_encoder_mode_set, 110 .mode_set = drm_i2c_encoder_mode_set,
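slave_encoder_fixup() works around the non-VESA sync alignment by flipping the hsync polarity so both sync edges rise together, while recording the hsync pulse width in hskew for the slave encoder to compensate. Below is a standalone rerun of that transformation with made-up timings (a 136-pixel hsync pulse); the flag values mirror drm_mode.h.

    #include <stdio.h>

    #define DRM_MODE_FLAG_PHSYNC	(1 << 0)
    #define DRM_MODE_FLAG_NHSYNC	(1 << 1)
    #define DRM_MODE_FLAG_HSKEW	(1 << 9)

    /* Only the mode fields the fixup touches; the timing values in main()
     * are invented for illustration. */
    struct mode {
    	int hsync_start, hsync_end;
    	unsigned int flags;
    	int hskew;
    };

    /* Same transformation as slave_encoder_fixup(): record the hsync width
     * as a skew for the slave encoder, then invert the hsync polarity so
     * both sync signals change on the same (rising) edge. */
    static void fixup(const struct mode *mode, struct mode *adj)
    {
    	adj->hskew = mode->hsync_end - mode->hsync_start;
    	adj->flags |= DRM_MODE_FLAG_HSKEW;

    	if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
    		adj->flags |= DRM_MODE_FLAG_PHSYNC;
    		adj->flags &= ~DRM_MODE_FLAG_NHSYNC;
    	} else {
    		adj->flags |= DRM_MODE_FLAG_NHSYNC;
    		adj->flags &= ~DRM_MODE_FLAG_PHSYNC;
    	}
    }

    int main(void)
    {
    	struct mode m = { 1328, 1464, DRM_MODE_FLAG_NHSYNC, 0 };
    	struct mode a = m;

    	fixup(&m, &a);
    	/* Prints hskew=136 flags=0x201 (PHSYNC | HSKEW). */
    	printf("hskew=%d flags=0x%x\n", a.hskew, a.flags);
    	return 0;
    }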
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cb9dd674670c..f1a857ec1021 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -45,7 +45,6 @@
45#define TTM_DEBUG(fmt, arg...) 45#define TTM_DEBUG(fmt, arg...)
46#define TTM_BO_HASH_ORDER 13 46#define TTM_BO_HASH_ORDER 13
47 47
48static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
49static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); 48static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
50static void ttm_bo_global_kobj_release(struct kobject *kobj); 49static void ttm_bo_global_kobj_release(struct kobject *kobj);
51 50
@@ -615,13 +614,7 @@ static void ttm_bo_release(struct kref *kref)
615 struct ttm_bo_device *bdev = bo->bdev; 614 struct ttm_bo_device *bdev = bo->bdev;
616 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; 615 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
617 616
618 write_lock(&bdev->vm_lock); 617 drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
619 if (likely(bo->vm_node != NULL)) {
620 rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
621 drm_mm_put_block(bo->vm_node);
622 bo->vm_node = NULL;
623 }
624 write_unlock(&bdev->vm_lock);
625 ttm_mem_io_lock(man, false); 618 ttm_mem_io_lock(man, false);
626 ttm_mem_io_free_vm(bo); 619 ttm_mem_io_free_vm(bo);
627 ttm_mem_io_unlock(man); 620 ttm_mem_io_unlock(man);
@@ -1129,6 +1122,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1129 bo->resv = &bo->ttm_resv; 1122 bo->resv = &bo->ttm_resv;
1130 reservation_object_init(bo->resv); 1123 reservation_object_init(bo->resv);
1131 atomic_inc(&bo->glob->bo_count); 1124 atomic_inc(&bo->glob->bo_count);
1125 drm_vma_node_reset(&bo->vma_node);
1132 1126
1133 ret = ttm_bo_check_placement(bo, placement); 1127 ret = ttm_bo_check_placement(bo, placement);
1134 1128
@@ -1139,7 +1133,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1139 if (likely(!ret) && 1133 if (likely(!ret) &&
1140 (bo->type == ttm_bo_type_device || 1134 (bo->type == ttm_bo_type_device ||
1141 bo->type == ttm_bo_type_sg)) 1135 bo->type == ttm_bo_type_sg))
1142 ret = ttm_bo_setup_vm(bo); 1136 ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
1137 bo->mem.num_pages);
1143 1138
1144 locked = ww_mutex_trylock(&bo->resv->lock); 1139 locked = ww_mutex_trylock(&bo->resv->lock);
1145 WARN_ON(!locked); 1140 WARN_ON(!locked);
@@ -1424,10 +1419,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 	TTM_DEBUG("Swap list was clean\n");
 	spin_unlock(&glob->lru_lock);
 
-	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
-	write_lock(&bdev->vm_lock);
-	drm_mm_takedown(&bdev->addr_space_mm);
-	write_unlock(&bdev->vm_lock);
+	drm_vma_offset_manager_destroy(&bdev->vma_manager);
 
 	return ret;
 }
@@ -1441,7 +1433,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 {
 	int ret = -EINVAL;
 
-	rwlock_init(&bdev->vm_lock);
 	bdev->driver = driver;
 
 	memset(bdev->man, 0, sizeof(bdev->man));
@@ -1454,9 +1445,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	if (unlikely(ret != 0))
 		goto out_no_sys;
 
-	bdev->addr_space_rb = RB_ROOT;
-	drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
-
+	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
+				    0x10000000);
 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
 	INIT_LIST_HEAD(&bdev->ddestroy);
 	bdev->dev_mapping = NULL;
@@ -1498,12 +1488,8 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	loff_t offset = (loff_t) bo->addr_space_offset;
-	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
 
-	if (!bdev->dev_mapping)
-		return;
-	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
 	ttm_mem_io_free_vm(bo);
 }
 
@@ -1520,78 +1506,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
-static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
-	struct rb_node *parent = NULL;
-	struct ttm_buffer_object *cur_bo;
-	unsigned long offset = bo->vm_node->start;
-	unsigned long cur_offset;
-
-	while (*cur) {
-		parent = *cur;
-		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
-		cur_offset = cur_bo->vm_node->start;
-		if (offset < cur_offset)
-			cur = &parent->rb_left;
-		else if (offset > cur_offset)
-			cur = &parent->rb_right;
-		else
-			BUG();
-	}
-
-	rb_link_node(&bo->vm_rb, parent, cur);
-	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
-}
-
-/**
- * ttm_bo_setup_vm:
- *
- * @bo: the buffer to allocate address space for
- *
- * Allocate address space in the drm device so that applications
- * can mmap the buffer and access the contents. This only
- * applies to ttm_bo_type_device objects as others are not
- * placed in the drm device address space.
- */
-
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-	int ret;
-
-retry_pre_get:
-	ret = drm_mm_pre_get(&bdev->addr_space_mm);
-	if (unlikely(ret != 0))
-		return ret;
-
-	write_lock(&bdev->vm_lock);
-	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
-					 bo->mem.num_pages, 0, 0);
-
-	if (unlikely(bo->vm_node == NULL)) {
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
-
-	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
-					      bo->mem.num_pages, 0);
-
-	if (unlikely(bo->vm_node == NULL)) {
-		write_unlock(&bdev->vm_lock);
-		goto retry_pre_get;
-	}
-
-	ttm_bo_vm_insert_rb(bo);
-	write_unlock(&bdev->vm_lock);
-	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
-
-	return 0;
-out_unlock:
-	write_unlock(&bdev->vm_lock);
-	return ret;
-}
 
 int ttm_bo_wait(struct ttm_buffer_object *bo,
 		bool lazy, bool interruptible, bool no_wait)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index e4367f91472a..c58eba33bd5f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -61,28 +61,25 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	lpfn = placement->lpfn;
 	if (!lpfn)
 		lpfn = man->size;
-	do {
-		ret = drm_mm_pre_get(mm);
-		if (unlikely(ret))
-			return ret;
 
-		spin_lock(&rman->lock);
-		node = drm_mm_search_free_in_range(mm,
-					mem->num_pages, mem->page_alignment,
-					placement->fpfn, lpfn, 1);
-		if (unlikely(node == NULL)) {
-			spin_unlock(&rman->lock);
-			return 0;
-		}
-		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
-						     mem->page_alignment,
-						     placement->fpfn,
-						     lpfn);
-		spin_unlock(&rman->lock);
-	} while (node == NULL);
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	spin_lock(&rman->lock);
+	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+					  mem->page_alignment,
+					  placement->fpfn, lpfn,
+					  DRM_MM_SEARCH_BEST);
+	spin_unlock(&rman->lock);
+
+	if (unlikely(ret)) {
+		kfree(node);
+	} else {
+		mem->mm_node = node;
+		mem->start = node->start;
+	}
 
-	mem->mm_node = node;
-	mem->start = node->start;
 	return 0;
 }
 
@@ -93,8 +90,10 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 
 	if (mem->mm_node) {
 		spin_lock(&rman->lock);
-		drm_mm_put_block(mem->mm_node);
+		drm_mm_remove_node(mem->mm_node);
 		spin_unlock(&rman->lock);
+
+		kfree(mem->mm_node);
 		mem->mm_node = NULL;
 	}
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 319cf4127c5b..7cc904d3a4d1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -30,6 +30,7 @@
 
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/io.h>
 #include <linux/highmem.h>
 #include <linux/wait.h>
@@ -450,7 +451,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
 	INIT_LIST_HEAD(&fbo->io_reserve_lru);
-	fbo->vm_node = NULL;
+	drm_vma_node_reset(&fbo->vma_node);
 	atomic_set(&fbo->cpu_writers, 0);
 
 	spin_lock(&bdev->fence_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3df9f16b041c..1006c15445e9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -33,6 +33,7 @@
 #include <ttm/ttm_module.h>
 #include <ttm/ttm_bo_driver.h>
 #include <ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/mm.h>
 #include <linux/rbtree.h>
 #include <linux/module.h>
@@ -40,37 +41,6 @@
 
 #define TTM_BO_VM_NUM_PREFAULT 16
 
-static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
-						     unsigned long page_start,
-						     unsigned long num_pages)
-{
-	struct rb_node *cur = bdev->addr_space_rb.rb_node;
-	unsigned long cur_offset;
-	struct ttm_buffer_object *bo;
-	struct ttm_buffer_object *best_bo = NULL;
-
-	while (likely(cur != NULL)) {
-		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
-		cur_offset = bo->vm_node->start;
-		if (page_start >= cur_offset) {
-			cur = cur->rb_right;
-			best_bo = bo;
-			if (page_start == cur_offset)
-				break;
-		} else
-			cur = cur->rb_left;
-	}
-
-	if (unlikely(best_bo == NULL))
-		return NULL;
-
-	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
-		     (page_start + num_pages)))
-		return NULL;
-
-	return best_bo;
-}
-
 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -146,9 +116,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
-	    bo->vm_node->start - vma->vm_pgoff;
+	    drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
 	page_last = vma_pages(vma) +
-	    bo->vm_node->start - vma->vm_pgoff;
+	    drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
 
 	if (unlikely(page_offset >= bo->num_pages)) {
 		retval = VM_FAULT_SIGBUS;
@@ -249,6 +219,30 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
 	.close = ttm_bo_vm_close
 };
 
+static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
+						  unsigned long offset,
+						  unsigned long pages)
+{
+	struct drm_vma_offset_node *node;
+	struct ttm_buffer_object *bo = NULL;
+
+	drm_vma_offset_lock_lookup(&bdev->vma_manager);
+
+	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
+	if (likely(node)) {
+		bo = container_of(node, struct ttm_buffer_object, vma_node);
+		if (!kref_get_unless_zero(&bo->kref))
+			bo = NULL;
+	}
+
+	drm_vma_offset_unlock_lookup(&bdev->vma_manager);
+
+	if (!bo)
+		pr_err("Could not find buffer object to map\n");
+
+	return bo;
+}
+
 int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 		struct ttm_bo_device *bdev)
 {
@@ -256,17 +250,9 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 	struct ttm_buffer_object *bo;
 	int ret;
 
-	read_lock(&bdev->vm_lock);
-	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
-				 vma_pages(vma));
-	if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
-		bo = NULL;
-	read_unlock(&bdev->vm_lock);
-
-	if (unlikely(bo == NULL)) {
-		pr_err("Could not find buffer object to map\n");
+	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
+	if (unlikely(!bo))
 		return -EINVAL;
-	}
 
 	driver = bo->bdev->driver;
 	if (unlikely(!driver->verify_access)) {
@@ -304,162 +290,3 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
 	return 0;
 }
 EXPORT_SYMBOL(ttm_fbdev_mmap);
-
-
-ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
-		  const char __user *wbuf, char __user *rbuf, size_t count,
-		  loff_t *f_pos, bool write)
-{
-	struct ttm_buffer_object *bo;
-	struct ttm_bo_driver *driver;
-	struct ttm_bo_kmap_obj map;
-	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
-	unsigned long kmap_offset;
-	unsigned long kmap_end;
-	unsigned long kmap_num;
-	size_t io_size;
-	unsigned int page_offset;
-	char *virtual;
-	int ret;
-	bool no_wait = false;
-	bool dummy;
-
-	read_lock(&bdev->vm_lock);
-	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
-	if (likely(bo != NULL))
-		ttm_bo_reference(bo);
-	read_unlock(&bdev->vm_lock);
-
-	if (unlikely(bo == NULL))
-		return -EFAULT;
-
-	driver = bo->bdev->driver;
-	if (unlikely(!driver->verify_access)) {
-		ret = -EPERM;
-		goto out_unref;
-	}
-
-	ret = driver->verify_access(bo, filp);
-	if (unlikely(ret != 0))
-		goto out_unref;
-
-	kmap_offset = dev_offset - bo->vm_node->start;
-	if (unlikely(kmap_offset >= bo->num_pages)) {
-		ret = -EFBIG;
-		goto out_unref;
-	}
-
-	page_offset = *f_pos & ~PAGE_MASK;
-	io_size = bo->num_pages - kmap_offset;
-	io_size = (io_size << PAGE_SHIFT) - page_offset;
-	if (count < io_size)
-		io_size = count;
-
-	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
-	kmap_num = kmap_end - kmap_offset + 1;
-
-	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
-
-	switch (ret) {
-	case 0:
-		break;
-	case -EBUSY:
-		ret = -EAGAIN;
-		goto out_unref;
-	default:
-		goto out_unref;
-	}
-
-	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
-	if (unlikely(ret != 0)) {
-		ttm_bo_unreserve(bo);
-		goto out_unref;
-	}
-
-	virtual = ttm_kmap_obj_virtual(&map, &dummy);
-	virtual += page_offset;
-
-	if (write)
-		ret = copy_from_user(virtual, wbuf, io_size);
-	else
-		ret = copy_to_user(rbuf, virtual, io_size);
-
-	ttm_bo_kunmap(&map);
-	ttm_bo_unreserve(bo);
-	ttm_bo_unref(&bo);
-
-	if (unlikely(ret != 0))
-		return -EFBIG;
-
-	*f_pos += io_size;
-
-	return io_size;
-out_unref:
-	ttm_bo_unref(&bo);
-	return ret;
-}
-
-ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
-			char __user *rbuf, size_t count, loff_t *f_pos,
-			bool write)
-{
-	struct ttm_bo_kmap_obj map;
-	unsigned long kmap_offset;
-	unsigned long kmap_end;
-	unsigned long kmap_num;
-	size_t io_size;
-	unsigned int page_offset;
-	char *virtual;
-	int ret;
-	bool no_wait = false;
-	bool dummy;
-
-	kmap_offset = (*f_pos >> PAGE_SHIFT);
-	if (unlikely(kmap_offset >= bo->num_pages))
-		return -EFBIG;
-
-	page_offset = *f_pos & ~PAGE_MASK;
-	io_size = bo->num_pages - kmap_offset;
-	io_size = (io_size << PAGE_SHIFT) - page_offset;
-	if (count < io_size)
-		io_size = count;
-
-	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
-	kmap_num = kmap_end - kmap_offset + 1;
-
-	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
-
-	switch (ret) {
-	case 0:
-		break;
-	case -EBUSY:
-		return -EAGAIN;
-	default:
-		return ret;
-	}
-
-	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
-	if (unlikely(ret != 0)) {
-		ttm_bo_unreserve(bo);
-		return ret;
-	}
-
-	virtual = ttm_kmap_obj_virtual(&map, &dummy);
-	virtual += page_offset;
-
-	if (write)
-		ret = copy_from_user(virtual, wbuf, io_size);
-	else
-		ret = copy_to_user(rbuf, virtual, io_size);
-
-	ttm_bo_kunmap(&map);
-	ttm_bo_unreserve(bo);
-	ttm_bo_unref(&bo);
-
-	if (unlikely(ret != 0))
-		return ret;
-
-	*f_pos += io_size;
-
-	return io_size;
-}
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index c0770dbba74a..7650dc0d78ce 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -65,7 +65,6 @@ static const struct file_operations udl_driver_fops = {
 	.read = drm_read,
 	.unlocked_ioctl = drm_ioctl,
 	.release = drm_release,
-	.fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = drm_compat_ioctl,
 #endif
@@ -84,7 +83,7 @@ static struct drm_driver driver = {
 
 	.dumb_create = udl_dumb_create,
 	.dumb_map_offset = udl_gem_mmap,
-	.dumb_destroy = udl_dumb_destroy,
+	.dumb_destroy = drm_gem_dumb_destroy,
 	.fops = &udl_driver_fops,
 
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index cc6d90f28c71..56aec9409fa3 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -114,8 +114,6 @@ int udl_dumb_create(struct drm_file *file_priv,
 		    struct drm_mode_create_dumb *args);
 int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
 		 uint32_t handle, uint64_t *offset);
-int udl_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
-		     uint32_t handle);
 
 int udl_gem_init_object(struct drm_gem_object *obj);
 void udl_gem_free_object(struct drm_gem_object *gem_obj);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index ef034fa3e6f5..8dbe9d0ae9a7 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -66,12 +66,6 @@ int udl_dumb_create(struct drm_file *file,
 			       args->size, &args->handle);
 }
 
-int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-		     uint32_t handle)
-{
-	return drm_gem_handle_delete(file, handle);
-}
-
 int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	int ret;
@@ -123,55 +117,23 @@ int udl_gem_init_object(struct drm_gem_object *obj)
 
 static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
 {
-	int page_count, i;
-	struct page *page;
-	struct inode *inode;
-	struct address_space *mapping;
+	struct page **pages;
 
 	if (obj->pages)
 		return 0;
 
-	page_count = obj->base.size / PAGE_SIZE;
-	BUG_ON(obj->pages != NULL);
-	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
-	if (obj->pages == NULL)
-		return -ENOMEM;
+	pages = drm_gem_get_pages(&obj->base, gfpmask);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
 
-	inode = file_inode(obj->base.filp);
-	mapping = inode->i_mapping;
-	gfpmask |= mapping_gfp_mask(mapping);
-
-	for (i = 0; i < page_count; i++) {
-		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
-		if (IS_ERR(page))
-			goto err_pages;
-		obj->pages[i] = page;
-	}
+	obj->pages = pages;
 
 	return 0;
-err_pages:
-	while (i--)
-		page_cache_release(obj->pages[i]);
-	drm_free_large(obj->pages);
-	obj->pages = NULL;
-	return PTR_ERR(page);
 }
 
 static void udl_gem_put_pages(struct udl_gem_object *obj)
 {
-	int page_count = obj->base.size / PAGE_SIZE;
-	int i;
-
-	if (obj->base.import_attach) {
-		drm_free_large(obj->pages);
-		obj->pages = NULL;
-		return;
-	}
-
-	for (i = 0; i < page_count; i++)
-		page_cache_release(obj->pages[i]);
-
-	drm_free_large(obj->pages);
+	drm_gem_put_pages(&obj->base, obj->pages, false, false);
 	obj->pages = NULL;
 }
 
@@ -223,8 +185,7 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
 	if (obj->pages)
 		udl_gem_put_pages(obj);
 
-	if (gem_obj->map_list.map)
-		drm_gem_free_mmap_offset(gem_obj);
+	drm_gem_free_mmap_offset(gem_obj);
 }
 
 /* the dumb interface doesn't work with the GEM straight MMAP
@@ -247,13 +208,11 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
 	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
 	if (ret)
 		goto out;
-	if (!gobj->base.map_list.map) {
-		ret = drm_gem_create_mmap_offset(obj);
-		if (ret)
-			goto out;
-	}
+	ret = drm_gem_create_mmap_offset(obj);
+	if (ret)
+		goto out;
 
-	*offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;
+	*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
 
 out:
 	drm_gem_object_unreference(&gobj->base);
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 0ce2d7195256..f5ae57406f34 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -41,8 +41,8 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev,
 	total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
 				       0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
 	if (total_len > 5) {
-		DRM_INFO("vendor descriptor length:%x data:%*ph\n",
-			 total_len, 11, desc);
+		DRM_INFO("vendor descriptor length:%x data:%11ph\n",
+			 total_len, desc);
 
 		if ((desc[0] != total_len) || /* descriptor length */
 		    (desc[1] != 0x5f) ||   /* vendor descriptor type */
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 13558f5a2422..652f9b43ec9d 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -720,7 +720,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
 	return ret;
 }
 
-struct drm_ioctl_desc via_ioctls[] = {
+const struct drm_ioctl_desc via_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index f4ae20327941..92684a9b7e34 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -64,7 +64,6 @@ static const struct file_operations via_driver_fops = {
 	.unlocked_ioctl = drm_ioctl,
 	.mmap = drm_mmap,
 	.poll = drm_poll,
-	.fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = drm_compat_ioctl,
 #endif
@@ -73,7 +72,7 @@ static const struct file_operations via_driver_fops = {
 
 static struct drm_driver driver = {
 	.driver_features =
-	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
+	    DRIVER_USE_AGP | DRIVER_HAVE_IRQ |
 	    DRIVER_IRQ_SHARED,
 	.load = via_driver_load,
 	.unload = via_driver_unload,
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 893a65090c36..a811ef2b505f 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -114,7 +114,7 @@ enum via_family {
 #define VIA_READ8(reg)		DRM_READ8(VIA_BASE, reg)
 #define VIA_WRITE8(reg, val)	DRM_WRITE8(VIA_BASE, reg, val)
 
-extern struct drm_ioctl_desc via_ioctls[];
+extern const struct drm_ioctl_desc via_ioctls[];
 extern int via_max_ioctl;
 
 extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 0ab93ff09873..7e3ad87c366c 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -140,11 +140,11 @@ int via_mem_alloc(struct drm_device *dev, void *data,
 	if (mem->type == VIA_MEM_AGP)
 		retval = drm_mm_insert_node(&dev_priv->agp_mm,
 					    &item->mm_node,
-					    tmpSize, 0);
+					    tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
 	else
 		retval = drm_mm_insert_node(&dev_priv->vram_mm,
 					    &item->mm_node,
-					    tmpSize, 0);
+					    tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
 	if (retval)
 		goto fail_alloc;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 78e21649d48a..1a90f0a2f7e5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -124,7 +124,7 @@
  * Ioctl definitions.
  */
 
-static struct drm_ioctl_desc vmw_ioctls[] = {
+static const struct drm_ioctl_desc vmw_ioctls[] = {
 	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
 	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
@@ -622,8 +622,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	}
 
 	dev_priv->fman = vmw_fence_manager_init(dev_priv);
-	if (unlikely(dev_priv->fman == NULL))
+	if (unlikely(dev_priv->fman == NULL)) {
+		ret = -ENOMEM;
 		goto out_no_fman;
+	}
 
 	vmw_kms_save_vga(dev_priv);
 
@@ -782,7 +784,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
 
 	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
 	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
-		struct drm_ioctl_desc *ioctl =
+		const struct drm_ioctl_desc *ioctl =
 			&vmw_ioctls[nr - DRM_COMMAND_BASE];
 
 		if (unlikely(ioctl->cmd_drv != cmd)) {
@@ -795,29 +797,12 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
 	return drm_ioctl(filp, cmd, arg);
 }
 
-static int vmw_firstopen(struct drm_device *dev)
-{
-	struct vmw_private *dev_priv = vmw_priv(dev);
-	dev_priv->is_opened = true;
-
-	return 0;
-}
-
 static void vmw_lastclose(struct drm_device *dev)
 {
-	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct drm_crtc *crtc;
 	struct drm_mode_set set;
 	int ret;
 
-	/**
-	 * Do nothing on the lastclose call from drm_unload.
-	 */
-
-	if (!dev_priv->is_opened)
-		return;
-
-	dev_priv->is_opened = false;
 	set.x = 0;
 	set.y = 0;
 	set.fb = NULL;
@@ -1120,7 +1105,6 @@ static const struct file_operations vmwgfx_driver_fops = {
 	.mmap = vmw_mmap,
 	.poll = vmw_fops_poll,
 	.read = vmw_fops_read,
-	.fasync = drm_fasync,
 #if defined(CONFIG_COMPAT)
 	.compat_ioctl = drm_compat_ioctl,
 #endif
@@ -1132,7 +1116,6 @@ static struct drm_driver driver = {
 	DRIVER_MODESET,
 	.load = vmw_driver_load,
 	.unload = vmw_driver_unload,
-	.firstopen = vmw_firstopen,
 	.lastclose = vmw_lastclose,
 	.irq_preinstall = vmw_irq_preinstall,
 	.irq_postinstall = vmw_irq_postinstall,
@@ -1143,7 +1126,6 @@ static struct drm_driver driver = {
 	.disable_vblank = vmw_disable_vblank,
 	.ioctls = vmw_ioctls,
 	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
-	.dma_quiescent = NULL, /*vmw_dma_quiescent, */
 	.master_create = vmw_master_create,
 	.master_destroy = vmw_master_destroy,
 	.master_set = vmw_master_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 13aeda71280e..150ec64af617 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -324,7 +324,6 @@ struct vmw_private {
 	 */
 
 	bool stealth;
-	bool is_opened;
 	bool enable_fb;
 
 	/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 3751730764a5..1a0bf07fe54b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -29,7 +29,9 @@
 #include <drm/drmP.h>
 #include <drm/ttm/ttm_bo_driver.h>
 
-#define VMW_PPN_SIZE sizeof(unsigned long)
+#define VMW_PPN_SIZE (sizeof(unsigned long))
+/* A future safe maximum remap size. */
+#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
 
 static int vmw_gmr2_bind(struct vmw_private *dev_priv,
 			 struct page *pages[],
@@ -38,43 +40,61 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
 {
 	SVGAFifoCmdDefineGMR2 define_cmd;
 	SVGAFifoCmdRemapGMR2 remap_cmd;
-	uint32_t define_size = sizeof(define_cmd) + 4;
-	uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
 	uint32_t *cmd;
 	uint32_t *cmd_orig;
+	uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
+	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
+	uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
+	uint32_t remap_pos = 0;
+	uint32_t cmd_size = define_size + remap_size;
 	uint32_t i;
 
-	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
+	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
 	define_cmd.gmrId = gmr_id;
 	define_cmd.numPages = num_pages;
 
+	*cmd++ = SVGA_CMD_DEFINE_GMR2;
+	memcpy(cmd, &define_cmd, sizeof(define_cmd));
+	cmd += sizeof(define_cmd) / sizeof(*cmd);
+
+	/*
+	 * Need to split the command if there are too many
+	 * pages that goes into the gmr.
+	 */
+
 	remap_cmd.gmrId = gmr_id;
 	remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
 		SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
-	remap_cmd.offsetPages = 0;
-	remap_cmd.numPages = num_pages;
 
-	*cmd++ = SVGA_CMD_DEFINE_GMR2;
-	memcpy(cmd, &define_cmd, sizeof(define_cmd));
-	cmd += sizeof(define_cmd) / sizeof(uint32);
+	while (num_pages > 0) {
+		unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
+
+		remap_cmd.offsetPages = remap_pos;
+		remap_cmd.numPages = nr;
 
-	*cmd++ = SVGA_CMD_REMAP_GMR2;
-	memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
-	cmd += sizeof(remap_cmd) / sizeof(uint32);
+		*cmd++ = SVGA_CMD_REMAP_GMR2;
+		memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
+		cmd += sizeof(remap_cmd) / sizeof(*cmd);
 
-	for (i = 0; i < num_pages; ++i) {
-		if (VMW_PPN_SIZE <= 4)
-			*cmd = page_to_pfn(*pages++);
-		else
-			*((uint64_t *)cmd) = page_to_pfn(*pages++);
+		for (i = 0; i < nr; ++i) {
+			if (VMW_PPN_SIZE <= 4)
+				*cmd = page_to_pfn(*pages++);
+			else
+				*((uint64_t *)cmd) = page_to_pfn(*pages++);
 
-		cmd += VMW_PPN_SIZE / sizeof(*cmd);
+			cmd += VMW_PPN_SIZE / sizeof(*cmd);
+		}
+
+		num_pages -= nr;
+		remap_pos += nr;
 	}
 
-	vmw_fifo_commit(dev_priv, define_size + remap_size);
+	BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
+
+	vmw_fifo_commit(dev_priv, cmd_size);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index d4607b2530d6..fc43c0601236 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1706,7 +1706,8 @@ int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
 
 int vmw_du_page_flip(struct drm_crtc *crtc,
 		     struct drm_framebuffer *fb,
-		     struct drm_pending_vblank_event *event)
+		     struct drm_pending_vblank_event *event,
+		     uint32_t page_flip_flags)
 {
 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 	struct drm_framebuffer *old_fb = crtc->fb;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 6fa89c9d6214..8d038c36bd57 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -123,7 +123,8 @@ struct vmw_display_unit {
 void vmw_display_unit_cleanup(struct vmw_display_unit *du);
 int vmw_du_page_flip(struct drm_crtc *crtc,
 		     struct drm_framebuffer *fb,
-		     struct drm_pending_vblank_event *event);
+		     struct drm_pending_vblank_event *event,
+		     uint32_t page_flip_flags);
 void vmw_du_crtc_save(struct drm_crtc *crtc);
 void vmw_du_crtc_restore(struct drm_crtc *crtc);
 void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 7953d1f90b63..0e67cf41065d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -500,7 +500,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 		goto out_no_dmabuf;
 
 	rep->handle = handle;
-	rep->map_handle = dma_buf->base.addr_space_offset;
+	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
 	rep->cur_gmr_id = handle;
 	rep->cur_gmr_offset = 0;
 
@@ -834,7 +834,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
 	if (ret != 0)
 		return -EINVAL;
 
-	*offset = out_buf->base.addr_space_offset;
+	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
 	vmw_dmabuf_unreference(&out_buf);
 	return 0;
 }
diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/host1x/drm/dc.c
index 5360e5a57ecc..b1a05ad901c3 100644
--- a/drivers/gpu/host1x/drm/dc.c
+++ b/drivers/gpu/host1x/drm/dc.c
@@ -235,7 +235,7 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
 }
 
 static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-			      struct drm_pending_vblank_event *event)
+			      struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
 {
 	struct tegra_dc *dc = to_tegra_dc(crtc);
 	struct drm_device *drm = crtc->dev;
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
index e184b00faacd..8c61ceeaa12d 100644
--- a/drivers/gpu/host1x/drm/drm.c
+++ b/drivers/gpu/host1x/drm/drm.c
@@ -356,7 +356,7 @@ static int tegra_gem_mmap(struct drm_device *drm, void *data,
 
 	bo = to_tegra_bo(gem);
 
-	args->offset = tegra_bo_get_mmap_offset(bo);
+	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
 
 	drm_gem_object_unreference(gem);
 
@@ -487,7 +487,7 @@ static int tegra_submit(struct drm_device *drm, void *data,
 }
 #endif
 
-static struct drm_ioctl_desc tegra_drm_ioctls[] = {
+static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
 #ifdef CONFIG_DRM_TEGRA_STAGING
 	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
@@ -508,7 +508,6 @@ static const struct file_operations tegra_drm_fops = {
 	.unlocked_ioctl = drm_ioctl,
 	.mmap = tegra_drm_mmap,
 	.poll = drm_poll,
-	.fasync = drm_fasync,
 	.read = drm_read,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = drm_compat_ioctl,
@@ -633,7 +632,7 @@ struct drm_driver tegra_drm_driver = {
 	.gem_vm_ops = &tegra_bo_vm_ops,
 	.dumb_create = tegra_bo_dumb_create,
 	.dumb_map_offset = tegra_bo_dumb_map_offset,
-	.dumb_destroy = tegra_bo_dumb_destroy,
+	.dumb_destroy = drm_gem_dumb_destroy,
 
 	.ioctls = tegra_drm_ioctls,
 	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/host1x/drm/gem.c
index c5e9a9b494c2..59623de4ee15 100644
--- a/drivers/gpu/host1x/drm/gem.c
+++ b/drivers/gpu/host1x/drm/gem.c
@@ -106,11 +106,6 @@ static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
 	dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
 }
 
-unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo)
-{
-	return (unsigned int)bo->gem.map_list.hash.key << PAGE_SHIFT;
-}
-
 struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
 {
 	struct tegra_bo *bo;
@@ -182,8 +177,7 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
 {
 	struct tegra_bo *bo = to_tegra_bo(gem);
 
-	if (gem->map_list.map)
-		drm_gem_free_mmap_offset(gem);
+	drm_gem_free_mmap_offset(gem);
 
 	drm_gem_object_release(gem);
 	tegra_bo_destroy(gem->dev, bo);
@@ -228,7 +222,7 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
 
 	bo = to_tegra_bo(gem);
 
-	*offset = tegra_bo_get_mmap_offset(bo);
+	*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
 
 	drm_gem_object_unreference(gem);
 
@@ -262,9 +256,3 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
 
 	return ret;
 }
-
-int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
-			  unsigned int handle)
-{
-	return drm_gem_handle_delete(file, handle);
-}
diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/host1x/drm/gem.h
index 34de2b486eb7..492533a2dacb 100644
--- a/drivers/gpu/host1x/drm/gem.h
+++ b/drivers/gpu/host1x/drm/gem.h
@@ -44,13 +44,10 @@ struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
 					     unsigned int size,
 					     unsigned int *handle);
 void tegra_bo_free_object(struct drm_gem_object *gem);
-unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo);
 int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
 			 struct drm_mode_create_dumb *args);
 int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
 			     uint32_t handle, uint64_t *offset);
-int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
-			  unsigned int handle);
 
 int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
 
diff --git a/drivers/gpu/host1x/drm/hdmi.c b/drivers/gpu/host1x/drm/hdmi.c
index bf7f02743419..644d95c7d489 100644
--- a/drivers/gpu/host1x/drm/hdmi.c
+++ b/drivers/gpu/host1x/drm/hdmi.c
@@ -551,24 +551,8 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
 		return;
 	}
 
-	memset(&frame, 0, sizeof(frame));
-
-	frame.type = HDMI_INFOFRAME_TYPE_VENDOR;
-	frame.version = 0x01;
-	frame.length = 6;
-
-	frame.data[0] = 0x03; /* regid0 */
-	frame.data[1] = 0x0c; /* regid1 */
-	frame.data[2] = 0x00; /* regid2 */
-	frame.data[3] = 0x02 << 5; /* video format */
-
-	/* TODO: 74 MHz limit? */
-	if (1) {
-		frame.data[4] = 0x00 << 4; /* 3D structure */
-	} else {
-		frame.data[4] = 0x08 << 4; /* 3D structure */
-		frame.data[5] = 0x00 << 4; /* 3D ext. data */
-	}
+	hdmi_vendor_infoframe_init(&frame);
+	frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING;
 
 	err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
 	if (err < 0) {
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index cf787e1d9322..ec0ae2d1686a 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -27,6 +27,7 @@
 #include <linux/pci.h>
 #include <linux/console.h>
 #include <linux/vga_switcheroo.h>
+#include <linux/pm_runtime.h>
 
 #include <linux/vgaarb.h>
 
@@ -37,6 +38,7 @@ struct vga_switcheroo_client {
 	const struct vga_switcheroo_client_ops *ops;
 	int id;
 	bool active;
+	bool driver_power_control;
 	struct list_head list;
 };
 
@@ -132,7 +134,7 @@ EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
 
 static int register_client(struct pci_dev *pdev,
 			   const struct vga_switcheroo_client_ops *ops,
-			   int id, bool active)
+			   int id, bool active, bool driver_power_control)
 {
 	struct vga_switcheroo_client *client;
 
@@ -145,6 +147,7 @@ static int register_client(struct pci_dev *pdev,
 	client->ops = ops;
 	client->id = id;
 	client->active = active;
+	client->driver_power_control = driver_power_control;
 
 	mutex_lock(&vgasr_mutex);
 	list_add_tail(&client->list, &vgasr_priv.clients);
@@ -160,10 +163,11 @@ static int register_client(struct pci_dev *pdev,
 }
 
 int vga_switcheroo_register_client(struct pci_dev *pdev,
-				   const struct vga_switcheroo_client_ops *ops)
+				   const struct vga_switcheroo_client_ops *ops,
+				   bool driver_power_control)
 {
 	return register_client(pdev, ops, -1,
-			       pdev == vga_default_device());
+			       pdev == vga_default_device(), driver_power_control);
 }
 EXPORT_SYMBOL(vga_switcheroo_register_client);
 
@@ -171,7 +175,7 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
 				   const struct vga_switcheroo_client_ops *ops,
 				   int id, bool active)
 {
-	return register_client(pdev, ops, id | ID_BIT_AUDIO, active);
+	return register_client(pdev, ops, id | ID_BIT_AUDIO, active, false);
 }
 EXPORT_SYMBOL(vga_switcheroo_register_audio_client);
 
@@ -258,10 +262,11 @@ static int vga_switcheroo_show(struct seq_file *m, void *v)
 	int i = 0;
 	mutex_lock(&vgasr_mutex);
 	list_for_each_entry(client, &vgasr_priv.clients, list) {
-		seq_printf(m, "%d:%s%s:%c:%s:%s\n", i,
+		seq_printf(m, "%d:%s%s:%c:%s%s:%s\n", i,
 			   client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
 			   client_is_vga(client) ? "" : "-Audio",
 			   client->active ? '+' : ' ',
+			   client->driver_power_control ? "Dyn" : "",
 			   client->pwr_state ? "Pwr" : "Off",
 			   pci_name(client->pdev));
 		i++;
@@ -277,6 +282,8 @@ static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file)
 
 static int vga_switchon(struct vga_switcheroo_client *client)
 {
+	if (client->driver_power_control)
+		return 0;
 	if (vgasr_priv.handler->power_state)
 		vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
 	/* call the driver callback to turn on device */
@@ -287,6 +294,8 @@ static int vga_switchon(struct vga_switcheroo_client *client)
 
 static int vga_switchoff(struct vga_switcheroo_client *client)
 {
+	if (client->driver_power_control)
+		return 0;
 	/* call the driver callback to turn off device */
 	client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
 	if (vgasr_priv.handler->power_state)
@@ -402,6 +411,8 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
 	list_for_each_entry(client, &vgasr_priv.clients, list) {
 		if (client->active || client_is_audio(client))
 			continue;
+		if (client->driver_power_control)
+			continue;
 		set_audio_state(client->id, VGA_SWITCHEROO_OFF);
 		if (client->pwr_state == VGA_SWITCHEROO_ON)
 			vga_switchoff(client);
@@ -413,6 +424,8 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
 	list_for_each_entry(client, &vgasr_priv.clients, list) {
 		if (client->active || client_is_audio(client))
 			continue;
+		if (client->driver_power_control)
+			continue;
 		if (client->pwr_state == VGA_SWITCHEROO_OFF)
 			vga_switchon(client);
 		set_audio_state(client->id, VGA_SWITCHEROO_ON);
@@ -565,3 +578,127 @@ err:
 	return err;
 }
 EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
+
+static void vga_switcheroo_power_switch(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+	struct vga_switcheroo_client *client;
+
+	if (!vgasr_priv.handler->power_state)
+		return;
+
+	client = find_client_from_pci(&vgasr_priv.clients, pdev);
+	if (!client)
+		return;
+
+	if (!client->driver_power_control)
+		return;
+
+	vgasr_priv.handler->power_state(client->id, state);
+}
+
+/* force a PCI device to a certain state - mainly to turn off audio clients */
+
+void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic)
+{
+	struct vga_switcheroo_client *client;
+
+	client = find_client_from_pci(&vgasr_priv.clients, pdev);
+	if (!client)
+		return;
+
+	if (!client->driver_power_control)
+		return;
+
+	client->pwr_state = dynamic;
+	set_audio_state(client->id, dynamic);
+}
+EXPORT_SYMBOL(vga_switcheroo_set_dynamic_switch);
+
+/* switcheroo power domain */
+static int vga_switcheroo_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int ret;
+
+	ret = dev->bus->pm->runtime_suspend(dev);
+	if (ret)
+		return ret;
+
+	vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
+	return 0;
+}
+
+static int vga_switcheroo_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int ret;
+
+	vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_ON);
+	ret = dev->bus->pm->runtime_resume(dev);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/* this version is for the case where the power switch is separate
+   to the device being powered down. */
+int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
+{
+	/* copy over all the bus versions */
+	if (dev->bus && dev->bus->pm) {
+		domain->ops = *dev->bus->pm;
+		domain->ops.runtime_suspend = vga_switcheroo_runtime_suspend;
+		domain->ops.runtime_resume = vga_switcheroo_runtime_resume;
+
+		dev->pm_domain = domain;
+		return 0;
+	}
+	dev->pm_domain = NULL;
+	return -EINVAL;
+}
+EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);
+
+static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int ret;
+	struct vga_switcheroo_client *client, *found = NULL;
+
+	/* we need to check if we have to switch back on the video
+	   device so the audio device can come back */
+	list_for_each_entry(client, &vgasr_priv.clients, list) {
+		if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) && client_is_vga(client)) {
+			found = client;
+			ret = pm_runtime_get_sync(&client->pdev->dev);
+			if (ret) {
+				if (ret != 1)
+					return ret;
+			}
+			break;
+		}
+	}
+	ret = dev->bus->pm->runtime_resume(dev);
+
+	/* put the reference for the gpu */
+	if (found) {
+		pm_runtime_mark_last_busy(&found->pdev->dev);
+		pm_runtime_put_autosuspend(&found->pdev->dev);
+	}
+	return ret;
+}
+
+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
+{
+	/* copy over all the bus versions */
+	if (dev->bus && dev->bus->pm) {
+		domain->ops = *dev->bus->pm;
+		domain->ops.runtime_resume = vga_switcheroo_runtime_resume_hdmi_audio;
+
+		dev->pm_domain = domain;
+		return 0;
+	}
+	dev->pm_domain = NULL;
+	return -EINVAL;
+}
+EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_optimus_hdmi_audio);
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 5207591a598c..cd33084c7860 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -192,6 +192,7 @@ static struct hid_ll_driver logi_dj_ll_driver;
 static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
 					size_t count,
 					unsigned char report_type);
+static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev);
 
 static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev,
 					      struct dj_report *dj_report)
@@ -232,6 +233,7 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
 	if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] &
 	    SPFUNCTION_DEVICE_LIST_EMPTY) {
 		dbg_hid("%s: device list is empty\n", __func__);
+		djrcv_dev->querying_devices = false;
 		return;
 	}
 
@@ -242,6 +244,12 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
 		return;
 	}
 
+	if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
+		/* The device is already known. No need to reallocate it. */
+		dbg_hid("%s: device is already known\n", __func__);
+		return;
+	}
+
 	dj_hiddev = hid_allocate_device();
 	if (IS_ERR(dj_hiddev)) {
 		dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n",
@@ -305,6 +313,7 @@ static void delayedwork_callback(struct work_struct *work)
 	struct dj_report dj_report;
 	unsigned long flags;
 	int count;
+	int retval;
 
 	dbg_hid("%s\n", __func__);
 
@@ -337,6 +346,25 @@ static void delayedwork_callback(struct work_struct *work)
 		logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report);
 		break;
 	default:
+	/* A normal report (i. e. not belonging to a pair/unpair notification)
+	 * arriving here, means that the report arrived but we did not have a
+	 * paired dj_device associated to the report's device_index, this
+	 * means that the original "device paired" notification corresponding
+	 * to this dj_device never arrived to this driver. The reason is that
+	 * hid-core discards all packets coming from a device while probe() is
+	 * executing. */
+	if (!djrcv_dev->paired_dj_devices[dj_report.device_index]) {
+		/* ok, we don't know the device, just re-ask the
+		 * receiver for the list of connected devices. */
+		retval = logi_dj_recv_query_paired_devices(djrcv_dev);
+		if (!retval) {
+			/* everything went fine, so just leave */
+			break;
+		}
+		dev_err(&djrcv_dev->hdev->dev,
+			"%s:logi_dj_recv_query_paired_devices "
+			"error:%d\n", __func__, retval);
+		}
 		dbg_hid("%s: unexpected report type\n", __func__);
 	}
 }
@@ -367,6 +395,12 @@ static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
367 if (!djdev) { 395 if (!djdev) {
368 dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]" 396 dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
369 " is NULL, index %d\n", dj_report->device_index); 397 " is NULL, index %d\n", dj_report->device_index);
398 kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
399
400 if (schedule_work(&djrcv_dev->work) == 0) {
401 dbg_hid("%s: did not schedule the work item, was already "
402 "queued\n", __func__);
403 }
370 return; 404 return;
371 } 405 }
372 406
@@ -397,6 +431,12 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
397 if (dj_device == NULL) { 431 if (dj_device == NULL) {
398 dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]" 432 dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
399 " is NULL, index %d\n", dj_report->device_index); 433 " is NULL, index %d\n", dj_report->device_index);
434 kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
435
436 if (schedule_work(&djrcv_dev->work) == 0) {
437 dbg_hid("%s: did not schedule the work item, was already "
438 "queued\n", __func__);
439 }
400 return; 440 return;
401 } 441 }
402 442
@@ -444,6 +484,10 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
444 struct dj_report *dj_report; 484 struct dj_report *dj_report;
445 int retval; 485 int retval;
446 486
487 /* no need to protect djrcv_dev->querying_devices */
488 if (djrcv_dev->querying_devices)
489 return 0;
490
447 dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); 491 dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
448 if (!dj_report) 492 if (!dj_report)
449 return -ENOMEM; 493 return -ENOMEM;
@@ -455,6 +499,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
455 return retval; 499 return retval;
456} 500}
457 501
502
458static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, 503static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
459 unsigned timeout) 504 unsigned timeout)
460{ 505{
diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h
index fd28a5e0ca3b..4a4000340ce1 100644
--- a/drivers/hid/hid-logitech-dj.h
+++ b/drivers/hid/hid-logitech-dj.h
@@ -101,6 +101,7 @@ struct dj_receiver_dev {
 	struct work_struct work;
 	struct kfifo notif_fifo;
 	spinlock_t lock;
+	bool querying_devices;
 };
 
 struct dj_device {
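
The two hid-logitech-dj hunks above implement a one-shot re-enumeration: when a report arrives for an unknown device index, the driver asks the receiver for its device list, and the new querying_devices flag keeps a second request from being issued while the first is still pending. Below is a minimal user-space sketch of just that guard; every name in it is invented for illustration and none of it is the driver's API.

#include <stdbool.h>
#include <stdio.h>

struct receiver {
	bool querying_devices;
};

static int query_paired_devices(struct receiver *rcv)
{
	if (rcv->querying_devices)	/* a query is already in flight */
		return 0;
	rcv->querying_devices = true;
	printf("asking receiver for its device list\n");
	return 0;
}

static void device_list_received(struct receiver *rcv)
{
	rcv->querying_devices = false;	/* allow future queries */
}

int main(void)
{
	struct receiver rcv = { .querying_devices = false };

	query_paired_devices(&rcv);	/* sends the query */
	query_paired_devices(&rcv);	/* suppressed while pending */
	device_list_received(&rcv);
	return 0;
}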
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index ecbc74923d06..87fbe2924cfa 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -369,7 +369,8 @@ static int sony_mapping(struct hid_device *hdev, struct hid_input *hi,
 	if (sc->quirks & PS3REMOTE)
 		return ps3remote_mapping(hdev, hi, field, usage, bit, max);
 
-	return -1;
+	/* Let hid-core decide for the others */
+	return 0;
 }
 
 /*
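
The one-line hid-sony change above hinges on the return convention for HID mapping callbacks, which the new comment alludes to: a positive value means the driver mapped the usage itself, 0 hands the usage back to hid-core's default mapping, and a negative value drops it. The toy below restates that convention; the return-code meanings are an assumption drawn from the comment, not quoted from hid-core.

#include <stdio.h>

static int example_mapping(int usage)
{
	if (usage == 0x10)	/* a usage this driver special-cases */
		return 1;	/* mapped by the driver itself */
	/* Let hid-core decide for the others */
	return 0;
}

int main(void)
{
	printf("special usage  -> %d\n", example_mapping(0x10));
	printf("ordinary usage -> %d\n", example_mapping(0x30));
	return 0;
}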
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index a7451632ceb4..6f1feb2c2e97 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -518,7 +518,6 @@ int hidraw_connect(struct hid_device *hid)
 		goto out;
 	}
 
-	mutex_unlock(&minors_lock);
 	init_waitqueue_head(&dev->wait);
 	INIT_LIST_HEAD(&dev->list);
 
@@ -528,6 +527,7 @@ int hidraw_connect(struct hid_device *hid)
 	dev->exist = 1;
 	hid->hidraw = dev;
 
+	mutex_unlock(&minors_lock);
 out:
 	return result;
 
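
The hidraw fix above is purely about lock scope: the unlock moves so that dev->exist and hid->hidraw are only published after the device is fully initialised, closing the window in which another thread could observe a half-built device. A minimal pthread sketch of the same shape follows; the struct and names are invented, and pthread mutexes stand in for the kernel's.

#include <pthread.h>

struct raw_dev {
	int exist;
};

static pthread_mutex_t minors_lock = PTHREAD_MUTEX_INITIALIZER;
static struct raw_dev *registered;

static void raw_connect(struct raw_dev *dev)
{
	pthread_mutex_lock(&minors_lock);
	dev->exist = 1;		/* initialise while still holding the lock */
	registered = dev;	/* publish only after initialisation */
	pthread_mutex_unlock(&minors_lock);
}

int main(void)
{
	struct raw_dev dev = { 0 };

	raw_connect(&dev);
	return registered->exist ? 0 : 1;
}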
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 4c605c70ebf9..deb5c25305af 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -562,7 +562,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
			   struct hv_hotadd_state *has)
 {
 	int ret = 0;
-	int i, nid, t;
+	int i, nid;
 	unsigned long start_pfn;
 	unsigned long processed_pfn;
 	unsigned long total_pfn = pfn_count;
@@ -607,14 +607,11 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
 
 		/*
 		 * Wait for the memory block to be onlined.
+		 * Since the hot add has succeeded, it is ok to
+		 * proceed even if the pages in the hot added region
+		 * have not been "onlined" within the allowed time.
 		 */
-		t = wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
-		if (t == 0) {
-			pr_info("hot_add memory timedout\n");
-			has->ha_end_pfn -= HA_CHUNK;
-			has->covered_end_pfn -= processed_pfn;
-			break;
-		}
+		wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
 
 	}
 
@@ -978,6 +975,14 @@ static void post_status(struct hv_dynmem_device *dm)
 				dm->num_pages_ballooned +
 				compute_balloon_floor();
 
+	/*
+	 * If our transaction ID is no longer current, just don't
+	 * send the status. This can happen if we were interrupted
+	 * after we picked our transaction ID.
+	 */
+	if (status.hdr.trans_id != atomic_read(&trans_id))
+		return;
+
 	vmbus_sendpacket(dm->dev->channel, &status,
				sizeof(struct dm_status),
				(unsigned long)NULL,
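
The post_status() hunk above drops a reply whose transaction ID has gone stale, i.e. when the globally current ID moved on after this path sampled it. The sketch below reduces that guard to C11 atomics; it is a shape-only illustration, with invented names, not the hv_balloon code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int trans_id;

static bool send_status(int my_trans_id)
{
	/* If our transaction ID is no longer current, don't send. */
	if (my_trans_id != atomic_load(&trans_id))
		return false;
	printf("sending status for transaction %d\n", my_trans_id);
	return true;
}

int main(void)
{
	int id = atomic_fetch_add(&trans_id, 1) + 1;

	send_status(id);		/* current: sent */
	atomic_fetch_add(&trans_id, 1);	/* someone else moved on */
	send_status(id);		/* stale: silently dropped */
	return 0;
}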
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index a2464bf07c49..e8e071fc1d6d 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -690,7 +690,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
 	if (ret)
 		pr_err("Unable to register child device\n");
 	else
-		pr_info("child device %s registered\n",
+		pr_debug("child device %s registered\n",
			dev_name(&child_device_obj->device));
 
 	return ret;
@@ -702,14 +702,14 @@ int vmbus_device_register(struct hv_device *child_device_obj)
  */
 void vmbus_device_unregister(struct hv_device *device_obj)
 {
+	pr_debug("child device %s unregistered\n",
+		dev_name(&device_obj->device));
+
 	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
 	device_unregister(&device_obj->device);
-
-	pr_info("child device %s unregistered\n",
-		dev_name(&device_obj->device));
 }
 
 
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 0f34bca9f5e5..6099f50b28aa 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -215,7 +215,7 @@ static inline int adt7470_write_word_data(struct i2c_client *client, u8 reg,
					  u16 value)
 {
 	return i2c_smbus_write_byte_data(client, reg, value & 0xFF)
-	       && i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
+	       || i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
 }
 
 static void adt7470_init_client(struct i2c_client *client)
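
Why "||" is correct in the adt7470 hunk above: each SMBus write returns 0 on success and a negative errno on failure, so "a() || b()" runs b only when a succeeded and yields a nonzero result on the first failure. With "&&", a successful first write (0) short-circuits and the second byte is never written. The stub below demonstrates the combinator with the same 0-on-success convention; the register numbers are made up.

#include <stdio.h>

static int write_reg(int reg, int val)
{
	printf("write reg 0x%02x = 0x%02x\n", reg, val);
	return 0;	/* 0 = success, like i2c_smbus_write_byte_data() */
}

static int write_word(int reg, int value)
{
	return write_reg(reg, value & 0xFF)
	       || write_reg(reg + 1, (value >> 8) & 0xFF);
}

int main(void)
{
	return write_word(0x40, 0x1234);	/* both bytes written */
}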
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index 328fb0353c17..a41b5f3fc506 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -605,12 +605,12 @@ static int max6697_init_chip(struct i2c_client *client)
 		if (ret < 0)
 			return ret;
 		ret = i2c_smbus_write_byte_data(client, MAX6581_REG_IDEALITY,
-						pdata->ideality_mask >> 1);
+						pdata->ideality_value);
 		if (ret < 0)
 			return ret;
 		ret = i2c_smbus_write_byte_data(client,
						MAX6581_REG_IDEALITY_SELECT,
-						pdata->ideality_value);
+						pdata->ideality_mask >> 1);
 		if (ret < 0)
 			return ret;
 	}
diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c
index ccec916bc3eb..af8f65fb1c05 100644
--- a/drivers/i2c/busses/i2c-kempld.c
+++ b/drivers/i2c/busses/i2c-kempld.c
@@ -246,9 +246,9 @@ static void kempld_i2c_device_init(struct kempld_i2c_data *i2c)
 		bus_frequency = KEMPLD_I2C_FREQ_MAX;
 
 	if (pld->info.spec_major == 1)
-		prescale = pld->pld_clock / bus_frequency * 5 - 1000;
+		prescale = pld->pld_clock / (bus_frequency * 5) - 1000;
 	else
-		prescale = pld->pld_clock / bus_frequency * 4 - 3000;
+		prescale = pld->pld_clock / (bus_frequency * 4) - 3000;
 
 	if (prescale < 0)
 		prescale = 0;
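
The kempld fix above is a pure precedence bug: "clock / freq * 5" divides first and then multiplies, which is 25 times the result of dividing by "freq * 5". The one-file demo below shows the difference; the sample clock and frequency are invented for illustration.

#include <stdio.h>

int main(void)
{
	long clock = 33000000, freq = 100000;	/* e.g. 33 MHz PLD, 100 kHz bus */

	printf("wrong: %ld\n", clock / freq * 5 - 1000);	/* 650 */
	printf("right: %ld\n", clock / (freq * 5) - 1000);	/* -934 */
	return 0;
}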
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index df8ff5aea5b5..e2e9a0dade96 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -493,7 +493,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
	 * based on this empirical measurement and a lot of previous frobbing.
	 */
 	i2c->cmd_err = 0;
-	if (msg->len < 8) {
+	if (0) {	/* disable PIO mode until a proper fix is made */
 		ret = mxs_i2c_pio_setup_xfer(adap, msg, flags);
 		if (ret)
 			mxs_i2c_reset(i2c);
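
A note on the "if (0)" idiom used above, as opposed to deleting the PIO path or hiding it behind an #ifdef: the compiler still parses and type-checks the disabled branch and then eliminates it as dead code, so the path cannot silently bit-rot while it waits for a proper fix. A trivial illustration, with invented function names:

#include <stdio.h>

static int pio_transfer(void)
{
	return -1;	/* placeholder for the broken fast path */
}

int main(void)
{
	int ret;

	if (0) {	/* disabled, but still compiled and type-checked */
		ret = pio_transfer();
	} else {
		ret = 0;	/* fallback path always taken */
	}
	printf("ret = %d\n", ret);
	return ret;
}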
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 0ad208a69c29..3ceac3e91dde 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -60,7 +60,6 @@ static void tiadc_step_config(struct tiadc_device *adc_dev)
 {
 	unsigned int stepconfig;
 	int i, steps;
-	u32 step_en;
 
	/*
	 * There are 16 configurable steps and 8 analog input
@@ -86,8 +85,7 @@ static void tiadc_step_config(struct tiadc_device *adc_dev)
 		adc_dev->channel_step[i] = steps;
 		steps++;
 	}
-	step_en = get_adc_step_mask(adc_dev);
-	am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en);
+
 }
 
 static const char * const chan_name_ain[] = {
@@ -142,10 +140,22 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
		int *val, int *val2, long mask)
 {
 	struct tiadc_device *adc_dev = iio_priv(indio_dev);
-	int i;
-	unsigned int fifo1count, read;
+	int i, map_val;
+	unsigned int fifo1count, read, stepid;
 	u32 step = UINT_MAX;
 	bool found = false;
+	u32 step_en;
+	unsigned long timeout = jiffies + usecs_to_jiffies
+				(IDLE_TIMEOUT * adc_dev->channels);
+	step_en = get_adc_step_mask(adc_dev);
+	am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en);
+
+	/* Wait for ADC sequencer to complete sampling */
+	while (tiadc_readl(adc_dev, REG_ADCFSM) & SEQ_STATUS) {
+		if (time_after(jiffies, timeout))
+			return -EAGAIN;
+	}
+	map_val = chan->channel + TOTAL_CHANNELS;
 
	/*
	 * When the sub-system is first enabled,
@@ -170,12 +180,16 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
 	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
 	for (i = 0; i < fifo1count; i++) {
 		read = tiadc_readl(adc_dev, REG_FIFO1);
-		if (read >> 16 == step) {
-			*val = read & 0xfff;
+		stepid = read & FIFOREAD_CHNLID_MASK;
+		stepid = stepid >> 0x10;
+
+		if (stepid == map_val) {
+			read = read & FIFOREAD_DATA_MASK;
 			found = true;
+			*val = read;
 		}
 	}
-	am335x_tsc_se_update(adc_dev->mfd_tscadc);
+
 	if (found == false)
		return -EBUSY;
	return IIO_VAL_INT;
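
The read_raw rework above matches FIFO words against the expected step before trusting them: the channel/step ID sits in the high bits of each word and the 12-bit sample in the low bits. The decoding sketch below mirrors the driver's FIFOREAD_* masks but restates their values here as an assumption, so treat the exact bit positions as illustrative.

#include <stdint.h>
#include <stdio.h>

#define FIFOREAD_DATA_MASK   0xfff
#define FIFOREAD_CHNLID_MASK 0xf0000

int main(void)
{
	uint32_t word = (5u << 16) | 0x3a7;	/* step 5, sample 0x3a7 */
	uint32_t stepid = (word & FIFOREAD_CHNLID_MASK) >> 16;
	uint32_t data = word & FIFOREAD_DATA_MASK;

	if (stepid == 5)
		printf("sample for step %u: 0x%03x\n", stepid, data);
	return 0;
}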
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index ea8a4146620d..0dd9bb873130 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -127,12 +127,17 @@ static struct iio_trigger *iio_trigger_find_by_name(const char *name,
 void iio_trigger_poll(struct iio_trigger *trig, s64 time)
 {
 	int i;
-	if (!trig->use_count)
-		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
-			if (trig->subirqs[i].enabled) {
-				trig->use_count++;
+
+	if (!atomic_read(&trig->use_count)) {
+		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+
+		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+			if (trig->subirqs[i].enabled)
 				generic_handle_irq(trig->subirq_base + i);
-			}
+			else
+				iio_trigger_notify_done(trig);
+		}
+	}
 }
 EXPORT_SYMBOL(iio_trigger_poll);
 
@@ -146,19 +151,24 @@ EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
 void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time)
 {
 	int i;
-	if (!trig->use_count)
-		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
-			if (trig->subirqs[i].enabled) {
-				trig->use_count++;
+
+	if (!atomic_read(&trig->use_count)) {
+		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+
+		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+			if (trig->subirqs[i].enabled)
 				handle_nested_irq(trig->subirq_base + i);
-			}
+			else
+				iio_trigger_notify_done(trig);
+		}
+	}
 }
 EXPORT_SYMBOL(iio_trigger_poll_chained);
 
 void iio_trigger_notify_done(struct iio_trigger *trig)
 {
-	trig->use_count--;
-	if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable)
+	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
+	    trig->ops->try_reenable)
 		if (trig->ops->try_reenable(trig))
 			/* Missed an interrupt so launch new poll now */
 			iio_trigger_poll(trig, 0);
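
The trigger rework above switches to a credit scheme: use_count is primed to the number of consumer slots up front, and every slot hands its credit back, either when its handler finishes or immediately if the slot is disabled, so the count reliably returns to zero without races. A user-space sketch of the same accounting with C11 atomics follows; all names are invented.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define CONSUMERS 4

static atomic_int use_count;

static void notify_done(void)
{
	if (atomic_fetch_sub(&use_count, 1) == 1)
		printf("all consumers done, trigger may re-enable\n");
}

static void consumer_handler(int i)
{
	printf("consumer %d ran\n", i);
	notify_done();		/* a real handler calls this when finished */
}

static void trigger_poll(const bool enabled[CONSUMERS])
{
	if (atomic_load(&use_count))
		return;		/* previous poll still in flight */
	atomic_store(&use_count, CONSUMERS);
	for (int i = 0; i < CONSUMERS; i++) {
		if (enabled[i])
			consumer_handler(i);
		else
			notify_done();	/* disabled slot returns its credit */
	}
}

int main(void)
{
	bool enabled[CONSUMERS] = { true, false, true, false };

	trigger_poll(enabled);
	return 0;
}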
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 5f4749e60b04..c1cd5698b8ae 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -232,7 +232,8 @@ static int adjd_s311_read_raw(struct iio_dev *indio_dev,
 
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
-		ret = adjd_s311_read_data(indio_dev, chan->address, val);
+		ret = adjd_s311_read_data(indio_dev,
+			ADJD_S311_DATA_REG(chan->address), val);
 		if (ret < 0)
			return ret;
		return IIO_VAL_INT;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f1c279fabe64..7c0f9535fb7d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -423,7 +423,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 	struct sockaddr_ib *addr;
 	union ib_gid gid, sgid, *dgid;
 	u16 pkey, index;
-	u8 port, p;
+	u8 p;
 	int i;
 
 	cma_dev = NULL;
@@ -443,7 +443,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
			if (!memcmp(&gid, dgid, sizeof(gid))) {
				cma_dev = cur_dev;
				sgid = gid;
-				port = p;
+				id_priv->id.port_num = p;
				goto found;
			}
 
@@ -451,7 +451,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
				     dgid->global.subnet_prefix)) {
				cma_dev = cur_dev;
				sgid = gid;
-				port = p;
+				id_priv->id.port_num = p;
			}
		}
	}
@@ -462,7 +462,6 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 
 found:
 	cma_attach_to_dev(id_priv, cma_dev);
-	id_priv->id.port_num = port;
 	addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
 	memcpy(&addr->sib_addr, &sgid, sizeof sgid);
 	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
@@ -880,7 +879,8 @@ static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
 {
 	struct cma_hdr *hdr;
 
-	if (listen_id->route.addr.src_addr.ss_family == AF_IB) {
+	if ((listen_id->route.addr.src_addr.ss_family == AF_IB) &&
+	    (ib_event->event == IB_CM_REQ_RECEIVED)) {
 		cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
 		return 0;
 	}
@@ -2677,29 +2677,32 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 {
 	struct ib_cm_sidr_req_param req;
 	struct ib_cm_id *id;
+	void *private_data;
 	int offset, ret;
 
+	memset(&req, 0, sizeof req);
 	offset = cma_user_data_offset(id_priv);
 	req.private_data_len = offset + conn_param->private_data_len;
 	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;
 
 	if (req.private_data_len) {
-		req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
-		if (!req.private_data)
+		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
+		if (!private_data)
			return -ENOMEM;
 	} else {
-		req.private_data = NULL;
+		private_data = NULL;
 	}
 
 	if (conn_param->private_data && conn_param->private_data_len)
-		memcpy((void *) req.private_data + offset,
-		       conn_param->private_data, conn_param->private_data_len);
+		memcpy(private_data + offset, conn_param->private_data,
+		       conn_param->private_data_len);
 
-	if (req.private_data) {
-		ret = cma_format_hdr((void *) req.private_data, id_priv);
+	if (private_data) {
+		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
+		req.private_data = private_data;
 	}
 
 	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
@@ -2721,7 +2724,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
		id_priv->cm_id.ib = NULL;
	}
 out:
-	kfree(req.private_data);
+	kfree(private_data);
 	return ret;
 }
 
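
The cma_resolve_ib_udp() hunk above is a const-correctness cleanup: the payload is assembled through a writable local pointer and only then published through the request's read-only field, instead of casting the const away at every write. The same pattern in miniature, with an invented struct:

#include <stdlib.h>
#include <string.h>

struct request {
	const void *private_data;	/* read-only once published */
	size_t private_data_len;
};

static int build_request(struct request *req, const char *payload, size_t len)
{
	void *private_data = calloc(1, len);

	if (!private_data)
		return -1;
	memcpy(private_data, payload, len);	/* writable while building */
	req->private_data = private_data;	/* publish as const */
	req->private_data_len = len;
	return 0;
}

int main(void)
{
	struct request req = { 0 };

	if (build_request(&req, "hdr+data", 8))
		return 1;
	free((void *)req.private_data);
	return 0;
}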
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index dc3fd1e8af07..4c837e66516b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2663,6 +2663,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 	int ret, i;
 	struct ib_qp_attr *attr;
 	struct ib_qp *qp;
+	u16 pkey_index;
 
 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
 	if (!attr) {
@@ -2670,6 +2671,11 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
		return -ENOMEM;
 	}
 
+	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
+			   IB_DEFAULT_PKEY_FULL, &pkey_index);
+	if (ret)
+		pkey_index = 0;
+
 	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
@@ -2680,7 +2686,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
-		attr->pkey_index = 0;
+		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
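
The mad.c change above is a lookup-with-fallback: prefer the table index of the full-membership default P_Key and fall back to index 0 when the port's table does not contain it. The skeleton below shows the idiom with an invented table; only the fallback logic is the point.

#include <stdint.h>
#include <stdio.h>

#define IB_DEFAULT_PKEY_FULL 0xFFFF

static int find_pkey(const uint16_t *table, int len, uint16_t want,
		     uint16_t *index)
{
	for (int i = 0; i < len; i++)
		if (table[i] == want) {
			*index = (uint16_t)i;
			return 0;
		}
	return -1;	/* not found */
}

int main(void)
{
	uint16_t table[] = { 0x7FFF, 0xFFFF, 0x8002 };	/* sample contents */
	uint16_t pkey_index;

	if (find_pkey(table, 3, IB_DEFAULT_PKEY_FULL, &pkey_index))
		pkey_index = 0;	/* fallback, as in the patch */
	printf("using pkey index %u\n", pkey_index);
	return 0;
}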
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index e87f2201b220..d2283837d451 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -226,6 +226,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
		mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
					     sizeof(struct t3_cqe));
		uresp.memsize = mm->len;
+		uresp.reserved = 0;
		resplen = sizeof uresp;
	}
	if (ib_copy_to_udata(udata, &uresp, resplen)) {
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 232040447e8a..a4975e1654a6 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1657,6 +1657,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
		if (mm5) {
			uresp.ma_sync_key = ucontext->key;
			ucontext->key += PAGE_SIZE;
+		} else {
+			uresp.ma_sync_key = 0;
		}
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 4d599cedbb0b..f2a3f48107e7 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1511,8 +1511,14 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
 
 	memset(&attr, 0, sizeof attr);
 	attr.qp_state = IB_QPS_INIT;
-	attr.pkey_index =
-		to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
+	ret = 0;
+	if (create_tun)
+		ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
+					      ctx->port, IB_DEFAULT_PKEY_FULL,
+					      &attr.pkey_index);
+	if (ret || !create_tun)
+		attr.pkey_index =
+			to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
 	attr.qkey = IB_QP1_QKEY;
 	attr.port_num = ctx->port;
 	ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 8000fff4d444..3f831de9a4d8 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -619,7 +619,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 
 	resp.tot_uuars = req.total_num_uuars;
 	resp.num_ports = dev->mdev.caps.num_ports;
-	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
+	err = ib_copy_to_udata(udata, &resp,
+			       sizeof(resp) - sizeof(resp.reserved));
 	if (err)
		goto out_uars;
 
@@ -1426,7 +1427,8 @@ static int init_one(struct pci_dev *pdev,
 	if (err)
		goto err_eqs;
 
-	if (ib_register_device(&dev->ib_dev, NULL))
+	err = ib_register_device(&dev->ib_dev, NULL);
+	if (err)
		goto err_rsrc;
 
 	err = create_umr_res(dev);
@@ -1434,8 +1436,9 @@ static int init_one(struct pci_dev *pdev,
		goto err_dev;
 
 	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
-		if (device_create_file(&dev->ib_dev.dev,
-				       mlx5_class_attributes[i]))
+		err = device_create_file(&dev->ib_dev.dev,
+					 mlx5_class_attributes[i]);
+		if (err)
			goto err_umrc;
	}
 
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 16ac54c9819f..045f8cdbd303 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -199,7 +199,7 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
 
 static int sq_overhead(enum ib_qp_type qp_type)
 {
-	int size;
+	int size = 0;
 
 	switch (qp_type) {
 	case IB_QPT_XRC_INI:
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 418004c93feb..90200245c5eb 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -3570,10 +3570,10 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
 	tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
 	iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
 	nes_debug(NES_DBG_AEQ, "aeid = 0x%04X, qp-cq id = %d, aeqe = %p,"
-			" Tcp state = %d, iWARP state = %d\n",
+			" Tcp state = %s, iWARP state = %s\n",
			async_event_id,
			le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
-			tcp_state, iwarp_state);
+			nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
 
 	aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
 	if (aeq_info & NES_AEQE_QP) {
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 8f67fe2e91e6..5b53ca5a2284 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1384,6 +1384,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
 
 	if (ibpd->uobject) {
		uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index;
+		uresp.mmap_rq_db_index = 0;
		uresp.actual_sq_size = sq_size;
		uresp.actual_rq_size = rq_size;
		uresp.qp_id = nesqp->hwqp.qp_id;
@@ -1767,7 +1768,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
		resp.cq_id = nescq->hw_cq.cq_number;
		resp.cq_size = nescq->hw_cq.cq_size;
		resp.mmap_db_index = 0;
-		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
+		if (ib_copy_to_udata(udata, &resp, sizeof resp - sizeof resp.reserved)) {
			nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
			kfree(nescq);
			return ERR_PTR(-EFAULT);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index a877a8ed7907..f4c587c68f64 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -29,7 +29,6 @@
 #include <net/netevent.h>
 
 #include <rdma/ib_addr.h>
-#include <rdma/ib_cache.h>
 
 #include "ocrdma.h"
 #include "ocrdma_verbs.h"
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index dcfbab177faa..f36630e4b6be 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -242,6 +242,7 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
 	memset(ctx->ah_tbl.va, 0, map_len);
 	ctx->ah_tbl.len = map_len;
 
+	memset(&resp, 0, sizeof(resp));
 	resp.ah_tbl_len = ctx->ah_tbl.len;
 	resp.ah_tbl_page = ctx->ah_tbl.pa;
 
@@ -253,7 +254,6 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
 	resp.wqe_size = dev->attr.wqe_size;
 	resp.rqe_size = dev->attr.rqe_size;
 	resp.dpp_wqe_size = dev->attr.wqe_size;
-	resp.rsvd = 0;
 
 	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
 	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -338,6 +338,7 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
 	struct ocrdma_alloc_pd_uresp rsp;
 	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
 
+	memset(&rsp, 0, sizeof(rsp));
 	rsp.id = pd->id;
 	rsp.dpp_enabled = pd->dpp_enabled;
 	db_page_addr = pd->dev->nic_info.unmapped_db +
@@ -692,6 +693,7 @@ static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata,
 	struct ocrdma_ucontext *uctx;
 	struct ocrdma_create_cq_uresp uresp;
 
+	memset(&uresp, 0, sizeof(uresp));
 	uresp.cq_id = cq->id;
 	uresp.page_size = cq->len;
 	uresp.num_pages = 1;
@@ -1460,6 +1462,7 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
 	int status;
 	struct ocrdma_create_srq_uresp uresp;
 
+	memset(&uresp, 0, sizeof(uresp));
 	uresp.rq_dbid = srq->rq.dbid;
 	uresp.num_rq_pages = 1;
 	uresp.rq_page_addr[0] = srq->rq.pa;
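
The recurring memset() additions above (and the related sizeof-minus-reserved copies in the iwch, mlx5 and nes hunks) are an information-leak hardening idiom: response structs copied to user space are zeroed in full before named fields are assigned, so padding bytes and reserved members never carry stale kernel stack contents. The standalone illustration below uses an invented struct:

#include <string.h>
#include <stdio.h>

struct create_resp {
	unsigned id;
	unsigned reserved[3];	/* would leak stack garbage if untouched */
};

int main(void)
{
	struct create_resp resp;

	memset(&resp, 0, sizeof(resp));	/* clears padding and reserved */
	resp.id = 42;
	printf("id=%u reserved[0]=%u\n", resp.id, resp.reserved[0]);
	return 0;
}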
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 21e8b09d4bf8..016e7429adf6 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -1596,6 +1596,8 @@ static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
 	struct qib_devdata *dd = ppd->dd;
 
 	errs &= QIB_E_P_SDMAERRS;
+	err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
+		   errs, qib_7322p_error_msgs);
 
 	if (errs & QIB_E_P_SDMAUNEXPDATA)
		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index 32162d355370..9b5322d8cd5a 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -717,7 +717,7 @@ void dump_sdma_state(struct qib_pportdata *ppd)
 	struct qib_sdma_txreq *txp, *txpnext;
 	__le64 *descqp;
 	u64 desc[2];
-	dma_addr_t addr;
+	u64 addr;
 	u16 gen, dwlen, dwoffset;
 	u16 head, tail, cnt;
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 2cfa76f5d99e..196b1d13cbcb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -932,12 +932,47 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 	return 0;
 }
 
+/*
+ * Takes whatever value which is in pkey index 0 and updates priv->pkey
+ * returns 0 if the pkey value was changed.
+ */
+static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
+{
+	int result;
+	u16 prev_pkey;
+
+	prev_pkey = priv->pkey;
+	result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
+	if (result) {
+		ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
+			   priv->port, result);
+		return result;
+	}
+
+	priv->pkey |= 0x8000;
+
+	if (prev_pkey != priv->pkey) {
+		ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
+			  prev_pkey, priv->pkey);
+		/*
+		 * Update the pkey in the broadcast address, while making sure to set
+		 * the full membership bit, so that we join the right broadcast group.
+		 */
+		priv->dev->broadcast[8] = priv->pkey >> 8;
+		priv->dev->broadcast[9] = priv->pkey & 0xff;
+		return 0;
+	}
+
+	return 1;
+}
+
 static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
				 enum ipoib_flush_level level)
 {
 	struct ipoib_dev_priv *cpriv;
 	struct net_device *dev = priv->dev;
 	u16 new_index;
+	int result;
 
 	mutex_lock(&priv->vlan_mutex);
 
@@ -951,6 +986,10 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
 	mutex_unlock(&priv->vlan_mutex);
 
 	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
+		/* for non-child devices must check/update the pkey value here */
+		if (level == IPOIB_FLUSH_HEAVY &&
+		    !test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
+			update_parent_pkey(priv);
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}
@@ -961,21 +1000,32 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
	}
 
 	if (level == IPOIB_FLUSH_HEAVY) {
-		if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
-			clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
-			ipoib_ib_dev_down(dev, 0);
-			ipoib_ib_dev_stop(dev, 0);
-			if (ipoib_pkey_dev_delay_open(dev))
+		/* child devices chase their origin pkey value, while non-child
+		 * (parent) devices should always takes what present in pkey index 0
+		 */
+		if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+			if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
+				clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+				ipoib_ib_dev_down(dev, 0);
+				ipoib_ib_dev_stop(dev, 0);
+				if (ipoib_pkey_dev_delay_open(dev))
+					return;
+			}
+			/* restart QP only if P_Key index is changed */
+			if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
+			    new_index == priv->pkey_index) {
+				ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
				return;
+			}
+			priv->pkey_index = new_index;
+		} else {
+			result = update_parent_pkey(priv);
+			/* restart QP only if P_Key value changed */
+			if (result) {
+				ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
+				return;
+			}
		}
-
-		/* restart QP only if P_Key index is changed */
-		if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
-		    new_index == priv->pkey_index) {
-			ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
-			return;
-		}
-		priv->pkey_index = new_index;
 	}
 
 	if (level == IPOIB_FLUSH_LIGHT) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index b6e049a3c7a8..c6f71a88c55c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1461,7 +1461,7 @@ static ssize_t create_child(struct device *dev,
 	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;
 
-	if (pkey < 0 || pkey > 0xffff)
+	if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
		return -EINVAL;
 
	/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index 74685936c948..f81abe16cf09 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -119,6 +119,15 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
 	} else
		child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]);
 
+	if (child_pkey == 0 || child_pkey == 0x8000)
+		return -EINVAL;
+
+	/*
+	 * Set the full membership bit, so that we join the right
+	 * broadcast group, etc.
+	 */
+	child_pkey |= 0x8000;
+
 	err = __ipoib_vlan_add(ppriv, netdev_priv(dev), child_pkey, IPOIB_RTNL_CHILD);
 
 	if (!err && data)
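
The arithmetic behind the repeated 0x8000 checks in the three IPoIB hunks above: bit 15 of an InfiniBand P_Key is the full-membership flag, so 0x8000 by itself carries no key bits and is rejected, while every accepted key has the bit forced on before the interface joins its broadcast group. In miniature:

#include <stdint.h>
#include <stdio.h>

static int validate_child_pkey(uint16_t pkey)
{
	if (pkey == 0 || pkey == 0x8000)
		return -1;		/* no actual key bits present */
	return pkey | 0x8000;		/* join as a full member */
}

int main(void)
{
	printf("0x0002 -> 0x%04x\n", validate_child_pkey(0x0002)); /* 0x8002 */
	printf("0x8000 -> %d\n", validate_child_pkey(0x8000));     /* -1 */
	return 0;
}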
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
index 0b9a79b2f48a..82fc86a90c1a 100644
--- a/drivers/macintosh/windfarm_rm31.c
+++ b/drivers/macintosh/windfarm_rm31.c
@@ -439,15 +439,15 @@ static void backside_setup_pid(void)
 
 /* Slots fan */
 static const struct wf_pid_param slots_param = {
-	.interval	= 5,
-	.history_len	= 2,
-	.gd		= 30 << 20,
-	.gp		= 5 << 20,
-	.gr		= 0,
-	.itarget	= 40 << 16,
-	.additive	= 1,
-	.min		= 300,
-	.max		= 4000,
+	.interval	= 1,
+	.history_len	= 20,
+	.gd		= 0,
+	.gp		= 0,
+	.gr		= 0x00100000,
+	.itarget	= 3200000,
+	.additive	= 0,
+	.min		= 20,
+	.max		= 100,
 };
 
 static void slots_fan_tick(void)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 048f2947e08b..e45f5575fd4d 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -63,7 +63,10 @@
 #include "bcache.h"
 #include "btree.h"
 
+#include <linux/freezer.h>
+#include <linux/kthread.h>
 #include <linux/random.h>
+#include <trace/events/bcache.h>
 
 #define MAX_IN_FLIGHT_DISCARDS		8U
 
@@ -151,7 +154,7 @@ static void discard_finish(struct work_struct *w)
 	mutex_unlock(&ca->set->bucket_lock);
 
 	closure_wake_up(&ca->set->bucket_wait);
-	wake_up(&ca->set->alloc_wait);
+	wake_up_process(ca->alloc_thread);
 
 	closure_put(&ca->set->cl);
 }
@@ -350,38 +353,30 @@ static void invalidate_buckets(struct cache *ca)
		break;
 	}
 
-	pr_debug("free %zu/%zu free_inc %zu/%zu unused %zu/%zu",
-		 fifo_used(&ca->free), ca->free.size,
-		 fifo_used(&ca->free_inc), ca->free_inc.size,
-		 fifo_used(&ca->unused), ca->unused.size);
+	trace_bcache_alloc_invalidate(ca);
 }
 
 #define allocator_wait(ca, cond)					\
 do {									\
-	DEFINE_WAIT(__wait);						\
-									\
 	while (1) {							\
-		prepare_to_wait(&ca->set->alloc_wait,			\
-				&__wait, TASK_INTERRUPTIBLE);		\
+		set_current_state(TASK_INTERRUPTIBLE);			\
 		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
-		if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) {	\
-			finish_wait(&ca->set->alloc_wait, &__wait);	\
-			closure_return(cl);				\
-		}							\
+		if (kthread_should_stop())				\
+			return 0;					\
									\
+		try_to_freeze();					\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
-									\
-	finish_wait(&ca->set->alloc_wait, &__wait);			\
+	__set_current_state(TASK_RUNNING);				\
 } while (0)
 
-void bch_allocator_thread(struct closure *cl)
+static int bch_allocator_thread(void *arg)
 {
-	struct cache *ca = container_of(cl, struct cache, alloc);
+	struct cache *ca = arg;
 
 	mutex_lock(&ca->set->bucket_lock);
 
@@ -442,7 +437,7 @@ long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
 {
 	long r = -1;
 again:
-	wake_up(&ca->set->alloc_wait);
+	wake_up_process(ca->alloc_thread);
 
 	if (fifo_used(&ca->free) > ca->watermark[watermark] &&
	    fifo_pop(&ca->free, r)) {
@@ -476,9 +471,7 @@ again:
		return r;
	}
 
-	pr_debug("alloc failure: blocked %i free %zu free_inc %zu unused %zu",
-		 atomic_read(&ca->set->prio_blocked), fifo_used(&ca->free),
-		 fifo_used(&ca->free_inc), fifo_used(&ca->unused));
+	trace_bcache_alloc_fail(ca);
 
 	if (cl) {
		closure_wait(&ca->set->bucket_wait, cl);
@@ -552,6 +545,17 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
 
 /* Init */
 
+int bch_cache_allocator_start(struct cache *ca)
+{
+	struct task_struct *k = kthread_run(bch_allocator_thread,
+					    ca, "bcache_allocator");
+	if (IS_ERR(k))
+		return PTR_ERR(k);
+
+	ca->alloc_thread = k;
+	return 0;
+}
+
 void bch_cache_allocator_exit(struct cache *ca)
 {
 	struct discard *d;
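
The bcache allocator conversion above trades a closure plus waitqueue for a dedicated kthread that parks itself with set_current_state()/schedule() and is kicked with wake_up_process(). The compile-shape sketch below is a minimal kernel module using the same primitives; the module and thread names are invented and it does no real work.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct task_struct *worker;

static int worker_fn(void *arg)
{
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		schedule();		/* sleep until someone wakes us */
		__set_current_state(TASK_RUNNING);
		pr_info("worker: doing one unit of work\n");
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init demo_init(void)
{
	worker = kthread_run(worker_fn, NULL, "demo_worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);
	wake_up_process(worker);	/* kick one unit of work */
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(worker);		/* wakes the thread and joins it */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");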
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index d3e15b42a4ab..b39f6f0b45f2 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -178,7 +178,6 @@
 #define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
 
 #include <linux/bio.h>
-#include <linux/blktrace_api.h>
 #include <linux/kobject.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
@@ -388,8 +387,6 @@ struct keybuf_key {
 typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
 
 struct keybuf {
-	keybuf_pred_fn	*key_predicate;
-
 	struct bkey	last_scanned;
 	spinlock_t	lock;
 
@@ -437,9 +434,12 @@ struct bcache_device {
 
	/* If nonzero, we're detaching/unregistering from cache set */
	atomic_t	detaching;
+	int		flush_done;
+
+	uint64_t	nr_stripes;
+	unsigned	stripe_size_bits;
+	atomic_t	*stripe_sectors_dirty;
 
-	atomic_long_t	sectors_dirty;
-	unsigned long	sectors_dirty_gc;
	unsigned long	sectors_dirty_last;
	long		sectors_dirty_derivative;
 
@@ -531,6 +531,7 @@ struct cached_dev {
	unsigned	sequential_merge:1;
	unsigned	verify:1;
 
+	unsigned	partial_stripes_expensive:1;
	unsigned	writeback_metadata:1;
	unsigned	writeback_running:1;
	unsigned char	writeback_percent;
@@ -565,8 +566,7 @@ struct cache {
 
	unsigned	watermark[WATERMARK_MAX];
 
-	struct closure	alloc;
-	struct workqueue_struct	*alloc_workqueue;
+	struct task_struct	*alloc_thread;
 
	struct closure	prio;
	struct prio_set	*disk_buckets;
@@ -664,13 +664,9 @@ struct gc_stat {
  * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
  * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e.
  * flushing dirty data).
- *
- * CACHE_SET_STOPPING_2 gets set at the last phase, when it's time to shut down
- * the allocation thread.
  */
 #define CACHE_SET_UNREGISTERING		0
 #define	CACHE_SET_STOPPING		1
-#define	CACHE_SET_STOPPING_2		2
 
 struct cache_set {
	struct closure	cl;
@@ -703,9 +699,6 @@ struct cache_set {
	/* For the btree cache */
	struct shrinker	shrink;
 
-	/* For the allocator itself */
-	wait_queue_head_t	alloc_wait;
-
	/* For the btree cache and anything allocation related */
	struct mutex	bucket_lock;
 
@@ -823,10 +816,9 @@ struct cache_set {
 
	/*
	 * A btree node on disk could have too many bsets for an iterator to fit
-	 * on the stack - this is a single element mempool for btree_read_work()
+	 * on the stack - have to dynamically allocate them
	 */
-	struct mutex	fill_lock;
-	struct btree_iter	*fill_iter;
+	mempool_t	*fill_iter;
 
	/*
	 * btree_sort() is a merge sort and requires temporary space - single
@@ -834,6 +826,7 @@ struct cache_set {
	 */
	struct mutex	sort_lock;
	struct bset	*sort;
+	unsigned	sort_crit_factor;
 
	/* List of buckets we're currently writing data to */
	struct list_head	data_buckets;
@@ -906,8 +899,6 @@ static inline unsigned local_clock_us(void)
	return local_clock() >> 10;
 }
 
-#define MAX_BSETS 4U
-
 #define BTREE_PRIO		USHRT_MAX
 #define INITIAL_PRIO		32768
 
@@ -1112,23 +1103,6 @@ static inline void __bkey_put(struct cache_set *c, struct bkey *k)
		atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
 }
 
-/* Blktrace macros */
-
-#define blktrace_msg(c, fmt, ...)					\
-do {									\
-	struct request_queue *q = bdev_get_queue(c->bdev);		\
-	if (q)								\
-		blk_add_trace_msg(q, fmt, ##__VA_ARGS__);		\
-} while (0)
-
-#define blktrace_msg_all(s, fmt, ...)					\
-do {									\
-	struct cache *_c;						\
-	unsigned i;							\
-	for_each_cache(_c, (s), i)					\
-		blktrace_msg(_c, fmt, ##__VA_ARGS__);			\
-} while (0)
-
 static inline void cached_dev_put(struct cached_dev *dc)
 {
	if (atomic_dec_and_test(&dc->count))
@@ -1173,10 +1147,16 @@ static inline uint8_t bucket_disk_gen(struct bucket *b)
	static struct kobj_attribute ksysfs_##n =			\
		__ATTR(n, S_IWUSR|S_IRUSR, show, store)
 
-/* Forward declarations */
+static inline void wake_up_allocators(struct cache_set *c)
+{
+	struct cache *ca;
+	unsigned i;
+
+	for_each_cache(ca, c, i)
+		wake_up_process(ca->alloc_thread);
+}
 
-void bch_writeback_queue(struct cached_dev *);
-void bch_writeback_add(struct cached_dev *, unsigned);
+/* Forward declarations */
 
 void bch_count_io_errors(struct cache *, int, const char *);
 void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
@@ -1193,7 +1173,6 @@ void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
 uint8_t bch_inc_gen(struct cache *, struct bucket *);
 void bch_rescale_priorities(struct cache_set *, int);
 bool bch_bucket_add_unused(struct cache *, struct bucket *);
-void bch_allocator_thread(struct closure *);
 
 long bch_bucket_alloc(struct cache *, unsigned, struct closure *);
 void bch_bucket_free(struct cache_set *, struct bkey *);
@@ -1241,9 +1220,9 @@ void bch_cache_set_stop(struct cache_set *);
 struct cache_set *bch_cache_set_alloc(struct cache_sb *);
 void bch_btree_cache_free(struct cache_set *);
 int bch_btree_cache_alloc(struct cache_set *);
-void bch_cached_dev_writeback_init(struct cached_dev *);
 void bch_moving_init_cache_set(struct cache_set *);
 
+int bch_cache_allocator_start(struct cache *ca);
 void bch_cache_allocator_exit(struct cache *ca);
 int bch_cache_allocator_init(struct cache *ca);
 
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 1d27d3af3251..8010eed06a51 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -78,6 +78,7 @@ struct bkey *bch_keylist_pop(struct keylist *l)
 bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
 {
 	unsigned i;
+	char buf[80];
 
 	if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)))
		goto bad;
@@ -102,7 +103,8 @@ bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
 
 	return false;
 bad:
-	cache_bug(c, "spotted bad key %s: %s", pkey(k), bch_ptr_status(c, k));
+	bch_bkey_to_text(buf, sizeof(buf), k);
+	cache_bug(c, "spotted bad key %s: %s", buf, bch_ptr_status(c, k));
 	return true;
 }
 
@@ -162,10 +164,16 @@ bool bch_ptr_bad(struct btree *b, const struct bkey *k)
 #ifdef CONFIG_BCACHE_EDEBUG
 bug:
 	mutex_unlock(&b->c->bucket_lock);
-	btree_bug(b,
+
+	{
+		char buf[80];
+
+		bch_bkey_to_text(buf, sizeof(buf), k);
+		btree_bug(b,
 "inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
-		  pkey(k), PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
+			  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
			  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
+	}
 	return true;
 #endif
 }
@@ -1084,33 +1092,39 @@ void bch_btree_sort_into(struct btree *b, struct btree *new)
 	new->sets->size = 0;
 }
 
+#define SORT_CRIT	(4096 / sizeof(uint64_t))
+
 void bch_btree_sort_lazy(struct btree *b)
 {
-	if (b->nsets) {
-		unsigned i, j, keys = 0, total;
-
-		for (i = 0; i <= b->nsets; i++)
-			keys += b->sets[i].data->keys;
-
-		total = keys;
+	unsigned crit = SORT_CRIT;
+	int i;
 
-		for (j = 0; j < b->nsets; j++) {
-			if (keys * 2 < total ||
-			    keys < 1000) {
-				bch_btree_sort_partial(b, j);
-				return;
-			}
+	/* Don't sort if nothing to do */
+	if (!b->nsets)
+		goto out;
 
-			keys -= b->sets[j].data->keys;
-		}
+	/* If not a leaf node, always sort */
+	if (b->level) {
+		bch_btree_sort(b);
+		return;
+	}
 
-		/* Must sort if b->nsets == 3 or we'll overflow */
-		if (b->nsets >= (MAX_BSETS - 1) - b->level) {
-			bch_btree_sort(b);
+	for (i = b->nsets - 1; i >= 0; --i) {
+		crit *= b->c->sort_crit_factor;
+
+		if (b->sets[i].data->keys < crit) {
+			bch_btree_sort_partial(b, i);
			return;
		}
	}
 
+	/* Sort if we'd overflow */
+	if (b->nsets + 1 == MAX_BSETS) {
+		bch_btree_sort(b);
+		return;
+	}
+
+out:
 	bset_build_written_tree(b);
 }
 
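
The rewritten bch_btree_sort_lazy() above walks the in-memory sets from newest to oldest with a geometrically growing threshold and does a partial sort from the first set cheap enough to fold in. The standalone walk-through below uses invented key counts and a fixed stand-in for sort_crit_factor to show how the threshold grows:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned keys[3] = { 9000, 3000, 700 };	/* keys per set, oldest first */
	int nsets = 3;
	unsigned crit = 4096 / sizeof(uint64_t);	/* SORT_CRIT = 512 */
	unsigned factor = 3;		/* stands in for sort_crit_factor */

	for (int i = nsets - 1; i >= 0; --i) {
		crit *= factor;		/* threshold grows toward older sets */
		if (keys[i] < crit) {
			printf("partial sort from set %d (%u < %u)\n",
			       i, keys[i], crit);
			return 0;
		}
	}
	printf("no set small enough; full sort only when at MAX_BSETS\n");
	return 0;
}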
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index 57a9cff41546..ae115a253d73 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -1,6 +1,8 @@
 #ifndef _BCACHE_BSET_H
 #define _BCACHE_BSET_H
 
+#include <linux/slab.h>
+
 /*
  * BKEYS:
  *
@@ -142,6 +144,8 @@
 
 /* Btree key comparison/iteration */
 
+#define MAX_BSETS 4U
+
 struct btree_iter {
 	size_t size, used;
 	struct btree_iter_set {
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 7a5658f04e62..ee372884c405 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -24,6 +24,7 @@
 #include "btree.h"
 #include "debug.h"
 #include "request.h"
+#include "writeback.h"
 
 #include <linux/slab.h>
 #include <linux/bitops.h>
@@ -134,44 +135,17 @@ static uint64_t btree_csum_set(struct btree *b, struct bset *i)
 	return crc ^ 0xffffffffffffffffULL;
 }
 
-static void btree_bio_endio(struct bio *bio, int error)
+static void bch_btree_node_read_done(struct btree *b)
 {
-	struct closure *cl = bio->bi_private;
-	struct btree *b = container_of(cl, struct btree, io.cl);
-
-	if (error)
-		set_btree_node_io_error(b);
-
-	bch_bbio_count_io_errors(b->c, bio, error, (bio->bi_rw & WRITE)
-				 ? "writing btree" : "reading btree");
-	closure_put(cl);
-}
-
-static void btree_bio_init(struct btree *b)
-{
-	BUG_ON(b->bio);
-	b->bio = bch_bbio_alloc(b->c);
-
-	b->bio->bi_end_io = btree_bio_endio;
-	b->bio->bi_private = &b->io.cl;
-}
-
-void bch_btree_read_done(struct closure *cl)
-{
-	struct btree *b = container_of(cl, struct btree, io.cl);
-	struct bset *i = b->sets[0].data;
-	struct btree_iter *iter = b->c->fill_iter;
 	const char *err = "bad btree header";
-	BUG_ON(b->nsets || b->written);
-
-	bch_bbio_free(b->bio, b->c);
-	b->bio = NULL;
+	struct bset *i = b->sets[0].data;
+	struct btree_iter *iter;
 
-	mutex_lock(&b->c->fill_lock);
+	iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
+	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
 	iter->used = 0;
 
-	if (btree_node_io_error(b) ||
-	    !i->seq)
+	if (!i->seq)
 		goto err;
 
 	for (;
@@ -228,17 +202,8 @@ void bch_btree_read_done(struct closure *cl)
 	if (b->written < btree_blocks(b))
 		bch_bset_init_next(b);
 out:
-
-	mutex_unlock(&b->c->fill_lock);
-
-	spin_lock(&b->c->btree_read_time_lock);
-	bch_time_stats_update(&b->c->btree_read_time, b->io_start_time);
-	spin_unlock(&b->c->btree_read_time_lock);
-
-	smp_wmb(); /* read_done is our write lock */
-	set_btree_node_read_done(b);
-
-	closure_return(cl);
+	mempool_free(iter, b->c->fill_iter);
+	return;
 err:
 	set_btree_node_io_error(b);
 	bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys",
@@ -247,48 +212,69 @@ err:
 	goto out;
 }
 
-void bch_btree_read(struct btree *b)
+static void btree_node_read_endio(struct bio *bio, int error)
+{
+	struct closure *cl = bio->bi_private;
+	closure_put(cl);
+}
+
+void bch_btree_node_read(struct btree *b)
 {
-	BUG_ON(b->nsets || b->written);
+	uint64_t start_time = local_clock();
+	struct closure cl;
+	struct bio *bio;
+
+	trace_bcache_btree_read(b);
+
+	closure_init_stack(&cl);
+
+	bio = bch_bbio_alloc(b->c);
+	bio->bi_rw = REQ_META|READ_SYNC;
+	bio->bi_size = KEY_SIZE(&b->key) << 9;
+	bio->bi_end_io = btree_node_read_endio;
+	bio->bi_private = &cl;
+
+	bch_bio_map(bio, b->sets[0].data);
+
+	bch_submit_bbio(bio, b->c, &b->key, 0);
+	closure_sync(&cl);
 
-	if (!closure_trylock(&b->io.cl, &b->c->cl))
-		BUG();
+	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+		set_btree_node_io_error(b);
 
-	b->io_start_time = local_clock();
+	bch_bbio_free(bio, b->c);
 
-	btree_bio_init(b);
-	b->bio->bi_rw = REQ_META|READ_SYNC;
-	b->bio->bi_size = KEY_SIZE(&b->key) << 9;
+	if (btree_node_io_error(b))
+		goto err;
 
-	bch_bio_map(b->bio, b->sets[0].data);
+	bch_btree_node_read_done(b);
 
-	pr_debug("%s", pbtree(b));
-	trace_bcache_btree_read(b->bio);
-	bch_submit_bbio(b->bio, b->c, &b->key, 0);
+	spin_lock(&b->c->btree_read_time_lock);
+	bch_time_stats_update(&b->c->btree_read_time, start_time);
+	spin_unlock(&b->c->btree_read_time_lock);
 
-	continue_at(&b->io.cl, bch_btree_read_done, system_wq);
+	return;
+err:
+	bch_cache_set_error(b->c, "io error reading bucket %lu",
+			    PTR_BUCKET_NR(b->c, &b->key, 0));
 }
 
 static void btree_complete_write(struct btree *b, struct btree_write *w)
 {
 	if (w->prio_blocked &&
 	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
-		wake_up(&b->c->alloc_wait);
+		wake_up_allocators(b->c);
 
 	if (w->journal) {
 		atomic_dec_bug(w->journal);
 		__closure_wake_up(&b->c->journal.wait);
 	}
 
-	if (w->owner)
-		closure_put(w->owner);
-
 	w->prio_blocked = 0;
 	w->journal = NULL;
-	w->owner = NULL;
 }
 
-static void __btree_write_done(struct closure *cl)
+static void __btree_node_write_done(struct closure *cl)
 {
 	struct btree *b = container_of(cl, struct btree, io.cl);
 	struct btree_write *w = btree_prev_write(b);
@@ -304,7 +290,7 @@ static void __btree_write_done(struct closure *cl)
 	closure_return(cl);
 }
 
-static void btree_write_done(struct closure *cl)
+static void btree_node_write_done(struct closure *cl)
 {
 	struct btree *b = container_of(cl, struct btree, io.cl);
 	struct bio_vec *bv;
@@ -313,10 +299,22 @@ static void btree_write_done(struct closure *cl)
 	__bio_for_each_segment(bv, b->bio, n, 0)
 		__free_page(bv->bv_page);
 
-	__btree_write_done(cl);
+	__btree_node_write_done(cl);
 }
 
-static void do_btree_write(struct btree *b)
+static void btree_node_write_endio(struct bio *bio, int error)
+{
+	struct closure *cl = bio->bi_private;
+	struct btree *b = container_of(cl, struct btree, io.cl);
+
+	if (error)
+		set_btree_node_io_error(b);
+
+	bch_bbio_count_io_errors(b->c, bio, error, "writing btree");
+	closure_put(cl);
+}
+
+static void do_btree_node_write(struct btree *b)
 {
 	struct closure *cl = &b->io.cl;
 	struct bset *i = b->sets[b->nsets].data;
@@ -325,15 +323,34 @@ static void do_btree_write(struct btree *b)
 	i->version = BCACHE_BSET_VERSION;
 	i->csum = btree_csum_set(b, i);
 
-	btree_bio_init(b);
-	b->bio->bi_rw = REQ_META|WRITE_SYNC;
-	b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
+	BUG_ON(b->bio);
+	b->bio = bch_bbio_alloc(b->c);
+
+	b->bio->bi_end_io = btree_node_write_endio;
+	b->bio->bi_private = &b->io.cl;
+	b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
+	b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
 	bch_bio_map(b->bio, i);
 
+	/*
+	 * If we're appending to a leaf node, we don't technically need FUA -
+	 * this write just needs to be persisted before the next journal write,
+	 * which will be marked FLUSH|FUA.
+	 *
+	 * Similarly if we're writing a new btree root - the pointer is going to
+	 * be in the next journal entry.
+	 *
+	 * But if we're writing a new btree node (that isn't a root) or
+	 * appending to a non leaf btree node, we need either FUA or a flush
+	 * when we write the parent with the new pointer. FUA is cheaper than a
+	 * flush, and writes appending to leaf nodes aren't blocking anything so
+	 * just make all btree node writes FUA to keep things sane.
+	 */
+
 	bkey_copy(&k.key, &b->key);
 	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));
 
-	if (!bch_bio_alloc_pages(b->bio, GFP_NOIO)) {
+	if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
 		int j;
 		struct bio_vec *bv;
 		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
@@ -342,40 +359,41 @@ static void do_btree_write(struct btree *b)
 			memcpy(page_address(bv->bv_page),
 			       base + j * PAGE_SIZE, PAGE_SIZE);
 
-		trace_bcache_btree_write(b->bio);
 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
 
-		continue_at(cl, btree_write_done, NULL);
+		continue_at(cl, btree_node_write_done, NULL);
 	} else {
 		b->bio->bi_vcnt = 0;
 		bch_bio_map(b->bio, i);
 
-		trace_bcache_btree_write(b->bio);
 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
 
 		closure_sync(cl);
-		__btree_write_done(cl);
+		__btree_node_write_done(cl);
 	}
 }
 
-static void __btree_write(struct btree *b)
+void bch_btree_node_write(struct btree *b, struct closure *parent)
 {
 	struct bset *i = b->sets[b->nsets].data;
 
+	trace_bcache_btree_write(b);
+
 	BUG_ON(current->bio_list);
+	BUG_ON(b->written >= btree_blocks(b));
+	BUG_ON(b->written && !i->keys);
+	BUG_ON(b->sets->data->seq != i->seq);
+	bch_check_key_order(b, i);
 
-	closure_lock(&b->io, &b->c->cl);
 	cancel_delayed_work(&b->work);
 
+	/* If caller isn't waiting for write, parent refcount is cache set */
+	closure_lock(&b->io, parent ?: &b->c->cl);
+
 	clear_bit(BTREE_NODE_dirty, &b->flags);
 	change_bit(BTREE_NODE_write_idx, &b->flags);
 
-	bch_check_key_order(b, i);
-	BUG_ON(b->written && !i->keys);
-
-	do_btree_write(b);
-
-	pr_debug("%s block %i keys %i", pbtree(b), b->written, i->keys);
+	do_btree_node_write(b);
 
 	b->written += set_blocks(i, b->c);
 	atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size,
@@ -387,37 +405,31 @@ static void __btree_write(struct btree *b)
 	bch_bset_init_next(b);
 }
 
-static void btree_write_work(struct work_struct *w)
+static void btree_node_write_work(struct work_struct *w)
 {
 	struct btree *b = container_of(to_delayed_work(w), struct btree, work);
 
-	down_write(&b->lock);
+	rw_lock(true, b, b->level);
 
 	if (btree_node_dirty(b))
-		__btree_write(b);
-	up_write(&b->lock);
+		bch_btree_node_write(b, NULL);
+	rw_unlock(true, b);
 }
 
-void bch_btree_write(struct btree *b, bool now, struct btree_op *op)
+static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
 {
 	struct bset *i = b->sets[b->nsets].data;
 	struct btree_write *w = btree_current_write(b);
 
-	BUG_ON(b->written &&
-	       (b->written >= btree_blocks(b) ||
-		i->seq != b->sets[0].data->seq ||
-		!i->keys));
+	BUG_ON(!b->written);
+	BUG_ON(!i->keys);
 
-	if (!btree_node_dirty(b)) {
-		set_btree_node_dirty(b);
-		queue_delayed_work(btree_io_wq, &b->work,
-				   msecs_to_jiffies(30000));
-	}
+	if (!btree_node_dirty(b))
+		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
 
-	w->prio_blocked += b->prio_blocked;
-	b->prio_blocked = 0;
+	set_btree_node_dirty(b);
 
-	if (op && op->journal && !b->level) {
+	if (op && op->journal) {
 		if (w->journal &&
 		    journal_pin_cmp(b->c, w, op)) {
 			atomic_dec_bug(w->journal);
@@ -430,23 +442,10 @@ void bch_btree_write(struct btree *b, bool now, struct btree_op *op)
 		}
 	}
 
-	if (current->bio_list)
-		return;
-
 	/* Force write if set is too big */
-	if (now ||
-	    b->level ||
-	    set_bytes(i) > PAGE_SIZE - 48) {
-		if (op && now) {
-			/* Must wait on multiple writes */
-			BUG_ON(w->owner);
-			w->owner = &op->cl;
-			closure_get(&op->cl);
-		}
-
-		__btree_write(b);
-	}
-	BUG_ON(!b->written);
+	if (set_bytes(i) > PAGE_SIZE - 48 &&
+	    !current->bio_list)
+		bch_btree_node_write(b, NULL);
 }
 
 /*
@@ -559,7 +558,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
 	init_rwsem(&b->lock);
 	lockdep_set_novalidate_class(&b->lock);
 	INIT_LIST_HEAD(&b->list);
-	INIT_DELAYED_WORK(&b->work, btree_write_work);
+	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
 	b->c = c;
 	closure_init_unlocked(&b->io);
 
@@ -582,7 +581,7 @@ static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
 	BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
 
 	if (cl && btree_node_dirty(b))
-		bch_btree_write(b, true, NULL);
+		bch_btree_node_write(b, NULL);
 
 	if (cl)
 		closure_wait_event_async(&b->io.wait, cl,
@@ -623,6 +622,13 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
 	else if (!mutex_trylock(&c->bucket_lock))
 		return -1;
 
+	/*
+	 * It's _really_ critical that we don't free too many btree nodes - we
+	 * have to always leave ourselves a reserve. The reserve is how we
+	 * guarantee that allocating memory for a new btree node can always
+	 * succeed, so that inserting keys into the btree can always succeed and
+	 * IO can always make forward progress:
+	 */
 	nr /= c->btree_pages;
 	nr = min_t(unsigned long, nr, mca_can_free(c));
 
@@ -766,6 +772,8 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
 	int ret = -ENOMEM;
 	struct btree *i;
 
+	trace_bcache_btree_cache_cannibalize(c);
+
 	if (!cl)
 		return ERR_PTR(-ENOMEM);
 
@@ -784,7 +792,6 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
 		return ERR_PTR(-EAGAIN);
 	}
 
-	/* XXX: tracepoint */
 	c->try_harder = cl;
 	c->try_harder_start = local_clock();
 retry:
@@ -905,6 +912,9 @@ retry:
 	b = mca_find(c, k);
 
 	if (!b) {
+		if (current->bio_list)
+			return ERR_PTR(-EAGAIN);
+
 		mutex_lock(&c->bucket_lock);
 		b = mca_alloc(c, k, level, &op->cl);
 		mutex_unlock(&c->bucket_lock);
@@ -914,7 +924,7 @@ retry:
 	if (IS_ERR(b))
 		return b;
 
-	bch_btree_read(b);
+	bch_btree_node_read(b);
 
 	if (!write)
 		downgrade_write(&b->lock);
@@ -937,15 +947,12 @@ retry:
 	for (; i <= b->nsets; i++)
 		prefetch(b->sets[i].data);
 
-	if (!closure_wait_event(&b->io.wait, &op->cl,
-				btree_node_read_done(b))) {
-		rw_unlock(write, b);
-		b = ERR_PTR(-EAGAIN);
-	} else if (btree_node_io_error(b)) {
+	if (btree_node_io_error(b)) {
 		rw_unlock(write, b);
-		b = ERR_PTR(-EIO);
-	} else
-		BUG_ON(!b->written);
+		return ERR_PTR(-EIO);
+	}
+
+	BUG_ON(!b->written);
 
 	return b;
 }
@@ -959,7 +966,7 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
 	mutex_unlock(&c->bucket_lock);
 
 	if (!IS_ERR_OR_NULL(b)) {
-		bch_btree_read(b);
+		bch_btree_node_read(b);
 		rw_unlock(true, b);
 	}
 }
@@ -970,24 +977,19 @@ static void btree_node_free(struct btree *b, struct btree_op *op)
 {
 	unsigned i;
 
+	trace_bcache_btree_node_free(b);
+
 	/*
 	 * The BUG_ON() in btree_node_get() implies that we must have a write
 	 * lock on parent to free or even invalidate a node
 	 */
 	BUG_ON(op->lock <= b->level);
 	BUG_ON(b == b->c->root);
-	pr_debug("bucket %s", pbtree(b));
 
 	if (btree_node_dirty(b))
 		btree_complete_write(b, btree_current_write(b));
 	clear_bit(BTREE_NODE_dirty, &b->flags);
 
-	if (b->prio_blocked &&
-	    !atomic_sub_return(b->prio_blocked, &b->c->prio_blocked))
-		wake_up(&b->c->alloc_wait);
-
-	b->prio_blocked = 0;
-
 	cancel_delayed_work(&b->work);
 
 	mutex_lock(&b->c->bucket_lock);
@@ -1028,17 +1030,20 @@ retry:
 		goto retry;
 	}
 
-	set_btree_node_read_done(b);
 	b->accessed = 1;
 	bch_bset_init_next(b);
 
 	mutex_unlock(&c->bucket_lock);
+
+	trace_bcache_btree_node_alloc(b);
 	return b;
 err_free:
 	bch_bucket_free(c, &k.key);
 	__bkey_put(c, &k.key);
 err:
 	mutex_unlock(&c->bucket_lock);
+
+	trace_bcache_btree_node_alloc_fail(b);
 	return b;
 }
 
@@ -1137,11 +1142,8 @@ static int btree_gc_mark_node(struct btree *b, unsigned *keys,
 			gc->nkeys++;
 
 			gc->data += KEY_SIZE(k);
-			if (KEY_DIRTY(k)) {
+			if (KEY_DIRTY(k))
 				gc->dirty += KEY_SIZE(k);
-				if (d)
-					d->sectors_dirty_gc += KEY_SIZE(k);
-			}
 		}
 
 	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
@@ -1166,14 +1168,11 @@ static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k,
 
 	if (!IS_ERR_OR_NULL(n)) {
 		swap(b, n);
+		__bkey_put(b->c, &b->key);
 
 		memcpy(k->ptr, b->key.ptr,
 		       sizeof(uint64_t) * KEY_PTRS(&b->key));
 
-		__bkey_put(b->c, &b->key);
-		atomic_inc(&b->c->prio_blocked);
-		b->prio_blocked++;
-
 		btree_node_free(n, op);
 		up_write(&n->lock);
 	}
@@ -1278,7 +1277,7 @@ static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
 	btree_node_free(r->b, op);
 	up_write(&r->b->lock);
 
-	pr_debug("coalesced %u nodes", nodes);
+	trace_bcache_btree_gc_coalesce(nodes);
 
 	gc->nodes--;
 	nodes--;
@@ -1293,14 +1292,9 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
 	void write(struct btree *r)
 	{
 		if (!r->written)
-			bch_btree_write(r, true, op);
-		else if (btree_node_dirty(r)) {
-			BUG_ON(btree_current_write(r)->owner);
-			btree_current_write(r)->owner = writes;
-			closure_get(writes);
-
-			bch_btree_write(r, true, NULL);
-		}
+			bch_btree_node_write(r, &op->cl);
+		else if (btree_node_dirty(r))
+			bch_btree_node_write(r, writes);
 
 		up_write(&r->lock);
 	}
@@ -1386,9 +1380,7 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
 	ret = btree_gc_recurse(b, op, writes, gc);
 
 	if (!b->written || btree_node_dirty(b)) {
-		atomic_inc(&b->c->prio_blocked);
-		b->prio_blocked++;
-		bch_btree_write(b, true, n ? op : NULL);
+		bch_btree_node_write(b, n ? &op->cl : NULL);
 	}
 
 	if (!IS_ERR_OR_NULL(n)) {
@@ -1405,7 +1397,6 @@ static void btree_gc_start(struct cache_set *c)
 {
 	struct cache *ca;
 	struct bucket *b;
-	struct bcache_device **d;
 	unsigned i;
 
 	if (!c->gc_mark_valid)
@@ -1419,16 +1410,12 @@
 	for_each_cache(ca, c, i)
 		for_each_bucket(b, ca) {
 			b->gc_gen = b->gen;
-			if (!atomic_read(&b->pin))
+			if (!atomic_read(&b->pin)) {
 				SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+				SET_GC_SECTORS_USED(b, 0);
+			}
 		}
 
-	for (d = c->devices;
-	     d < c->devices + c->nr_uuids;
-	     d++)
-		if (*d)
-			(*d)->sectors_dirty_gc = 0;
-
 	mutex_unlock(&c->bucket_lock);
 }
 
@@ -1437,7 +1424,6 @@ size_t bch_btree_gc_finish(struct cache_set *c)
 	size_t available = 0;
 	struct bucket *b;
 	struct cache *ca;
-	struct bcache_device **d;
 	unsigned i;
 
 	mutex_lock(&c->bucket_lock);
@@ -1480,22 +1466,6 @@
 		}
 	}
 
-	for (d = c->devices;
-	     d < c->devices + c->nr_uuids;
-	     d++)
-		if (*d) {
-			unsigned long last =
-				atomic_long_read(&((*d)->sectors_dirty));
-			long difference = (*d)->sectors_dirty_gc - last;
-
-			pr_debug("sectors dirty off by %li", difference);
-
-			(*d)->sectors_dirty_last += difference;
-
-			atomic_long_set(&((*d)->sectors_dirty),
-					(*d)->sectors_dirty_gc);
-		}
-
 	mutex_unlock(&c->bucket_lock);
 	return available;
 }
@@ -1508,10 +1478,9 @@ static void bch_btree_gc(struct closure *cl)
 	struct gc_stat stats;
 	struct closure writes;
 	struct btree_op op;
-
 	uint64_t start_time = local_clock();
-	trace_bcache_gc_start(c->sb.set_uuid);
-	blktrace_msg_all(c, "Starting gc");
+
+	trace_bcache_gc_start(c);
 
 	memset(&stats, 0, sizeof(struct gc_stat));
 	closure_init_stack(&writes);
@@ -1520,14 +1489,14 @@
 
 	btree_gc_start(c);
 
+	atomic_inc(&c->prio_blocked);
+
 	ret = btree_root(gc_root, c, &op, &writes, &stats);
 	closure_sync(&op.cl);
 	closure_sync(&writes);
 
 	if (ret) {
-		blktrace_msg_all(c, "Stopped gc");
 		pr_warn("gc failed!");
-
 		continue_at(cl, bch_btree_gc, bch_gc_wq);
 	}
 
@@ -1537,6 +1506,9 @@
 
 	available = bch_btree_gc_finish(c);
 
+	atomic_dec(&c->prio_blocked);
+	wake_up_allocators(c);
+
 	bch_time_stats_update(&c->btree_gc_time, start_time);
 
 	stats.key_bytes *= sizeof(uint64_t);
@@ -1544,10 +1516,8 @@
 	stats.data <<= 9;
 	stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets;
 	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
-	blktrace_msg_all(c, "Finished gc");
 
-	trace_bcache_gc_end(c->sb.set_uuid);
-	wake_up(&c->alloc_wait);
+	trace_bcache_gc_end(c);
 
 	continue_at(cl, bch_moving_gc, bch_gc_wq);
 }
@@ -1654,14 +1624,14 @@ static bool fix_overlapping_extents(struct btree *b,
 				    struct btree_iter *iter,
 				    struct btree_op *op)
 {
-	void subtract_dirty(struct bkey *k, int sectors)
+	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
 	{
-		struct bcache_device *d = b->c->devices[KEY_INODE(k)];
-
-		if (KEY_DIRTY(k) && d)
-			atomic_long_sub(sectors, &d->sectors_dirty);
+		if (KEY_DIRTY(k))
+			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
+						     offset, -sectors);
 	}
 
+	uint64_t old_offset;
 	unsigned old_size, sectors_found = 0;
 
 	while (1) {
@@ -1673,6 +1643,7 @@ static bool fix_overlapping_extents(struct btree *b,
 		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
 			continue;
 
+		old_offset = KEY_START(k);
 		old_size = KEY_SIZE(k);
 
 		/*
@@ -1728,7 +1699,7 @@ static bool fix_overlapping_extents(struct btree *b,
 
 			struct bkey *top;
 
-			subtract_dirty(k, KEY_SIZE(insert));
+			subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
 
 			if (bkey_written(b, k)) {
 				/*
@@ -1775,7 +1746,7 @@ static bool fix_overlapping_extents(struct btree *b,
 			}
 		}
 
-		subtract_dirty(k, old_size - KEY_SIZE(k));
+		subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
 	}
 
 check_failed:
@@ -1798,7 +1769,7 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
 {
 	struct bset *i = b->sets[b->nsets].data;
 	struct bkey *m, *prev;
-	const char *status = "insert";
+	unsigned status = BTREE_INSERT_STATUS_INSERT;
 
 	BUG_ON(bkey_cmp(k, &b->key) > 0);
 	BUG_ON(b->level && !KEY_PTRS(k));
@@ -1831,17 +1802,17 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
 			goto insert;
 
 		/* prev is in the tree, if we merge we're done */
-		status = "back merging";
+		status = BTREE_INSERT_STATUS_BACK_MERGE;
 		if (prev &&
 		    bch_bkey_try_merge(b, prev, k))
 			goto merged;
 
-		status = "overwrote front";
+		status = BTREE_INSERT_STATUS_OVERWROTE;
 		if (m != end(i) &&
 		    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
 			goto copy;
 
-		status = "front merge";
+		status = BTREE_INSERT_STATUS_FRONT_MERGE;
 		if (m != end(i) &&
 		    bch_bkey_try_merge(b, k, m))
 			goto copy;
@@ -1851,21 +1822,21 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
 insert:	shift_keys(b, m, k);
 copy:	bkey_copy(m, k);
 merged:
-	bch_check_keys(b, "%s for %s at %s: %s", status,
-		       op_type(op), pbtree(b), pkey(k));
-	bch_check_key_order_msg(b, i, "%s for %s at %s: %s", status,
-				op_type(op), pbtree(b), pkey(k));
+	if (KEY_DIRTY(k))
+		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
+					     KEY_START(k), KEY_SIZE(k));
+
+	bch_check_keys(b, "%u for %s", status, op_type(op));
 
 	if (b->level && !KEY_OFFSET(k))
-		b->prio_blocked++;
+		btree_current_write(b)->prio_blocked++;
 
-	pr_debug("%s for %s at %s: %s", status,
-		 op_type(op), pbtree(b), pkey(k));
+	trace_bcache_btree_insert_key(b, k, op->type, status);
 
 	return true;
 }
 
-bool bch_btree_insert_keys(struct btree *b, struct btree_op *op)
+static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op)
 {
 	bool ret = false;
 	struct bkey *k;
@@ -1896,7 +1867,7 @@ bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
 	    should_split(b))
 		goto out;
 
-	op->replace = KEY(op->inode, bio_end(bio), bio_sectors(bio));
+	op->replace = KEY(op->inode, bio_end_sector(bio), bio_sectors(bio));
 
 	SET_KEY_PTRS(&op->replace, 1);
 	get_random_bytes(&op->replace.ptr[0], sizeof(uint64_t));
@@ -1907,7 +1878,6 @@ bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
 
 	BUG_ON(op->type != BTREE_INSERT);
 	BUG_ON(!btree_insert_key(b, op, &tmp.k));
-	bch_btree_write(b, false, NULL);
 	ret = true;
 out:
 	downgrade_write(&b->lock);
@@ -1929,12 +1899,11 @@ static int btree_split(struct btree *b, struct btree_op *op)
 
 	split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5;
 
-	pr_debug("%ssplitting at %s keys %i", split ? "" : "not ",
-		 pbtree(b), n1->sets[0].data->keys);
-
 	if (split) {
 		unsigned keys = 0;
 
+		trace_bcache_btree_node_split(b, n1->sets[0].data->keys);
+
 		n2 = bch_btree_node_alloc(b->c, b->level, &op->cl);
 		if (IS_ERR(n2))
 			goto err_free1;
@@ -1967,18 +1936,21 @@ static int btree_split(struct btree *b, struct btree_op *op)
 		bkey_copy_key(&n2->key, &b->key);
 
 		bch_keylist_add(&op->keys, &n2->key);
-		bch_btree_write(n2, true, op);
+		bch_btree_node_write(n2, &op->cl);
 		rw_unlock(true, n2);
-	} else
+	} else {
+		trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
+
 		bch_btree_insert_keys(n1, op);
+	}
 
 	bch_keylist_add(&op->keys, &n1->key);
-	bch_btree_write(n1, true, op);
+	bch_btree_node_write(n1, &op->cl);
 
 	if (n3) {
 		bkey_copy_key(&n3->key, &MAX_KEY);
 		bch_btree_insert_keys(n3, op);
-		bch_btree_write(n3, true, op);
+		bch_btree_node_write(n3, &op->cl);
 
 		closure_sync(&op->cl);
 		bch_btree_set_root(n3);
@@ -2082,8 +2054,12 @@ static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
 
 		BUG_ON(write_block(b) != b->sets[b->nsets].data);
 
-		if (bch_btree_insert_keys(b, op))
-			bch_btree_write(b, false, op);
+		if (bch_btree_insert_keys(b, op)) {
+			if (!b->level)
+				bch_btree_leaf_dirty(b, op);
+			else
+				bch_btree_node_write(b, &op->cl);
+		}
 	}
 
 	return 0;
@@ -2140,6 +2116,11 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c)
 void bch_btree_set_root(struct btree *b)
 {
 	unsigned i;
+	struct closure cl;
+
+	closure_init_stack(&cl);
+
+	trace_bcache_btree_set_root(b);
 
 	BUG_ON(!b->written);
 
@@ -2153,8 +2134,8 @@ void bch_btree_set_root(struct btree *b)
 	b->c->root = b;
 	__bkey_put(b->c, &b->key);
 
-	bch_journal_meta(b->c, NULL);
-	pr_debug("%s for %pf", pbtree(b), __builtin_return_address(0));
+	bch_journal_meta(b->c, &cl);
+	closure_sync(&cl);
 }
 
 /* Cache lookup */
@@ -2215,9 +2196,6 @@ static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
 				   KEY_OFFSET(k) - bio->bi_sector);
 
 		n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-		if (!n)
-			return -EAGAIN;
-
 		if (n == bio)
 			op->lookup_done = true;
 
@@ -2240,7 +2218,6 @@ static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
 		n->bi_end_io = bch_cache_read_endio;
 		n->bi_private = &s->cl;
 
-		trace_bcache_cache_hit(n);
 		__bch_submit_bbio(n, b->c);
 	}
 
@@ -2257,9 +2234,6 @@ int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
 	struct btree_iter iter;
 	bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0));
 
-	pr_debug("at %s searching for %u:%llu", pbtree(b), op->inode,
-		 (uint64_t) bio->bi_sector);
-
 	do {
 		k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
 		if (!k) {
@@ -2303,7 +2277,8 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
 }
 
 static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
-				   struct keybuf *buf, struct bkey *end)
+				   struct keybuf *buf, struct bkey *end,
+				   keybuf_pred_fn *pred)
 {
 	struct btree_iter iter;
 	bch_btree_iter_init(b, &iter, &buf->last_scanned);
@@ -2322,11 +2297,9 @@ static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
 		if (bkey_cmp(&buf->last_scanned, end) >= 0)
 			break;
 
-		if (buf->key_predicate(buf, k)) {
+		if (pred(buf, k)) {
 			struct keybuf_key *w;
 
-			pr_debug("%s", pkey(k));
-
 			spin_lock(&buf->lock);
 
 			w = array_alloc(&buf->freelist);
@@ -2343,7 +2316,7 @@ static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
 			if (!k)
 				break;
 
-			btree(refill_keybuf, k, b, op, buf, end);
+			btree(refill_keybuf, k, b, op, buf, end, pred);
 			/*
 			 * Might get an error here, but can't really do anything
 			 * and it'll get logged elsewhere. Just read what we
@@ -2361,7 +2334,7 @@ static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
 }
 
 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
-		       struct bkey *end)
+		       struct bkey *end, keybuf_pred_fn *pred)
 {
 	struct bkey start = buf->last_scanned;
 	struct btree_op op;
@@ -2369,7 +2342,7 @@ void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
 
 	cond_resched();
 
-	btree_root(refill_keybuf, c, &op, buf, end);
+	btree_root(refill_keybuf, c, &op, buf, end, pred);
 	closure_sync(&op.cl);
 
 	pr_debug("found %s keys from %llu:%llu to %llu:%llu",
@@ -2455,7 +2428,8 @@ struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
 
 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
 					  struct keybuf *buf,
-					  struct bkey *end)
+					  struct bkey *end,
+					  keybuf_pred_fn *pred)
 {
 	struct keybuf_key *ret;
 
@@ -2469,15 +2443,14 @@ struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
 			break;
 		}
 
-		bch_refill_keybuf(c, buf, end);
+		bch_refill_keybuf(c, buf, end, pred);
 	}
 
 	return ret;
 }
 
-void bch_keybuf_init(struct keybuf *buf, keybuf_pred_fn *fn)
+void bch_keybuf_init(struct keybuf *buf)
 {
-	buf->key_predicate = fn;
 	buf->last_scanned = MAX_KEY;
 	buf->keys = RB_ROOT;
 
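A recurring theme in the btree.c changes above is that the keybuf filter moves out of struct keybuf (where bch_keybuf_init() used to store it) and into each refill or rescan call, so different scans over the same buffer can filter differently. The toy C program below shows only the shape of that API change; the struct fields, dirty_pred and refill are invented for illustration and are not the driver's definitions.

#include <stdbool.h>
#include <stdio.h>

struct key { unsigned inode; bool dirty; };
struct keybuf { int last; };	/* scan state lives here; the filter no longer does */

typedef bool (keybuf_pred_fn)(struct keybuf *, const struct key *);

static bool dirty_pred(struct keybuf *buf, const struct key *k)
{
	(void)buf;
	return k->dirty;
}

/* The caller chooses the filter per refill instead of fixing it at init time. */
static void refill(struct keybuf *buf, const struct key *keys, int n,
		   keybuf_pred_fn *pred)
{
	for (int i = 0; i < n; i++)
		if (pred(buf, &keys[i]))
			printf("queued key inode %u\n", keys[i].inode);
}

int main(void)
{
	struct keybuf buf = { 0 };
	struct key keys[] = { { 1, true }, { 2, false }, { 3, true } };

	refill(&buf, keys, 3, dirty_pred);	/* e.g. a writeback-style scan */
	return 0;
}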
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index af4a7092a28c..3333d3723633 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -102,7 +102,6 @@
 #include "debug.h"
 
 struct btree_write {
-	struct closure *owner;
 	atomic_t *journal;
 
 	/* If btree_split() frees a btree node, it writes a new pointer to that
@@ -142,16 +141,12 @@ struct btree {
 	 */
 	struct bset_tree sets[MAX_BSETS];
 
-	/* Used to refcount bio splits, also protects b->bio */
+	/* For outstanding btree writes, used as a lock - protects write_idx */
 	struct closure_with_waitlist io;
 
-	/* Gets transferred to w->prio_blocked - see the comment there */
-	int prio_blocked;
-
 	struct list_head list;
 	struct delayed_work work;
 
-	uint64_t io_start_time;
 	struct btree_write writes[2];
 	struct bio *bio;
 };
@@ -164,13 +159,11 @@ static inline void set_btree_node_ ## flag(struct btree *b)	\
 { set_bit(BTREE_NODE_ ## flag, &b->flags); }			\
 
 enum btree_flags {
-	BTREE_NODE_read_done,
 	BTREE_NODE_io_error,
 	BTREE_NODE_dirty,
 	BTREE_NODE_write_idx,
 };
 
-BTREE_FLAG(read_done);
 BTREE_FLAG(io_error);
 BTREE_FLAG(dirty);
 BTREE_FLAG(write_idx);
@@ -278,6 +271,13 @@ struct btree_op {
 	BKEY_PADDED(replace);
 };
 
+enum {
+	BTREE_INSERT_STATUS_INSERT,
+	BTREE_INSERT_STATUS_BACK_MERGE,
+	BTREE_INSERT_STATUS_OVERWROTE,
+	BTREE_INSERT_STATUS_FRONT_MERGE,
+};
+
 void bch_btree_op_init_stack(struct btree_op *);
 
 static inline void rw_lock(bool w, struct btree *b, int level)
@@ -293,9 +293,7 @@ static inline void rw_unlock(bool w, struct btree *b)
 #ifdef CONFIG_BCACHE_EDEBUG
 	unsigned i;
 
-	if (w &&
-	    b->key.ptr[0] &&
-	    btree_node_read_done(b))
+	if (w && b->key.ptr[0])
 		for (i = 0; i <= b->nsets; i++)
 			bch_check_key_order(b, b->sets[i].data);
 #endif
@@ -370,9 +368,8 @@ static inline bool should_split(struct btree *b)
 		> btree_blocks(b));
 }
 
-void bch_btree_read_done(struct closure *);
-void bch_btree_read(struct btree *);
-void bch_btree_write(struct btree *b, bool now, struct btree_op *op);
+void bch_btree_node_read(struct btree *);
+void bch_btree_node_write(struct btree *, struct closure *);
 
 void bch_cannibalize_unlock(struct cache_set *, struct closure *);
 void bch_btree_set_root(struct btree *);
@@ -380,7 +377,6 @@ struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
 struct btree *bch_btree_node_get(struct cache_set *, struct bkey *,
 				 int, struct btree_op *);
 
-bool bch_btree_insert_keys(struct btree *, struct btree_op *);
 bool bch_btree_insert_check_key(struct btree *, struct btree_op *,
 				struct bio *);
 int bch_btree_insert(struct btree_op *, struct cache_set *);
@@ -393,13 +389,14 @@ void bch_moving_gc(struct closure *);
 int bch_btree_check(struct cache_set *, struct btree_op *);
 uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
 
-void bch_keybuf_init(struct keybuf *, keybuf_pred_fn *);
-void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *);
+void bch_keybuf_init(struct keybuf *);
+void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
+		       keybuf_pred_fn *);
 bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
 				  struct bkey *);
 void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
 struct keybuf_key *bch_keybuf_next(struct keybuf *);
-struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *,
-					  struct keybuf *, struct bkey *);
+struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
+					  struct bkey *, keybuf_pred_fn *);
 
 #endif
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index bd05a9a8c7cf..9aba2017f0d1 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -66,16 +66,18 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
 	} else {
 		struct closure *parent = cl->parent;
 		struct closure_waitlist *wait = closure_waitlist(cl);
+		closure_fn *destructor = cl->fn;
 
 		closure_debug_destroy(cl);
 
+		smp_mb();
 		atomic_set(&cl->remaining, -1);
 
 		if (wait)
 			closure_wake_up(wait);
 
-		if (cl->fn)
-			cl->fn(cl);
+		if (destructor)
+			destructor(cl);
 
 		if (parent)
 			closure_put(parent);
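The closure.c change above is subtle: atomic_set(&cl->remaining, -1) publishes the closure as free, after which its memory may be reused by another thread, so cl->fn must be read into a local before that store (the added smp_mb() orders the destroy against the release). A reduced C11 illustration of the load-before-release pattern follows; it is a simplification with invented names (closure_destroy, report), no waitlists or parents, and only the single ordering point that matters here.

#include <stdatomic.h>
#include <stdio.h>

struct closure;
typedef void (closure_fn)(struct closure *);

struct closure {
	atomic_int remaining;
	closure_fn *fn;
};

static void closure_destroy(struct closure *cl)
{
	/*
	 * Read fn into a local *before* publishing the closure as free:
	 * once remaining is set to -1, another thread may reuse or free
	 * the object, so cl->fn must not be dereferenced afterwards.
	 */
	closure_fn *destructor = cl->fn;

	atomic_store_explicit(&cl->remaining, -1, memory_order_release);

	if (destructor)
		destructor(cl);
}

static void report(struct closure *cl)
{
	(void)cl;
	puts("destructor ran after release");
}

int main(void)
{
	struct closure cl = { .fn = report };

	atomic_init(&cl.remaining, 1);
	closure_destroy(&cl);
	return 0;
}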
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 89fd5204924e..88e6411eab4f 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -47,11 +47,10 @@ const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
47 return ""; 47 return "";
48} 48}
49 49
50struct keyprint_hack bch_pkey(const struct bkey *k) 50int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
51{ 51{
52 unsigned i = 0; 52 unsigned i = 0;
53 struct keyprint_hack r; 53 char *out = buf, *end = buf + size;
54 char *out = r.s, *end = r.s + KEYHACK_SIZE;
55 54
56#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__)) 55#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
57 56
@@ -75,16 +74,14 @@ struct keyprint_hack bch_pkey(const struct bkey *k)
75 if (KEY_CSUM(k)) 74 if (KEY_CSUM(k))
76 p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]); 75 p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
77#undef p 76#undef p
78 return r; 77 return out - buf;
79} 78}
80 79
81struct keyprint_hack bch_pbtree(const struct btree *b) 80int bch_btree_to_text(char *buf, size_t size, const struct btree *b)
82{ 81{
83 struct keyprint_hack r; 82 return scnprintf(buf, size, "%zu level %i/%i",
84 83 PTR_BUCKET_NR(b->c, &b->key, 0),
85 snprintf(r.s, 40, "%zu level %i/%i", PTR_BUCKET_NR(b->c, &b->key, 0), 84 b->level, b->c->root ? b->c->root->level : -1);
86 b->level, b->c->root ? b->c->root->level : -1);
87 return r;
88} 85}
89 86
90#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG) 87#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG)
@@ -100,10 +97,12 @@ static void dump_bset(struct btree *b, struct bset *i)
 {
 	struct bkey *k;
 	unsigned j;
+	char buf[80];
 
 	for (k = i->start; k < end(i); k = bkey_next(k)) {
+		bch_bkey_to_text(buf, sizeof(buf), k);
 		printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
-		       (uint64_t *) k - i->d, i->keys, pkey(k));
+		       (uint64_t *) k - i->d, i->keys, buf);
 
 		for (j = 0; j < KEY_PTRS(k); j++) {
 			size_t n = PTR_BUCKET_NR(b->c, k, j);
@@ -144,7 +143,7 @@ void bch_btree_verify(struct btree *b, struct bset *new)
 	v->written = 0;
 	v->level = b->level;
 
-	bch_btree_read(v);
+	bch_btree_node_read(v);
 	closure_wait_event(&v->io.wait, &cl,
 			   atomic_read(&b->io.cl.remaining) == -1);
 
@@ -200,7 +199,7 @@ void bch_data_verify(struct search *s)
 	if (!check)
 		return;
 
-	if (bch_bio_alloc_pages(check, GFP_NOIO))
+	if (bio_alloc_pages(check, GFP_NOIO))
 		goto out_put;
 
 	check->bi_rw = READ_SYNC;
@@ -252,6 +251,7 @@ static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
 					   va_list args)
 {
 	unsigned i;
+	char buf[80];
 
 	console_lock();
 
@@ -262,7 +262,8 @@ static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
 
 	console_unlock();
 
-	panic("at %s\n", pbtree(b));
+	bch_btree_to_text(buf, sizeof(buf), b);
+	panic("at %s\n", buf);
 }
 
 void bch_check_key_order_msg(struct btree *b, struct bset *i,
@@ -337,6 +338,7 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
 {
 	struct dump_iterator *i = file->private_data;
 	ssize_t ret = 0;
+	char kbuf[80];
 
 	while (size) {
 		struct keybuf_key *w;
@@ -355,11 +357,12 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
 		if (i->bytes)
 			break;
 
-		w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY);
+		w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY, dump_pred);
 		if (!w)
 			break;
 
-		i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", pkey(&w->key));
+		bch_bkey_to_text(kbuf, sizeof(kbuf), &w->key);
+		i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
 		bch_keybuf_del(&i->keys, w);
 	}
 
@@ -377,7 +380,7 @@ static int bch_dump_open(struct inode *inode, struct file *file)
 
 	file->private_data = i;
 	i->c = c;
-	bch_keybuf_init(&i->keys, dump_pred);
+	bch_keybuf_init(&i->keys);
 	i->keys.last_scanned = KEY(0, 0, 0);
 
 	return 0;
@@ -409,142 +412,6 @@ void bch_debug_init_cache_set(struct cache_set *c)
 
 #endif
 
-/* Fuzz tester has rotted: */
-#if 0
-
-static ssize_t btree_fuzz(struct kobject *k, struct kobj_attribute *a,
-			  const char *buffer, size_t size)
-{
-	void dump(struct btree *b)
-	{
-		struct bset *i;
-
-		for (i = b->sets[0].data;
-		     index(i, b) < btree_blocks(b) &&
-		     i->seq == b->sets[0].data->seq;
-		     i = ((void *) i) + set_blocks(i, b->c) * block_bytes(b->c))
-			dump_bset(b, i);
-	}
-
-	struct cache_sb *sb;
-	struct cache_set *c;
-	struct btree *all[3], *b, *fill, *orig;
-	int j;
-
-	struct btree_op op;
-	bch_btree_op_init_stack(&op);
-
-	sb = kzalloc(sizeof(struct cache_sb), GFP_KERNEL);
-	if (!sb)
-		return -ENOMEM;
-
-	sb->bucket_size = 128;
-	sb->block_size = 4;
-
-	c = bch_cache_set_alloc(sb);
-	if (!c)
-		return -ENOMEM;
-
-	for (j = 0; j < 3; j++) {
-		BUG_ON(list_empty(&c->btree_cache));
-		all[j] = list_first_entry(&c->btree_cache, struct btree, list);
-		list_del_init(&all[j]->list);
-
-		all[j]->key = KEY(0, 0, c->sb.bucket_size);
-		bkey_copy_key(&all[j]->key, &MAX_KEY);
-	}
-
-	b = all[0];
-	fill = all[1];
-	orig = all[2];
-
-	while (1) {
-		for (j = 0; j < 3; j++)
-			all[j]->written = all[j]->nsets = 0;
-
-		bch_bset_init_next(b);
-
-		while (1) {
-			struct bset *i = write_block(b);
-			struct bkey *k = op.keys.top;
-			unsigned rand;
-
-			bkey_init(k);
-			rand = get_random_int();
-
-			op.type = rand & 1
-				? BTREE_INSERT
-				: BTREE_REPLACE;
-			rand >>= 1;
-
-			SET_KEY_SIZE(k, bucket_remainder(c, rand));
-			rand >>= c->bucket_bits;
-			rand &= 1024 * 512 - 1;
-			rand += c->sb.bucket_size;
-			SET_KEY_OFFSET(k, rand);
-#if 0
-			SET_KEY_PTRS(k, 1);
-#endif
-			bch_keylist_push(&op.keys);
-			bch_btree_insert_keys(b, &op);
-
-			if (should_split(b) ||
-			    set_blocks(i, b->c) !=
-			    __set_blocks(i, i->keys + 15, b->c)) {
-				i->csum = csum_set(i);
-
-				memcpy(write_block(fill),
-				       i, set_bytes(i));
-
-				b->written += set_blocks(i, b->c);
-				fill->written = b->written;
-				if (b->written == btree_blocks(b))
-					break;
-
-				bch_btree_sort_lazy(b);
-				bch_bset_init_next(b);
-			}
-		}
-
-		memcpy(orig->sets[0].data,
-		       fill->sets[0].data,
-		       btree_bytes(c));
-
-		bch_btree_sort(b);
-		fill->written = 0;
-		bch_btree_read_done(&fill->io.cl);
-
-		if (b->sets[0].data->keys != fill->sets[0].data->keys ||
-		    memcmp(b->sets[0].data->start,
-			   fill->sets[0].data->start,
-			   b->sets[0].data->keys * sizeof(uint64_t))) {
-			struct bset *i = b->sets[0].data;
-			struct bkey *k, *l;
-
-			for (k = i->start,
-			     l = fill->sets[0].data->start;
-			     k < end(i);
-			     k = bkey_next(k), l = bkey_next(l))
-				if (bkey_cmp(k, l) ||
-				    KEY_SIZE(k) != KEY_SIZE(l))
-					pr_err("key %zi differs: %s != %s",
-					       (uint64_t *) k - i->d,
-					       pkey(k), pkey(l));
-
-			for (j = 0; j < 3; j++) {
-				pr_err("**** Set %i ****", j);
-				dump(all[j]);
-			}
-			panic("\n");
-		}
-
-		pr_info("fuzz complete: %i keys", b->sets[0].data->keys);
-	}
-}
-
-kobj_attribute_write(fuzz, btree_fuzz);
-#endif
-
 void bch_debug_exit(void)
 {
 	if (!IS_ERR_OR_NULL(debug))
@@ -554,11 +421,6 @@ void bch_debug_exit(void)
 int __init bch_debug_init(struct kobject *kobj)
 {
 	int ret = 0;
-#if 0
-	ret = sysfs_create_file(kobj, &ksysfs_fuzz.attr);
-	if (ret)
-		return ret;
-#endif
 
 	debug = debugfs_create_dir("bcache", NULL);
 	return ret;
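The debug.c changes above replace the pkey()/pbtree() macros, which returned an 80-byte buffer by value in struct keyprint_hack, with snprintf-style helpers that write into a caller-supplied buffer and return the length. The accumulation idiom they rely on is sketched below in plain userspace C; the function key_to_text and its fields are invented for the example, and unlike the kernel's scnprintf (which clamps at the buffer end), userspace snprintf returns the would-be length, so this sketch assumes the buffer is large enough.

#include <stdio.h>

/* scnprintf-style accumulation into a bounded, caller-owned buffer. */
static int key_to_text(char *buf, size_t size, unsigned inode,
		       unsigned long long offset, unsigned sectors)
{
	char *out = buf, *end = buf + size;

	/* Each call appends at the current cursor; assumes no truncation. */
	out += snprintf(out, end - out, "%u:%llu", inode, offset);
	out += snprintf(out, end - out, " len %u", sectors);

	return out - buf;	/* characters written */
}

int main(void)
{
	char buf[80];

	key_to_text(buf, sizeof(buf), 1, 4096, 8);
	puts(buf);
	return 0;
}

Returning the length (rather than a struct by value) lets callers chain further formatting onto the same buffer, which is why the kernel helpers adopt this shape.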
diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h
index f9378a218148..1c39b5a2489b 100644
--- a/drivers/md/bcache/debug.h
+++ b/drivers/md/bcache/debug.h
@@ -3,15 +3,8 @@
 
 /* Btree/bkey debug printing */
 
-#define KEYHACK_SIZE 80
-struct keyprint_hack {
-	char s[KEYHACK_SIZE];
-};
-
-struct keyprint_hack bch_pkey(const struct bkey *k);
-struct keyprint_hack bch_pbtree(const struct btree *b);
-#define pkey(k) (&bch_pkey(k).s[0])
-#define pbtree(b) (&bch_pbtree(b).s[0])
+int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k);
+int bch_btree_to_text(char *buf, size_t size, const struct btree *b);
 
 #ifdef CONFIG_BCACHE_EDEBUG
 
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 48efd4dea645..9056632995b1 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -9,6 +9,8 @@
 #include "bset.h"
 #include "debug.h"
 
+#include <linux/blkdev.h>
+
 static void bch_bi_idx_hack_endio(struct bio *bio, int error)
 {
 	struct bio *p = bio->bi_private;
@@ -66,13 +68,6 @@ static void bch_generic_make_request_hack(struct bio *bio)
  * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
  * bvec boundry; it is the caller's responsibility to ensure that @bio is not
  * freed before the split.
- *
- * If bch_bio_split() is running under generic_make_request(), it's not safe to
- * allocate more than one bio from the same bio set. Therefore, if it is running
- * under generic_make_request() it masks out __GFP_WAIT when doing the
- * allocation. The caller must check for failure if there's any possibility of
- * it being called from under generic_make_request(); it is then the caller's
- * responsibility to retry from a safe context (by e.g. punting to workqueue).
  */
 struct bio *bch_bio_split(struct bio *bio, int sectors,
 			  gfp_t gfp, struct bio_set *bs)
@@ -83,20 +78,13 @@ struct bio *bch_bio_split(struct bio *bio, int sectors,
 
 	BUG_ON(sectors <= 0);
 
-	/*
-	 * If we're being called from underneath generic_make_request() and we
-	 * already allocated any bios from this bio set, we risk deadlock if we
-	 * use the mempool. So instead, we possibly fail and let the caller punt
-	 * to workqueue or somesuch and retry in a safe context.
-	 */
-	if (current->bio_list)
-		gfp &= ~__GFP_WAIT;
-
 	if (sectors >= bio_sectors(bio))
 		return bio;
 
 	if (bio->bi_rw & REQ_DISCARD) {
 		ret = bio_alloc_bioset(gfp, 1, bs);
+		if (!ret)
+			return NULL;
 		idx = 0;
 		goto out;
 	}
@@ -160,17 +148,18 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 	unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
 				      queue_max_segments(q));
-	struct bio_vec *bv, *end = bio_iovec(bio) +
-		min_t(int, bio_segments(bio), max_segments);
 
 	if (bio->bi_rw & REQ_DISCARD)
 		return min(ret, q->limits.max_discard_sectors);
 
 	if (bio_segments(bio) > max_segments ||
 	    q->merge_bvec_fn) {
+		struct bio_vec *bv;
+		int i, seg = 0;
+
 		ret = 0;
 
-		for (bv = bio_iovec(bio); bv < end; bv++) {
+		bio_for_each_segment(bv, bio, i) {
 			struct bvec_merge_data bvm = {
 				.bi_bdev	= bio->bi_bdev,
 				.bi_sector	= bio->bi_sector,
@@ -178,10 +167,14 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
 				.bi_rw		= bio->bi_rw,
 			};
 
+			if (seg == max_segments)
+				break;
+
 			if (q->merge_bvec_fn &&
 			    q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
 				break;
 
+			seg++;
 			ret += bv->bv_len >> 9;
 		}
 	}
@@ -218,30 +211,10 @@ static void bch_bio_submit_split_endio(struct bio *bio, int error)
 	closure_put(cl);
 }
 
-static void __bch_bio_submit_split(struct closure *cl)
-{
-	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
-	struct bio *bio = s->bio, *n;
-
-	do {
-		n = bch_bio_split(bio, bch_bio_max_sectors(bio),
-				  GFP_NOIO, s->p->bio_split);
-		if (!n)
-			continue_at(cl, __bch_bio_submit_split, system_wq);
-
-		n->bi_end_io = bch_bio_submit_split_endio;
-		n->bi_private = cl;
-
-		closure_get(cl);
-		bch_generic_make_request_hack(n);
-	} while (n != bio);
-
-	continue_at(cl, bch_bio_submit_split_done, NULL);
-}
-
 void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
 {
 	struct bio_split_hook *s;
+	struct bio *n;
 
 	if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
 		goto submit;
@@ -250,6 +223,7 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
 		goto submit;
 
 	s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
+	closure_init(&s->cl, NULL);
 
 	s->bio		= bio;
 	s->p		= p;
@@ -257,8 +231,18 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
 	s->bi_private	= bio->bi_private;
 	bio_get(bio);
 
-	closure_call(&s->cl, __bch_bio_submit_split, NULL, NULL);
-	return;
+	do {
+		n = bch_bio_split(bio, bch_bio_max_sectors(bio),
+				  GFP_NOIO, s->p->bio_split);
+
+		n->bi_end_io	= bch_bio_submit_split_endio;
+		n->bi_private	= &s->cl;
+
+		closure_get(&s->cl);
+		bch_generic_make_request_hack(n);
+	} while (n != bio);
+
+	continue_at(&s->cl, bch_bio_submit_split_done, NULL);
 submit:
 	bch_generic_make_request_hack(bio);
 }
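The rewritten bch_generic_make_request() relies on closure reference counting to detect when every split has completed: one reference is taken per submitted fragment and dropped in the fragment's endio handler, and continue_at() fires the completion once the count drains. A condensed sketch of the accounting, with hypothetical helper names rather than the driver's exact code:

	closure_init(&s->cl, NULL);		/* refcount starts at 1 */

	do {
		n = split_off_front(bio);	/* hypothetical splitter */
		closure_get(&s->cl);		/* dropped in the fragment's endio */
		submit_fragment(n);		/* hypothetical submit */
	} while (n != bio);

	/* runs bch_bio_submit_split_done once all refs are put */
	continue_at(&s->cl, bch_bio_submit_split_done, NULL);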
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 8c8dfdcd9d4c..ba95ab84b2be 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -9,6 +9,8 @@
 #include "debug.h"
 #include "request.h"
 
+#include <trace/events/bcache.h>
+
 /*
  * Journal replay/recovery:
  *
@@ -182,9 +184,14 @@ bsearch:
 	pr_debug("starting binary search, l %u r %u", l, r);
 
 	while (l + 1 < r) {
+		seq = list_entry(list->prev, struct journal_replay,
+				 list)->j.seq;
+
 		m = (l + r) >> 1;
+		read_bucket(m);
 
-		if (read_bucket(m))
+		if (seq != list_entry(list->prev, struct journal_replay,
+				      list)->j.seq)
 			l = m;
 		else
 			r = m;
@@ -300,7 +307,8 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
 		for (k = i->j.start;
 		     k < end(&i->j);
 		     k = bkey_next(k)) {
-			pr_debug("%s", pkey(k));
+			trace_bcache_journal_replay_key(k);
+
 			bkey_copy(op->keys.top, k);
 			bch_keylist_push(&op->keys);
 
@@ -384,7 +392,7 @@ out:
 	return;
 found:
 	if (btree_node_dirty(best))
-		bch_btree_write(best, true, NULL);
+		bch_btree_node_write(best, NULL);
 	rw_unlock(true, best);
 }
 
@@ -617,7 +625,7 @@ static void journal_write_unlocked(struct closure *cl)
 		bio_reset(bio);
 		bio->bi_sector	= PTR_OFFSET(k, i);
 		bio->bi_bdev	= ca->bdev;
-		bio->bi_rw	= REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH;
+		bio->bi_rw	= REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
 		bio->bi_size	= sectors << 9;
 
 		bio->bi_end_io	= journal_write_endio;
@@ -712,7 +720,8 @@ void bch_journal(struct closure *cl)
 	spin_lock(&c->journal.lock);
 
 	if (journal_full(&c->journal)) {
-		/* XXX: tracepoint */
+		trace_bcache_journal_full(c);
+
 		closure_wait(&c->journal.wait, cl);
 
 		journal_reclaim(c);
@@ -728,13 +737,15 @@ void bch_journal(struct closure *cl)
 
 	if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS ||
 	    b > c->journal.blocks_free) {
-		/* XXX: If we were inserting so many keys that they won't fit in
+		trace_bcache_journal_entry_full(c);
+
+		/*
+		 * XXX: If we were inserting so many keys that they won't fit in
 		 * an _empty_ journal write, we'll deadlock. For now, handle
 		 * this in bch_keylist_realloc() - but something to think about.
 		 */
 		BUG_ON(!w->data->keys);
 
-		/* XXX: tracepoint */
 		BUG_ON(!closure_wait(&w->wait, cl));
 
 		closure_flush(&c->journal.io);
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 8589512c972e..1a3b4f4786c3 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -9,6 +9,8 @@
 #include "debug.h"
 #include "request.h"
 
+#include <trace/events/bcache.h>
+
 struct moving_io {
 	struct keybuf_key	*w;
 	struct search		s;
@@ -44,14 +46,14 @@ static void write_moving_finish(struct closure *cl)
 {
 	struct moving_io *io = container_of(cl, struct moving_io, s.cl);
 	struct bio *bio = &io->bio.bio;
-	struct bio_vec *bv = bio_iovec_idx(bio, bio->bi_vcnt);
+	struct bio_vec *bv;
+	int i;
 
-	while (bv-- != bio->bi_io_vec)
+	bio_for_each_segment_all(bv, bio, i)
 		__free_page(bv->bv_page);
 
-	pr_debug("%s %s", io->s.op.insert_collision
-		 ? "collision moving" : "moved",
-		 pkey(&io->w->key));
+	if (io->s.op.insert_collision)
+		trace_bcache_gc_copy_collision(&io->w->key);
 
 	bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w);
 
@@ -94,8 +96,6 @@ static void write_moving(struct closure *cl)
 	struct moving_io *io = container_of(s, struct moving_io, s);
 
 	if (!s->error) {
-		trace_bcache_write_moving(&io->bio.bio);
-
 		moving_init(io);
 
 		io->bio.bio.bi_sector	= KEY_START(&io->w->key);
@@ -122,7 +122,6 @@ static void read_moving_submit(struct closure *cl)
 	struct moving_io *io = container_of(s, struct moving_io, s);
 	struct bio *bio = &io->bio.bio;
 
-	trace_bcache_read_moving(bio);
 	bch_submit_bbio(bio, s->op.c, &io->w->key, 0);
 
 	continue_at(cl, write_moving, bch_gc_wq);
@@ -138,7 +137,8 @@ static void read_moving(struct closure *cl)
 	/* XXX: if we error, background writeback could stall indefinitely */
 
 	while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
-		w = bch_keybuf_next_rescan(c, &c->moving_gc_keys, &MAX_KEY);
+		w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
+					   &MAX_KEY, moving_pred);
 		if (!w)
 			break;
 
@@ -159,10 +159,10 @@ static void read_moving(struct closure *cl)
 		bio->bi_rw	= READ;
 		bio->bi_end_io	= read_moving_endio;
 
-		if (bch_bio_alloc_pages(bio, GFP_KERNEL))
+		if (bio_alloc_pages(bio, GFP_KERNEL))
 			goto err;
 
-		pr_debug("%s", pkey(&w->key));
+		trace_bcache_gc_copy(&w->key);
 
 		closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl);
 
@@ -250,5 +250,5 @@ void bch_moving_gc(struct closure *cl)
 
 void bch_moving_init_cache_set(struct cache_set *c)
 {
-	bch_keybuf_init(&c->moving_gc_keys, moving_pred);
+	bch_keybuf_init(&c->moving_gc_keys);
 }
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index e5ff12e52d5b..786a1a4f74d8 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -10,6 +10,7 @@
 #include "btree.h"
 #include "debug.h"
 #include "request.h"
+#include "writeback.h"
 
 #include <linux/cgroup.h>
 #include <linux/module.h>
@@ -21,8 +22,6 @@
 
 #define CUTOFF_CACHE_ADD	95
 #define CUTOFF_CACHE_READA	90
-#define CUTOFF_WRITEBACK	50
-#define CUTOFF_WRITEBACK_SYNC	75
 
 struct kmem_cache *bch_search_cache;
 
@@ -489,6 +488,12 @@ static void bch_insert_data_loop(struct closure *cl)
 		bch_queue_gc(op->c);
 	}
 
+	/*
+	 * Journal writes are marked REQ_FLUSH; if the original write was a
+	 * flush, it'll wait on the journal write.
+	 */
+	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);
+
 	do {
 		unsigned i;
 		struct bkey *k;
@@ -510,10 +515,6 @@ static void bch_insert_data_loop(struct closure *cl)
 			goto err;
 
 		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
-		if (!n) {
-			__bkey_put(op->c, k);
-			continue_at(cl, bch_insert_data_loop, bcache_wq);
-		}
 
 		n->bi_end_io	= bch_insert_data_endio;
 		n->bi_private	= cl;
@@ -530,10 +531,9 @@ static void bch_insert_data_loop(struct closure *cl)
 		if (KEY_CSUM(k))
 			bio_csum(n, k);
 
-		pr_debug("%s", pkey(k));
+		trace_bcache_cache_insert(k);
 		bch_keylist_push(&op->keys);
 
-		trace_bcache_cache_insert(n, n->bi_sector, n->bi_bdev);
 		n->bi_rw |= REQ_WRITE;
 		bch_submit_bbio(n, op->c, k, 0);
 	} while (n != bio);
@@ -716,7 +716,7 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
 	s->task			= current;
 	s->orig_bio		= bio;
 	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
-	s->op.flush_journal	= (bio->bi_rw & REQ_FLUSH) != 0;
+	s->op.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
 	s->op.skip		= (bio->bi_rw & REQ_DISCARD) != 0;
 	s->recoverable		= 1;
 	s->start_time		= jiffies;
@@ -784,11 +784,8 @@ static void request_read_error(struct closure *cl)
 	int i;
 
 	if (s->recoverable) {
-		/* The cache read failed, but we can retry from the backing
-		 * device.
-		 */
-		pr_debug("recovering at sector %llu",
-			 (uint64_t) s->orig_bio->bi_sector);
+		/* Retry from the backing device: */
+		trace_bcache_read_retry(s->orig_bio);
 
 		s->error = 0;
 		bv = s->bio.bio.bi_io_vec;
@@ -806,7 +803,6 @@ static void request_read_error(struct closure *cl)
 
 	/* XXX: invalidate cache */
 
-	trace_bcache_read_retry(&s->bio.bio);
 	closure_bio_submit(&s->bio.bio, &s->cl, s->d);
 }
 
@@ -827,53 +823,13 @@ static void request_read_done(struct closure *cl)
 	 */
 
 	if (s->op.cache_bio) {
-		struct bio_vec *src, *dst;
-		unsigned src_offset, dst_offset, bytes;
-		void *dst_ptr;
-
 		bio_reset(s->op.cache_bio);
 		s->op.cache_bio->bi_sector	= s->cache_miss->bi_sector;
 		s->op.cache_bio->bi_bdev	= s->cache_miss->bi_bdev;
 		s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;
 		bch_bio_map(s->op.cache_bio, NULL);
 
-		src = bio_iovec(s->op.cache_bio);
-		dst = bio_iovec(s->cache_miss);
-		src_offset = src->bv_offset;
-		dst_offset = dst->bv_offset;
-		dst_ptr = kmap(dst->bv_page);
-
-		while (1) {
-			if (dst_offset == dst->bv_offset + dst->bv_len) {
-				kunmap(dst->bv_page);
-				dst++;
-				if (dst == bio_iovec_idx(s->cache_miss,
-						s->cache_miss->bi_vcnt))
-					break;
-
-				dst_offset = dst->bv_offset;
-				dst_ptr = kmap(dst->bv_page);
-			}
-
-			if (src_offset == src->bv_offset + src->bv_len) {
-				src++;
-				if (src == bio_iovec_idx(s->op.cache_bio,
-						 s->op.cache_bio->bi_vcnt))
-					BUG();
-
-				src_offset = src->bv_offset;
-			}
-
-			bytes = min(dst->bv_offset + dst->bv_len - dst_offset,
-				    src->bv_offset + src->bv_len - src_offset);
-
-			memcpy(dst_ptr + dst_offset,
-			       page_address(src->bv_page) + src_offset,
-			       bytes);
-
-			src_offset	+= bytes;
-			dst_offset	+= bytes;
-		}
+		bio_copy_data(s->cache_miss, s->op.cache_bio);
 
 		bio_put(s->cache_miss);
 		s->cache_miss = NULL;
@@ -899,6 +855,7 @@ static void request_read_done_bh(struct closure *cl)
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
 	bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);
+	trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip);
 
 	if (s->error)
 		continue_at_nobarrier(cl, request_read_error, bcache_wq);
@@ -917,9 +874,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	struct bio *miss;
 
 	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-	if (!miss)
-		return -EAGAIN;
-
 	if (miss == bio)
 		s->op.lookup_done = true;
 
@@ -938,8 +892,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 		reada = min(dc->readahead >> 9,
 			    sectors - bio_sectors(miss));
 
-		if (bio_end(miss) + reada > bdev_sectors(miss->bi_bdev))
-			reada = bdev_sectors(miss->bi_bdev) - bio_end(miss);
+		if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev))
+			reada = bdev_sectors(miss->bi_bdev) -
+				bio_end_sector(miss);
 	}
 
 	s->cache_bio_sectors = bio_sectors(miss) + reada;
@@ -963,13 +918,12 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 		goto out_put;
 
 	bch_bio_map(s->op.cache_bio, NULL);
-	if (bch_bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
+	if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
 		goto out_put;
 
 	s->cache_miss = miss;
 	bio_get(s->op.cache_bio);
 
-	trace_bcache_cache_miss(s->orig_bio);
 	closure_bio_submit(s->op.cache_bio, &s->cl, s->d);
 
 	return ret;
@@ -1002,24 +956,13 @@ static void cached_dev_write_complete(struct closure *cl)
 	cached_dev_bio_complete(cl);
 }
 
-static bool should_writeback(struct cached_dev *dc, struct bio *bio)
-{
-	unsigned threshold = (bio->bi_rw & REQ_SYNC)
-		? CUTOFF_WRITEBACK_SYNC
-		: CUTOFF_WRITEBACK;
-
-	return !atomic_read(&dc->disk.detaching) &&
-		cache_mode(dc, bio) == CACHE_MODE_WRITEBACK &&
-		dc->disk.c->gc_stats.in_use < threshold;
-}
-
 static void request_write(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 	struct bio *bio = &s->bio.bio;
 	struct bkey start, end;
 	start = KEY(dc->disk.id, bio->bi_sector, 0);
-	end = KEY(dc->disk.id, bio_end(bio), 0);
+	end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 
 	bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
 
@@ -1034,22 +977,37 @@ static void request_write(struct cached_dev *dc, struct search *s)
 	if (bio->bi_rw & REQ_DISCARD)
 		goto skip;
 
+	if (should_writeback(dc, s->orig_bio,
+			     cache_mode(dc, bio),
+			     s->op.skip)) {
+		s->op.skip = false;
+		s->writeback = true;
+	}
+
 	if (s->op.skip)
 		goto skip;
 
-	if (should_writeback(dc, s->orig_bio))
-		s->writeback = true;
+	trace_bcache_write(s->orig_bio, s->writeback, s->op.skip);
 
 	if (!s->writeback) {
 		s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
 						   dc->disk.bio_split);
 
-		trace_bcache_writethrough(s->orig_bio);
 		closure_bio_submit(bio, cl, s->d);
 	} else {
-		s->op.cache_bio = bio;
-		trace_bcache_writeback(s->orig_bio);
-		bch_writeback_add(dc, bio_sectors(bio));
+		bch_writeback_add(dc);
+
+		if (s->op.flush_journal) {
+			/* Also need to send a flush to the backing device */
+			s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
+							   dc->disk.bio_split);
+
+			bio->bi_size = 0;
+			bio->bi_vcnt = 0;
+			closure_bio_submit(bio, cl, s->d);
+		} else {
+			s->op.cache_bio = bio;
+		}
 	}
 out:
 	closure_call(&s->op.cl, bch_insert_data, NULL, cl);
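In the writeback branch the data initially lands only on the cache device, so when the original request carried REQ_FLUSH/REQ_FUA the backing device still needs an explicit flush. A restatement of that branch with explanatory comments (same logic as the hunk above):

	if (s->op.flush_journal) {
		/* the clone carries the payload into the cache ... */
		s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
						   dc->disk.bio_split);
		/* ... while the original, stripped of its payload, is
		 * submitted to the backing device as an empty flush */
		bio->bi_size = 0;
		bio->bi_vcnt = 0;
		closure_bio_submit(bio, cl, s->d);
	} else {
		s->op.cache_bio = bio;	/* no flush semantics: cache only */
	}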
@@ -1058,7 +1016,6 @@ skip:
 	s->op.skip = true;
 	s->op.cache_bio = s->orig_bio;
 	bio_get(s->op.cache_bio);
-	trace_bcache_write_skip(s->orig_bio);
 
 	if ((bio->bi_rw & REQ_DISCARD) &&
 	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
@@ -1088,9 +1045,10 @@ static void request_nodata(struct cached_dev *dc, struct search *s)
 
 /* Cached devices - read & write stuff */
 
-int bch_get_congested(struct cache_set *c)
+unsigned bch_get_congested(struct cache_set *c)
 {
 	int i;
+	long rand;
 
 	if (!c->congested_read_threshold_us &&
 	    !c->congested_write_threshold_us)
@@ -1106,7 +1064,13 @@ int bch_get_congested(struct cache_set *c)
 
 	i += CONGESTED_MAX;
 
-	return i <= 0 ? 1 : fract_exp_two(i, 6);
+	if (i > 0)
+		i = fract_exp_two(i, 6);
+
+	rand = get_random_int();
+	i -= bitmap_weight(&rand, BITS_PER_LONG);
+
+	return i > 0 ? i : 1;
}
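The return value now scales roughly exponentially in the latency overshoot (fract_exp_two(i, 6) approximates 2^(i/64)) and is jittered downward by the popcount of a random word, about BITS_PER_LONG/2 on average, so bypass decisions near the threshold become probabilistic rather than a hard edge. A standalone sketch of the shape, assuming fract_exp_two() as declared in util.h:

	static unsigned congested_threshold(int i)	/* hypothetical wrapper */
	{
		long rand;

		i += CONGESTED_MAX;
		if (i > 0)
			i = fract_exp_two(i, 6);	/* ~ 2^(i / 64) */

		rand = get_random_int();
		i -= bitmap_weight(&rand, BITS_PER_LONG);	/* random jitter */

		return i > 0 ? i : 1;	/* clamp: nonzero signals congestion */
	}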
 
 static void add_sequential(struct task_struct *t)
@@ -1126,10 +1090,8 @@ static void check_should_skip(struct cached_dev *dc, struct search *s)
 {
 	struct cache_set *c = s->op.c;
 	struct bio *bio = &s->bio.bio;
-
-	long rand;
-	int cutoff = bch_get_congested(c);
 	unsigned mode = cache_mode(dc, bio);
+	unsigned sectors, congested = bch_get_congested(c);
 
 	if (atomic_read(&dc->disk.detaching) ||
 	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
@@ -1147,17 +1109,14 @@ static void check_should_skip(struct cached_dev *dc, struct search *s)
 		goto skip;
 	}
 
-	if (!cutoff) {
-		cutoff = dc->sequential_cutoff >> 9;
+	if (!congested && !dc->sequential_cutoff)
+		goto rescale;
 
-		if (!cutoff)
-			goto rescale;
-
-		if (mode == CACHE_MODE_WRITEBACK &&
-		    (bio->bi_rw & REQ_WRITE) &&
-		    (bio->bi_rw & REQ_SYNC))
-			goto rescale;
-	}
+	if (!congested &&
+	    mode == CACHE_MODE_WRITEBACK &&
+	    (bio->bi_rw & REQ_WRITE) &&
+	    (bio->bi_rw & REQ_SYNC))
+		goto rescale;
 
 	if (dc->sequential_merge) {
 		struct io *i;
@@ -1177,7 +1136,7 @@ found:
 		if (i->sequential + bio->bi_size > i->sequential)
 			i->sequential	+= bio->bi_size;
 
-		i->last			 = bio_end(bio);
+		i->last			 = bio_end_sector(bio);
 		i->jiffies		 = jiffies + msecs_to_jiffies(5000);
 		s->task->sequential_io	 = i->sequential;
 
@@ -1192,12 +1151,19 @@ found:
 		add_sequential(s->task);
 	}
 
-	rand = get_random_int();
-	cutoff -= bitmap_weight(&rand, BITS_PER_LONG);
+	sectors = max(s->task->sequential_io,
+		      s->task->sequential_io_avg) >> 9;
 
-	if (cutoff <= (int) (max(s->task->sequential_io,
-				 s->task->sequential_io_avg) >> 9))
+	if (dc->sequential_cutoff &&
+	    sectors >= dc->sequential_cutoff >> 9) {
+		trace_bcache_bypass_sequential(s->orig_bio);
 		goto skip;
+	}
+
+	if (congested && sectors >= congested) {
+		trace_bcache_bypass_congested(s->orig_bio);
+		goto skip;
+	}
 
 rescale:
 	bch_rescale_priorities(c, bio_sectors(bio));
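Net effect of the rewrite: a request bypasses the cache when the task's recent sequential I/O (current or average, in sectors) exceeds either the configured sequential_cutoff or the congestion-derived threshold, and each path now fires its own tracepoint. The decision in isolation, annotated:

	sectors = max(s->task->sequential_io,
		      s->task->sequential_io_avg) >> 9;

	/* long sequential stream: the backing device handles it well */
	if (dc->sequential_cutoff && sectors >= dc->sequential_cutoff >> 9)
		goto skip;

	/* cache device congested: shed load in proportion to congestion */
	if (congested && sectors >= congested)
		goto skip;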
@@ -1288,30 +1254,25 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
 static int flash_dev_cache_miss(struct btree *b, struct search *s,
 				struct bio *bio, unsigned sectors)
 {
+	struct bio_vec *bv;
+	int i;
+
 	/* Zero fill bio */
 
-	while (bio->bi_idx != bio->bi_vcnt) {
-		struct bio_vec *bv = bio_iovec(bio);
+	bio_for_each_segment(bv, bio, i) {
 		unsigned j = min(bv->bv_len >> 9, sectors);
 
 		void *p = kmap(bv->bv_page);
 		memset(p + bv->bv_offset, 0, j << 9);
 		kunmap(bv->bv_page);
 
-		bv->bv_len	-= j << 9;
-		bv->bv_offset	+= j << 9;
-
-		if (bv->bv_len)
-			return 0;
-
-		bio->bi_sector	+= j;
-		bio->bi_size	-= j << 9;
-
-		bio->bi_idx++;
-		sectors		-= j;
+		sectors -= j;
 	}
 
-	s->op.lookup_done = true;
+	bio_advance(bio, min(sectors << 9, bio->bi_size));
+
+	if (!bio->bi_size)
+		s->op.lookup_done = true;
 
 	return 0;
 }
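Zeroing each segment goes through a temporary kernel mapping so highmem pages are handled correctly; the bvec array itself is left untouched and the bio is advanced once at the end. The per-segment step in isolation, as a sketch:

	static void zero_bvec(struct bio_vec *bv)	/* hypothetical helper */
	{
		void *p = kmap(bv->bv_page);	/* valid for highmem pages too */

		memset(p + bv->bv_offset, 0, bv->bv_len);
		kunmap(bv->bv_page);
	}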
@@ -1338,8 +1299,8 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 		closure_call(&s->op.cl, btree_read_async, NULL, cl);
 	} else if (bio_has_data(bio) || s->op.skip) {
 		bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
-					&KEY(d->id, bio->bi_sector, 0),
-					&KEY(d->id, bio_end(bio), 0));
+					     &KEY(d->id, bio->bi_sector, 0),
+					     &KEY(d->id, bio_end_sector(bio), 0));
 
 		s->writeback	= true;
 		s->op.cache_bio	= bio;
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 254d9ab5707c..57dc4784f4f4 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -30,7 +30,7 @@ struct search {
 };
 
 void bch_cache_read_endio(struct bio *, int);
-int bch_get_congested(struct cache_set *);
+unsigned bch_get_congested(struct cache_set *);
 void bch_insert_data(struct closure *cl);
 void bch_btree_insert_async(struct closure *);
 void bch_cache_read_endio(struct bio *, int);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index f88e2b653a3f..547c4c57b052 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -10,10 +10,13 @@
 #include "btree.h"
 #include "debug.h"
 #include "request.h"
+#include "writeback.h"
 
+#include <linux/blkdev.h>
 #include <linux/buffer_head.h>
 #include <linux/debugfs.h>
 #include <linux/genhd.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/reboot.h>
@@ -342,6 +345,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
 	struct closure *cl = &c->uuid_write.cl;
 	struct uuid_entry *u;
 	unsigned i;
+	char buf[80];
 
 	BUG_ON(!parent);
 	closure_lock(&c->uuid_write, parent);
@@ -362,8 +366,8 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
 			break;
 	}
 
-	pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read",
-		 pkey(&c->uuid_bucket));
+	bch_bkey_to_text(buf, sizeof(buf), k);
+	pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);
 
 	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
 		if (!bch_is_zero(u->uuid, 16))
@@ -543,7 +547,6 @@ void bch_prio_write(struct cache *ca)
 
 	pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
 		 fifo_used(&ca->free_inc), fifo_used(&ca->unused));
-	blktrace_msg(ca, "Starting priorities: " buckets_free(ca));
 
 	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
 		long bucket;
@@ -704,7 +707,8 @@ static void bcache_device_detach(struct bcache_device *d)
 		atomic_set(&d->detaching, 0);
 	}
 
-	bcache_device_unlink(d);
+	if (!d->flush_done)
+		bcache_device_unlink(d);
 
 	d->c->devices[d->id] = NULL;
 	closure_put(&d->c->caching);
@@ -743,13 +747,35 @@ static void bcache_device_free(struct bcache_device *d)
 		mempool_destroy(d->unaligned_bvec);
 	if (d->bio_split)
 		bioset_free(d->bio_split);
+	if (is_vmalloc_addr(d->stripe_sectors_dirty))
+		vfree(d->stripe_sectors_dirty);
+	else
+		kfree(d->stripe_sectors_dirty);
 
 	closure_debug_destroy(&d->cl);
 }
 
-static int bcache_device_init(struct bcache_device *d, unsigned block_size)
+static int bcache_device_init(struct bcache_device *d, unsigned block_size,
+			      sector_t sectors)
 {
 	struct request_queue *q;
+	size_t n;
+
+	if (!d->stripe_size_bits)
+		d->stripe_size_bits = 31;
+
+	d->nr_stripes = round_up(sectors, 1 << d->stripe_size_bits) >>
+		d->stripe_size_bits;
+
+	if (!d->nr_stripes || d->nr_stripes > SIZE_MAX / sizeof(atomic_t))
+		return -ENOMEM;
+
+	n = d->nr_stripes * sizeof(atomic_t);
+	d->stripe_sectors_dirty = n < PAGE_SIZE << 6
+		? kzalloc(n, GFP_KERNEL)
+		: vzalloc(n);
+	if (!d->stripe_sectors_dirty)
+		return -ENOMEM;
 
 	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
 	    !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
@@ -759,6 +785,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
 	    !(q = blk_alloc_queue(GFP_KERNEL)))
 		return -ENOMEM;
 
+	set_capacity(d->disk, sectors);
 	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
 
 	d->disk->major		= bcache_major;
@@ -781,6 +808,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
 	set_bit(QUEUE_FLAG_NONROT,	&d->disk->queue->queue_flags);
 	set_bit(QUEUE_FLAG_DISCARD,	&d->disk->queue->queue_flags);
 
+	blk_queue_flush(q, REQ_FLUSH|REQ_FUA);
+
 	return 0;
 }
 
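For scale: with the default stripe_size_bits of 31 each stripe covers 2^31 sectors (1 TiB), so the dirty-counter array stays tiny and kzalloc() suffices; the vzalloc() fallback only matters with much finer striping. A worked example of the sizing, with a hypothetical 8 TiB backing device:

	sector_t sectors = 8ULL << 31;		/* hypothetical: 8 TiB in 512 B sectors */
	unsigned stripe_size_bits = 31;		/* the default set above */
	size_t nr_stripes = round_up(sectors, 1ULL << stripe_size_bits)
				>> stripe_size_bits;	/* = 8 stripes */
	size_t n = nr_stripes * sizeof(atomic_t);	/* = 32 bytes on most configs */
	/* 32 < PAGE_SIZE << 6, so the kzalloc() path is taken */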
@@ -800,6 +829,17 @@ static void calc_cached_dev_sectors(struct cache_set *c)
 void bch_cached_dev_run(struct cached_dev *dc)
 {
 	struct bcache_device *d = &dc->disk;
+	char buf[SB_LABEL_SIZE + 1];
+	char *env[] = {
+		"DRIVER=bcache",
+		kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
+		NULL,
+		NULL,
+	};
+
+	memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
+	buf[SB_LABEL_SIZE] = '\0';
+	env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
 
 	if (atomic_xchg(&dc->running, 1))
 		return;
@@ -816,10 +856,12 @@ void bch_cached_dev_run(struct cached_dev *dc)
 
 	add_disk(d->disk);
 	bd_link_disk_holder(dc->bdev, dc->disk.disk);
-#if 0
-	char *env[] = { "SYMLINK=label" , NULL };
-	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
-#endif
+	/* won't show up in the uevent file, use udevadm monitor -e instead
+	 * only class / kset properties are persistent */
+	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
+	kfree(env[1]);
+	kfree(env[2]);
+
 	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
 	    sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
 		pr_debug("error creating sysfs link");
@@ -960,6 +1002,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
 	atomic_set(&dc->count, 1);
 
 	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+		bch_sectors_dirty_init(dc);
 		atomic_set(&dc->has_dirty, 1);
 		atomic_inc(&dc->count);
 		bch_writeback_queue(dc);
@@ -1014,6 +1057,14 @@ static void cached_dev_flush(struct closure *cl)
 	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
 	struct bcache_device *d = &dc->disk;
 
+	mutex_lock(&bch_register_lock);
+	d->flush_done = 1;
+
+	if (d->c)
+		bcache_device_unlink(d);
+
+	mutex_unlock(&bch_register_lock);
+
 	bch_cache_accounting_destroy(&dc->accounting);
 	kobject_del(&d->kobj);
 
@@ -1045,7 +1096,8 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
 		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
 	}
 
-	ret = bcache_device_init(&dc->disk, block_size);
+	ret = bcache_device_init(&dc->disk, block_size,
+			 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
 	if (ret)
 		return ret;
 
@@ -1144,11 +1196,10 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
 
 	kobject_init(&d->kobj, &bch_flash_dev_ktype);
 
-	if (bcache_device_init(d, block_bytes(c)))
+	if (bcache_device_init(d, block_bytes(c), u->sectors))
 		goto err;
 
 	bcache_device_attach(d, c, u - c->uuids);
-	set_capacity(d->disk, u->sectors);
 	bch_flash_dev_request_init(d);
 	add_disk(d->disk);
 
@@ -1255,9 +1306,10 @@ static void cache_set_free(struct closure *cl)
 	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
 	free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));
 
-	kfree(c->fill_iter);
 	if (c->bio_split)
 		bioset_free(c->bio_split);
+	if (c->fill_iter)
+		mempool_destroy(c->fill_iter);
 	if (c->bio_meta)
 		mempool_destroy(c->bio_meta);
 	if (c->search)
@@ -1278,11 +1330,9 @@ static void cache_set_free(struct closure *cl)
 static void cache_set_flush(struct closure *cl)
 {
 	struct cache_set *c = container_of(cl, struct cache_set, caching);
+	struct cache *ca;
 	struct btree *b;
-
-	/* Shut down allocator threads */
-	set_bit(CACHE_SET_STOPPING_2, &c->flags);
-	wake_up(&c->alloc_wait);
+	unsigned i;
 
 	bch_cache_accounting_destroy(&c->accounting);
 
@@ -1295,7 +1345,11 @@ static void cache_set_flush(struct closure *cl)
 	/* Should skip this if we're unregistering because of an error */
 	list_for_each_entry(b, &c->btree_cache, list)
 		if (btree_node_dirty(b))
-			bch_btree_write(b, true, NULL);
+			bch_btree_node_write(b, NULL);
+
+	for_each_cache(ca, c, i)
+		if (ca->alloc_thread)
+			kthread_stop(ca->alloc_thread);
 
 	closure_return(cl);
 }
@@ -1303,18 +1357,22 @@ static void cache_set_flush(struct closure *cl)
 static void __cache_set_unregister(struct closure *cl)
 {
 	struct cache_set *c = container_of(cl, struct cache_set, caching);
-	struct cached_dev *dc, *t;
+	struct cached_dev *dc;
 	size_t i;
 
 	mutex_lock(&bch_register_lock);
 
-	if (test_bit(CACHE_SET_UNREGISTERING, &c->flags))
-		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
-			bch_cached_dev_detach(dc);
-
 	for (i = 0; i < c->nr_uuids; i++)
-		if (c->devices[i] && UUID_FLASH_ONLY(&c->uuids[i]))
-			bcache_device_stop(c->devices[i]);
+		if (c->devices[i]) {
+			if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
+			    test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
+				dc = container_of(c->devices[i],
+						  struct cached_dev, disk);
+				bch_cached_dev_detach(dc);
+			} else {
+				bcache_device_stop(c->devices[i]);
+			}
+		}
 
 	mutex_unlock(&bch_register_lock);
 
@@ -1373,9 +1431,9 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 	c->btree_pages = max_t(int, c->btree_pages / 4,
 			       BTREE_MAX_PAGES);
 
-	init_waitqueue_head(&c->alloc_wait);
+	c->sort_crit_factor = int_sqrt(c->btree_pages);
+
 	mutex_init(&c->bucket_lock);
-	mutex_init(&c->fill_lock);
 	mutex_init(&c->sort_lock);
 	spin_lock_init(&c->sort_time_lock);
 	closure_init_unlocked(&c->sb_write);
@@ -1401,8 +1459,8 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 	    !(c->bio_meta = mempool_create_kmalloc_pool(2,
 				sizeof(struct bbio) + sizeof(struct bio_vec) *
 				bucket_pages(c))) ||
+	    !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
 	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
-	    !(c->fill_iter = kmalloc(iter_size, GFP_KERNEL)) ||
 	    !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
 	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
 	    bch_journal_alloc(c) ||
@@ -1410,8 +1468,6 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 	    bch_open_buckets_alloc(c))
 		goto err;
 
-	c->fill_iter->size = sb->bucket_size / sb->block_size;
-
 	c->congested_read_threshold_us	= 2000;
 	c->congested_write_threshold_us	= 20000;
 	c->error_limit	= 8 << IO_ERROR_SHIFT;
@@ -1496,9 +1552,10 @@ static void run_cache_set(struct cache_set *c)
 	 */
 	bch_journal_next(&c->journal);
 
+	err = "error starting allocator thread";
 	for_each_cache(ca, c, i)
-		closure_call(&ca->alloc, bch_allocator_thread,
-			     system_wq, &c->cl);
+		if (bch_cache_allocator_start(ca))
+			goto err;
 
 	/*
 	 * First place it's safe to allocate: btree_check() and
@@ -1531,17 +1588,16 @@ static void run_cache_set(struct cache_set *c)
 
 	bch_btree_gc_finish(c);
 
+	err = "error starting allocator thread";
 	for_each_cache(ca, c, i)
-		closure_call(&ca->alloc, bch_allocator_thread,
-			     ca->alloc_workqueue, &c->cl);
+		if (bch_cache_allocator_start(ca))
+			goto err;
 
 	mutex_lock(&c->bucket_lock);
 	for_each_cache(ca, c, i)
 		bch_prio_write(ca);
 	mutex_unlock(&c->bucket_lock);
 
-	wake_up(&c->alloc_wait);
-
 	err = "cannot allocate new UUID bucket";
 	if (__uuid_write(c))
 		goto err_unlock_gc;
@@ -1552,7 +1608,7 @@ static void run_cache_set(struct cache_set *c)
 		goto err_unlock_gc;
 
 	bkey_copy_key(&c->root->key, &MAX_KEY);
-	bch_btree_write(c->root, true, &op);
+	bch_btree_node_write(c->root, &op.cl);
 
 	bch_btree_set_root(c->root);
 	rw_unlock(true, c->root);
@@ -1673,9 +1729,6 @@ void bch_cache_release(struct kobject *kobj)
 
 	bio_split_pool_free(&ca->bio_split_hook);
 
-	if (ca->alloc_workqueue)
-		destroy_workqueue(ca->alloc_workqueue);
-
 	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
 	kfree(ca->prio_buckets);
 	vfree(ca->buckets);
@@ -1723,7 +1776,6 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
 	    !(ca->prio_buckets	= kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
 					  2, GFP_KERNEL)) ||
 	    !(ca->disk_buckets	= alloc_bucket_pages(GFP_KERNEL, ca)) ||
-	    !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
 	    bio_split_pool_init(&ca->bio_split_hook))
 		return -ENOMEM;
 
@@ -1786,6 +1838,36 @@ static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
 kobj_attribute_write(register,		register_bcache);
 kobj_attribute_write(register_quiet,	register_bcache);
 
+static bool bch_is_open_backing(struct block_device *bdev) {
+	struct cache_set *c, *tc;
+	struct cached_dev *dc, *t;
+
+	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
+		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
+			if (dc->bdev == bdev)
+				return true;
+	list_for_each_entry_safe(dc, t, &uncached_devices, list)
+		if (dc->bdev == bdev)
+			return true;
+	return false;
+}
+
+static bool bch_is_open_cache(struct block_device *bdev) {
+	struct cache_set *c, *tc;
+	struct cache *ca;
+	unsigned i;
+
+	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
+		for_each_cache(ca, c, i)
+			if (ca->bdev == bdev)
+				return true;
+	return false;
+}
+
+static bool bch_is_open(struct block_device *bdev) {
+	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
+}
+
 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 			       const char *buffer, size_t size)
 {
@@ -1810,8 +1892,13 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 			  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
 			  sb);
 	if (IS_ERR(bdev)) {
-		if (bdev == ERR_PTR(-EBUSY))
-			err = "device busy";
+		if (bdev == ERR_PTR(-EBUSY)) {
+			bdev = lookup_bdev(strim(path));
+			if (!IS_ERR(bdev) && bch_is_open(bdev))
+				err = "device already registered";
+			else
+				err = "device busy";
+		}
 		goto err;
 	}
 
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 4d9cca47e4c6..12a2c2846f99 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -9,7 +9,9 @@
 #include "sysfs.h"
 #include "btree.h"
 #include "request.h"
+#include "writeback.h"
 
+#include <linux/blkdev.h>
 #include <linux/sort.h>
 
 static const char * const cache_replacement_policies[] = {
@@ -79,6 +81,9 @@ rw_attribute(writeback_rate_p_term_inverse);
 rw_attribute(writeback_rate_d_smooth);
 read_attribute(writeback_rate_debug);
 
+read_attribute(stripe_size);
+read_attribute(partial_stripes_expensive);
+
 rw_attribute(synchronous);
 rw_attribute(journal_delay_ms);
 rw_attribute(discard);
@@ -127,7 +132,7 @@ SHOW(__bch_cached_dev)
 		char derivative[20];
 		char target[20];
 		bch_hprint(dirty,
-			   atomic_long_read(&dc->disk.sectors_dirty) << 9);
+			   bcache_dev_sectors_dirty(&dc->disk) << 9);
 		bch_hprint(derivative,	dc->writeback_rate_derivative << 9);
 		bch_hprint(target,	dc->writeback_rate_target << 9);
 
@@ -143,7 +148,10 @@ SHOW(__bch_cached_dev)
 	}
 
 	sysfs_hprint(dirty_data,
-		     atomic_long_read(&dc->disk.sectors_dirty) << 9);
+		     bcache_dev_sectors_dirty(&dc->disk) << 9);
+
+	sysfs_hprint(stripe_size,	(1 << dc->disk.stripe_size_bits) << 9);
+	var_printf(partial_stripes_expensive,	"%u");
 
 	var_printf(sequential_merge,	"%i");
 	var_hprint(sequential_cutoff);
@@ -170,6 +178,7 @@ STORE(__cached_dev)
 					     disk.kobj);
 	unsigned v = size;
 	struct cache_set *c;
+	struct kobj_uevent_env *env;
 
 #define d_strtoul(var)		sysfs_strtoul(var, dc->var)
 #define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)
@@ -214,6 +223,7 @@ STORE(__cached_dev)
 	}
 
 	if (attr == &sysfs_label) {
+		/* note: endlines are preserved */
 		memcpy(dc->sb.label, buf, SB_LABEL_SIZE);
 		bch_write_bdev_super(dc, NULL);
 		if (dc->disk.c) {
@@ -221,6 +231,15 @@ STORE(__cached_dev)
 					    buf, SB_LABEL_SIZE);
 			bch_uuid_write(dc->disk.c);
 		}
+		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
+		if (!env)
+			return -ENOMEM;
+		add_uevent_var(env, "DRIVER=bcache");
+		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid),
+		add_uevent_var(env, "CACHED_LABEL=%s", buf);
+		kobject_uevent_env(
+			&disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp);
+		kfree(env);
 	}
 
 	if (attr == &sysfs_attach) {
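This is the same uevent-building pattern as in bch_cached_dev_run(), but with the kobj_uevent_env helpers doing the bookkeeping; udev rules keyed on CACHED_LABEL can then react to label changes. The skeleton of the pattern in isolation:

	env = kzalloc(sizeof(*env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	add_uevent_var(env, "DRIVER=bcache");
	add_uevent_var(env, "CACHED_LABEL=%s", buf);

	/* env->envp is the accumulated NULL-terminated variable array */
	kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
			   KOBJ_CHANGE, env->envp);
	kfree(env);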
@@ -284,6 +303,8 @@ static struct attribute *bch_cached_dev_files[] = {
 	&sysfs_writeback_rate_d_smooth,
 	&sysfs_writeback_rate_debug,
 	&sysfs_dirty_data,
+	&sysfs_stripe_size,
+	&sysfs_partial_stripes_expensive,
 	&sysfs_sequential_cutoff,
 	&sysfs_sequential_merge,
 	&sysfs_clear_stats,
@@ -665,12 +686,10 @@ SHOW(__bch_cache)
 	int cmp(const void *l, const void *r)
 	{	return *((uint16_t *) r) - *((uint16_t *) l); }
 
-	/* Number of quantiles we compute */
-	const unsigned nq = 31;
-
 	size_t n = ca->sb.nbuckets, i, unused, btree;
 	uint64_t sum = 0;
-	uint16_t q[nq], *p, *cached;
+	/* Compute 31 quantiles */
+	uint16_t q[31], *p, *cached;
 	ssize_t ret;
 
 	cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
@@ -703,26 +722,29 @@ SHOW(__bch_cache)
 	if (n)
 		do_div(sum, n);
 
-	for (i = 0; i < nq; i++)
-		q[i] = INITIAL_PRIO - cached[n * (i + 1) / (nq + 1)];
+	for (i = 0; i < ARRAY_SIZE(q); i++)
+		q[i] = INITIAL_PRIO - cached[n * (i + 1) /
+			(ARRAY_SIZE(q) + 1)];
 
 	vfree(p);
 
-	ret = snprintf(buf, PAGE_SIZE,
-		       "Unused:		%zu%%\n"
-		       "Metadata:	%zu%%\n"
-		       "Average:	%llu\n"
-		       "Sectors per Q:	%zu\n"
-		       "Quantiles:	[",
-		       unused * 100 / (size_t) ca->sb.nbuckets,
-		       btree * 100 / (size_t) ca->sb.nbuckets, sum,
-		       n * ca->sb.bucket_size / (nq + 1));
+	ret = scnprintf(buf, PAGE_SIZE,
+			"Unused:		%zu%%\n"
+			"Metadata:	%zu%%\n"
+			"Average:	%llu\n"
+			"Sectors per Q:	%zu\n"
+			"Quantiles:	[",
+			unused * 100 / (size_t) ca->sb.nbuckets,
+			btree * 100 / (size_t) ca->sb.nbuckets, sum,
+			n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));
 
-	for (i = 0; i < nq && ret < (ssize_t) PAGE_SIZE; i++)
-		ret += snprintf(buf + ret, PAGE_SIZE - ret,
-				i < nq - 1 ? "%u " : "%u]\n", q[i]);
-
-	buf[PAGE_SIZE - 1] = '\0';
+	for (i = 0; i < ARRAY_SIZE(q); i++)
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+				 "%u ", q[i]);
+	ret--;
+
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");
+
 	return ret;
 }
 
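scnprintf() returns the number of bytes actually written (unlike snprintf(), which returns what would have been written), so the running offset can never walk past PAGE_SIZE; sizing both loops off ARRAY_SIZE(q) keeps the quantile count in one place. The append pattern in isolation, as a sketch:

	ret = 0;
	for (i = 0; i < ARRAY_SIZE(q); i++)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%u ", q[i]);

	ret--;					/* drop the trailing space */
	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");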
diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c
index 983f9bb411bc..f7b6c197f90f 100644
--- a/drivers/md/bcache/trace.c
+++ b/drivers/md/bcache/trace.c
@@ -2,6 +2,7 @@
2#include "btree.h" 2#include "btree.h"
3#include "request.h" 3#include "request.h"
4 4
5#include <linux/blktrace_api.h>
5#include <linux/module.h> 6#include <linux/module.h>
6 7
7#define CREATE_TRACE_POINTS 8#define CREATE_TRACE_POINTS
@@ -9,18 +10,44 @@
9 10
10EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_start); 11EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_start);
11EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_end); 12EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_end);
12EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_passthrough); 13
13EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_hit); 14EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_bypass_sequential);
14EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_miss); 15EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_bypass_congested);
16
17EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read);
18EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_write);
15EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read_retry); 19EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read_retry);
16EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writethrough); 20
17EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback); 21EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_insert);
18EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_write_skip); 22
23EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_replay_key);
24EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_write);
25EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_full);
26EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_entry_full);
27
28EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_cache_cannibalize);
29
19EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_read); 30EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_read);
20EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_write); 31EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_write);
21EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_write_dirty); 32
22EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read_dirty); 33EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_alloc);
23EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_write); 34EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_alloc_fail);
24EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_insert); 35EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_free);
36
37EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_gc_coalesce);
25EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_start); 38EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_start);
26EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_end); 39EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_end);
40EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_copy);
41EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_copy_collision);
42
43EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_insert_key);
44
45EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_split);
46EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_compact);
47EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_set_root);
48
49EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_invalidate);
50EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_fail);
51
52EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback);
53EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback_collision);
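
After this change trace.c does nothing but instantiate and export tracepoints: CREATE_TRACE_POINTS may be defined in exactly one translation unit before including the event header, and EXPORT_TRACEPOINT_SYMBOL_GPL() makes each tracepoint callable from other modules. A minimal sketch of the pattern, with a hypothetical "foo" event system standing in for bcache's header:

        /* include/trace/events/foo.h -- minimal TRACE_EVENT skeleton; the
         * "foo" system and event are hypothetical stand-ins. */
        #undef TRACE_SYSTEM
        #define TRACE_SYSTEM foo

        #if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
        #define _TRACE_FOO_H

        #include <linux/tracepoint.h>

        TRACE_EVENT(foo_request_start,
                TP_PROTO(unsigned int sectors),
                TP_ARGS(sectors),
                TP_STRUCT__entry(
                        __field(unsigned int, sectors)
                ),
                TP_fast_assign(
                        __entry->sectors = sectors;
                ),
                TP_printk("%u sectors", __entry->sectors)
        );

        #endif /* _TRACE_FOO_H */

        #include <trace/define_trace.h>

        /* foo_trace.c -- the single translation unit that instantiates the
         * tracepoints and exports them for other modules. */
        #define CREATE_TRACE_POINTS
        #include <trace/events/foo.h>

        EXPORT_TRACEPOINT_SYMBOL_GPL(foo_request_start);
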
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index da3a99e85b1e..98eb81159a22 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -228,23 +228,6 @@ start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
228 } 228 }
229} 229}
230 230
231int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp)
232{
233 int i;
234 struct bio_vec *bv;
235
236 bio_for_each_segment(bv, bio, i) {
237 bv->bv_page = alloc_page(gfp);
238 if (!bv->bv_page) {
239 while (bv-- != bio->bi_io_vec + bio->bi_idx)
240 __free_page(bv->bv_page);
241 return -ENOMEM;
242 }
243 }
244
245 return 0;
246}
247
248/* 231/*
249 * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group (Any 232 * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group (Any
250 * use permitted, subject to terms of PostgreSQL license; see.) 233 * use permitted, subject to terms of PostgreSQL license; see.)
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 577393e38c3a..1ae2a73ad85f 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -15,8 +15,6 @@
15 15
16struct closure; 16struct closure;
17 17
18#include <trace/events/bcache.h>
19
20#ifdef CONFIG_BCACHE_EDEBUG 18#ifdef CONFIG_BCACHE_EDEBUG
21 19
22#define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0) 20#define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0)
@@ -566,12 +564,8 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
566 return x; 564 return x;
567} 565}
568 566
569#define bio_end(bio) ((bio)->bi_sector + bio_sectors(bio))
570
571void bch_bio_map(struct bio *bio, void *base); 567void bch_bio_map(struct bio *bio, void *base);
572 568
573int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp);
574
575static inline sector_t bdev_sectors(struct block_device *bdev) 569static inline sector_t bdev_sectors(struct block_device *bdev)
576{ 570{
577 return bdev->bd_inode->i_size >> 9; 571 return bdev->bd_inode->i_size >> 9;
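
The util.c/util.h hunks delete the private bch_bio_alloc_pages() and its prototype because a generic block-layer helper is used instead; the writeback.c hunk below calls bio_alloc_pages(), which by the same contract returns 0 on success and frees any partially allocated pages itself on failure. A caller sketch under that assumption:

        /* Fragment: caller of the generic helper, assuming the same
         * contract as the deleted bch_bio_alloc_pages(): 0 on success,
         * -ENOMEM after undoing any partial allocation. */
        static int fill_bio(struct bio *bio)
        {
                if (bio_alloc_pages(bio, GFP_KERNEL))
                        return -ENOMEM;  /* nothing for the caller to undo */

                /* every bio_vec in the bio now has a page attached */
                return 0;
        }
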
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 2714ed3991d1..22cbff551628 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -9,6 +9,9 @@
9#include "bcache.h" 9#include "bcache.h"
10#include "btree.h" 10#include "btree.h"
11#include "debug.h" 11#include "debug.h"
12#include "writeback.h"
13
14#include <trace/events/bcache.h>
12 15
13static struct workqueue_struct *dirty_wq; 16static struct workqueue_struct *dirty_wq;
14 17
@@ -36,7 +39,7 @@ static void __update_writeback_rate(struct cached_dev *dc)
36 39
37 int change = 0; 40 int change = 0;
38 int64_t error; 41 int64_t error;
39 int64_t dirty = atomic_long_read(&dc->disk.sectors_dirty); 42 int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
40 int64_t derivative = dirty - dc->disk.sectors_dirty_last; 43 int64_t derivative = dirty - dc->disk.sectors_dirty_last;
41 44
42 dc->disk.sectors_dirty_last = dirty; 45 dc->disk.sectors_dirty_last = dirty;
@@ -105,6 +108,31 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
105 return KEY_DIRTY(k); 108 return KEY_DIRTY(k);
106} 109}
107 110
111static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
112{
113 uint64_t stripe;
114 unsigned nr_sectors = KEY_SIZE(k);
115 struct cached_dev *dc = container_of(buf, struct cached_dev,
116 writeback_keys);
117 unsigned stripe_size = 1 << dc->disk.stripe_size_bits;
118
119 if (!KEY_DIRTY(k))
120 return false;
121
122 stripe = KEY_START(k) >> dc->disk.stripe_size_bits;
123 while (1) {
124 if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) !=
125 stripe_size)
126 return false;
127
128 if (nr_sectors <= stripe_size)
129 return true;
130
131 nr_sectors -= stripe_size;
132 stripe++;
133 }
134}
135
108static void dirty_init(struct keybuf_key *w) 136static void dirty_init(struct keybuf_key *w)
109{ 137{
110 struct dirty_io *io = w->private; 138 struct dirty_io *io = w->private;
@@ -149,7 +177,22 @@ static void refill_dirty(struct closure *cl)
149 searched_from_start = true; 177 searched_from_start = true;
150 } 178 }
151 179
152 bch_refill_keybuf(dc->disk.c, buf, &end); 180 if (dc->partial_stripes_expensive) {
181 uint64_t i;
182
183 for (i = 0; i < dc->disk.nr_stripes; i++)
184 if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
185 1 << dc->disk.stripe_size_bits)
186 goto full_stripes;
187
188 goto normal_refill;
189full_stripes:
190 bch_refill_keybuf(dc->disk.c, buf, &end,
191 dirty_full_stripe_pred);
192 } else {
193normal_refill:
194 bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
195 }
153 196
154 if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) { 197 if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) {
155 /* Searched the entire btree - delay awhile */ 198 /* Searched the entire btree - delay awhile */
@@ -181,10 +224,8 @@ void bch_writeback_queue(struct cached_dev *dc)
181 } 224 }
182} 225}
183 226
184void bch_writeback_add(struct cached_dev *dc, unsigned sectors) 227void bch_writeback_add(struct cached_dev *dc)
185{ 228{
186 atomic_long_add(sectors, &dc->disk.sectors_dirty);
187
188 if (!atomic_read(&dc->has_dirty) && 229 if (!atomic_read(&dc->has_dirty) &&
189 !atomic_xchg(&dc->has_dirty, 1)) { 230 !atomic_xchg(&dc->has_dirty, 1)) {
190 atomic_inc(&dc->count); 231 atomic_inc(&dc->count);
@@ -203,6 +244,34 @@ void bch_writeback_add(struct cached_dev *dc, unsigned sectors)
203 } 244 }
204} 245}
205 246
247void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
248 uint64_t offset, int nr_sectors)
249{
250 struct bcache_device *d = c->devices[inode];
251 unsigned stripe_size, stripe_offset;
252 uint64_t stripe;
253
254 if (!d)
255 return;
256
257 stripe_size = 1 << d->stripe_size_bits;
258 stripe = offset >> d->stripe_size_bits;
259 stripe_offset = offset & (stripe_size - 1);
260
261 while (nr_sectors) {
262 int s = min_t(unsigned, abs(nr_sectors),
263 stripe_size - stripe_offset);
264
265 if (nr_sectors < 0)
266 s = -s;
267
268 atomic_add(s, d->stripe_sectors_dirty + stripe);
269 nr_sectors -= s;
270 stripe_offset = 0;
271 stripe++;
272 }
273}
274
206/* Background writeback - IO loop */ 275/* Background writeback - IO loop */
207 276
208static void dirty_io_destructor(struct closure *cl) 277static void dirty_io_destructor(struct closure *cl)
@@ -216,9 +285,10 @@ static void write_dirty_finish(struct closure *cl)
216 struct dirty_io *io = container_of(cl, struct dirty_io, cl); 285 struct dirty_io *io = container_of(cl, struct dirty_io, cl);
217 struct keybuf_key *w = io->bio.bi_private; 286 struct keybuf_key *w = io->bio.bi_private;
218 struct cached_dev *dc = io->dc; 287 struct cached_dev *dc = io->dc;
219 struct bio_vec *bv = bio_iovec_idx(&io->bio, io->bio.bi_vcnt); 288 struct bio_vec *bv;
289 int i;
220 290
221 while (bv-- != io->bio.bi_io_vec) 291 bio_for_each_segment_all(bv, &io->bio, i)
222 __free_page(bv->bv_page); 292 __free_page(bv->bv_page);
223 293
224 /* This is kind of a dumb way of signalling errors. */ 294 /* This is kind of a dumb way of signalling errors. */
@@ -236,10 +306,12 @@ static void write_dirty_finish(struct closure *cl)
236 for (i = 0; i < KEY_PTRS(&w->key); i++) 306 for (i = 0; i < KEY_PTRS(&w->key); i++)
237 atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin); 307 atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
238 308
239 pr_debug("clearing %s", pkey(&w->key));
240 bch_btree_insert(&op, dc->disk.c); 309 bch_btree_insert(&op, dc->disk.c);
241 closure_sync(&op.cl); 310 closure_sync(&op.cl);
242 311
312 if (op.insert_collision)
313 trace_bcache_writeback_collision(&w->key);
314
243 atomic_long_inc(op.insert_collision 315 atomic_long_inc(op.insert_collision
244 ? &dc->disk.c->writeback_keys_failed 316 ? &dc->disk.c->writeback_keys_failed
245 : &dc->disk.c->writeback_keys_done); 317 : &dc->disk.c->writeback_keys_done);
@@ -275,7 +347,6 @@ static void write_dirty(struct closure *cl)
275 io->bio.bi_bdev = io->dc->bdev; 347 io->bio.bi_bdev = io->dc->bdev;
276 io->bio.bi_end_io = dirty_endio; 348 io->bio.bi_end_io = dirty_endio;
277 349
278 trace_bcache_write_dirty(&io->bio);
279 closure_bio_submit(&io->bio, cl, &io->dc->disk); 350 closure_bio_submit(&io->bio, cl, &io->dc->disk);
280 351
281 continue_at(cl, write_dirty_finish, dirty_wq); 352 continue_at(cl, write_dirty_finish, dirty_wq);
@@ -296,7 +367,6 @@ static void read_dirty_submit(struct closure *cl)
296{ 367{
297 struct dirty_io *io = container_of(cl, struct dirty_io, cl); 368 struct dirty_io *io = container_of(cl, struct dirty_io, cl);
298 369
299 trace_bcache_read_dirty(&io->bio);
300 closure_bio_submit(&io->bio, cl, &io->dc->disk); 370 closure_bio_submit(&io->bio, cl, &io->dc->disk);
301 371
302 continue_at(cl, write_dirty, dirty_wq); 372 continue_at(cl, write_dirty, dirty_wq);
@@ -349,10 +419,10 @@ static void read_dirty(struct closure *cl)
349 io->bio.bi_rw = READ; 419 io->bio.bi_rw = READ;
350 io->bio.bi_end_io = read_dirty_endio; 420 io->bio.bi_end_io = read_dirty_endio;
351 421
352 if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL)) 422 if (bio_alloc_pages(&io->bio, GFP_KERNEL))
353 goto err_free; 423 goto err_free;
354 424
355 pr_debug("%s", pkey(&w->key)); 425 trace_bcache_writeback(&w->key);
356 426
357 closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl); 427 closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl);
358 428
@@ -375,12 +445,49 @@ err:
375 refill_dirty(cl); 445 refill_dirty(cl);
376} 446}
377 447
448/* Init */
449
450static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
451 struct cached_dev *dc)
452{
453 struct bkey *k;
454 struct btree_iter iter;
455
456 bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
457 while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
458 if (!b->level) {
459 if (KEY_INODE(k) > dc->disk.id)
460 break;
461
462 if (KEY_DIRTY(k))
463 bcache_dev_sectors_dirty_add(b->c, dc->disk.id,
464 KEY_START(k),
465 KEY_SIZE(k));
466 } else {
467 btree(sectors_dirty_init, k, b, op, dc);
468 if (KEY_INODE(k) > dc->disk.id)
469 break;
470
471 cond_resched();
472 }
473
474 return 0;
475}
476
477void bch_sectors_dirty_init(struct cached_dev *dc)
478{
479 struct btree_op op;
480
481 bch_btree_op_init_stack(&op);
482 btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
483}
484
378void bch_cached_dev_writeback_init(struct cached_dev *dc) 485void bch_cached_dev_writeback_init(struct cached_dev *dc)
379{ 486{
380 closure_init_unlocked(&dc->writeback); 487 closure_init_unlocked(&dc->writeback);
381 init_rwsem(&dc->writeback_lock); 488 init_rwsem(&dc->writeback_lock);
382 489
383 bch_keybuf_init(&dc->writeback_keys, dirty_pred); 490 bch_keybuf_init(&dc->writeback_keys);
384 491
385 dc->writeback_metadata = true; 492 dc->writeback_metadata = true;
386 dc->writeback_running = true; 493 dc->writeback_running = true;
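
The writeback.c changes replace the single sectors_dirty counter with per-stripe atomics: bcache_dev_sectors_dirty_add() walks a signed sector delta across stripe boundaries so each stripe's count stays exact, which is what lets dirty_full_stripe_pred() and refill_dirty() find completely dirty stripes cheaply. A userspace demo of just the splitting arithmetic, with an 8-sector stripe chosen only to keep the numbers small:

        /* Userspace demo of the stripe-splitting arithmetic only; the
         * kernel version uses atomic_add() on d->stripe_sectors_dirty. */
        #include <stdio.h>
        #include <stdlib.h>

        #define STRIPE_SIZE_BITS 3              /* 1 << 3 = 8 sectors per stripe */

        static void dirty_add(long *stripes, unsigned long offset, int nr_sectors)
        {
                unsigned int stripe_size = 1 << STRIPE_SIZE_BITS;
                unsigned long stripe = offset >> STRIPE_SIZE_BITS;
                unsigned int stripe_offset = offset & (stripe_size - 1);

                while (nr_sectors) {
                        int s = abs(nr_sectors);

                        if ((unsigned int)s > stripe_size - stripe_offset)
                                s = stripe_size - stripe_offset;
                        if (nr_sectors < 0)
                                s = -s;

                        stripes[stripe] += s;   /* atomic_add() in the kernel */
                        nr_sectors -= s;
                        stripe_offset = 0;
                        stripe++;
                }
        }

        int main(void)
        {
                long stripes[4] = { 0 };

                dirty_add(stripes, 6, 10);      /* crosses the stripe 0/1 boundary */
                printf("%ld %ld %ld %ld\n",     /* prints: 2 8 0 0 */
                       stripes[0], stripes[1], stripes[2], stripes[3]);
                return 0;
        }
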
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
new file mode 100644
index 000000000000..c91f61bb95b6
--- /dev/null
+++ b/drivers/md/bcache/writeback.h
@@ -0,0 +1,64 @@
1#ifndef _BCACHE_WRITEBACK_H
2#define _BCACHE_WRITEBACK_H
3
4#define CUTOFF_WRITEBACK 40
5#define CUTOFF_WRITEBACK_SYNC 70
6
7static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
8{
9 uint64_t i, ret = 0;
10
11 for (i = 0; i < d->nr_stripes; i++)
12 ret += atomic_read(d->stripe_sectors_dirty + i);
13
14 return ret;
15}
16
17static inline bool bcache_dev_stripe_dirty(struct bcache_device *d,
18 uint64_t offset,
19 unsigned nr_sectors)
20{
21 uint64_t stripe = offset >> d->stripe_size_bits;
22
23 while (1) {
24 if (atomic_read(d->stripe_sectors_dirty + stripe))
25 return true;
26
27 if (nr_sectors <= 1 << d->stripe_size_bits)
28 return false;
29
30 nr_sectors -= 1 << d->stripe_size_bits;
31 stripe++;
32 }
33}
34
35static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
36 unsigned cache_mode, bool would_skip)
37{
38 unsigned in_use = dc->disk.c->gc_stats.in_use;
39
40 if (cache_mode != CACHE_MODE_WRITEBACK ||
41 atomic_read(&dc->disk.detaching) ||
42 in_use > CUTOFF_WRITEBACK_SYNC)
43 return false;
44
45 if (dc->partial_stripes_expensive &&
46 bcache_dev_stripe_dirty(&dc->disk, bio->bi_sector,
47 bio_sectors(bio)))
48 return true;
49
50 if (would_skip)
51 return false;
52
53 return bio->bi_rw & REQ_SYNC ||
54 in_use <= CUTOFF_WRITEBACK;
55}
56
57void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
58void bch_writeback_queue(struct cached_dev *);
59void bch_writeback_add(struct cached_dev *);
60
61void bch_sectors_dirty_init(struct cached_dev *dc);
62void bch_cached_dev_writeback_init(struct cached_dev *);
63
64#endif
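
should_writeback() orders its checks from hard vetoes to soft preferences: wrong cache mode, a detaching device, or more than CUTOFF_WRITEBACK_SYNC (70%) of buckets in use always refuse; a bio touching an already dirty stripe is always accepted when partial stripes are expensive; otherwise the bypass decision stands and only REQ_SYNC bios or occupancy at or below CUTOFF_WRITEBACK (40%) qualify. So a plain write at 55% occupancy is written through, while the same write flagged REQ_SYNC is cached. A userspace mirror of the occupancy logic only (the mode, detach, and stripe checks are omitted):

        #include <stdbool.h>
        #include <stdio.h>

        #define CUTOFF_WRITEBACK        40
        #define CUTOFF_WRITEBACK_SYNC   70

        static bool writeback(unsigned int in_use, bool sync, bool would_skip)
        {
                if (in_use > CUTOFF_WRITEBACK_SYNC)
                        return false;           /* cache too full, period */
                if (would_skip)
                        return false;           /* bypass decision stands */
                return sync || in_use <= CUTOFF_WRITEBACK;
        }

        int main(void)
        {
                printf("%d %d %d\n",
                       writeback(55, false, false),     /* 0: async, above 40% */
                       writeback(55, true, false),      /* 1: REQ_SYNC overrides */
                       writeback(75, true, false));     /* 0: above sync cutoff */
                return 0;
        }
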
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index dc112a7137fe..4296155090b2 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -959,23 +959,21 @@ out:
959 return r; 959 return r;
960} 960}
961 961
962static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock) 962static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
963{ 963{
964 struct entry *e = hash_lookup(mq, oblock); 964 struct mq_policy *mq = to_mq_policy(p);
965 struct entry *e;
966
967 mutex_lock(&mq->lock);
968
969 e = hash_lookup(mq, oblock);
965 970
966 BUG_ON(!e || !e->in_cache); 971 BUG_ON(!e || !e->in_cache);
967 972
968 del(mq, e); 973 del(mq, e);
969 e->in_cache = false; 974 e->in_cache = false;
970 push(mq, e); 975 push(mq, e);
971}
972 976
973static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
974{
975 struct mq_policy *mq = to_mq_policy(p);
976
977 mutex_lock(&mq->lock);
978 remove_mapping(mq, oblock);
979 mutex_unlock(&mq->lock); 977 mutex_unlock(&mq->lock);
980} 978}
981 979
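
The dm-cache hunk folds the single-use remove_mapping() helper into mq_remove_mapping(), so the hash lookup and the queue manipulation sit visibly inside the same mutex-held region; behavior is unchanged, only the indirection is gone. The resulting shape, with generic names rather than the dm-cache ones:

        /* Sketch of the fold: one public entry point owns the lock and
         * the helper disappears. Names generic, not the dm-cache ones. */
        static void cache_remove(struct policy *p, u64 block)
        {
                struct entry *e;

                mutex_lock(&p->lock);
                e = lookup(p, block);           /* was a separate helper */
                BUG_ON(!e || !e->cached);
                unlink(p, e);
                e->cached = false;
                requeue(p, e);
                mutex_unlock(&p->lock);
        }
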
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 957a719e8c2f..df7b0a06b0ea 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2290,12 +2290,18 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2290 d = r10_bio->devs[1].devnum; 2290 d = r10_bio->devs[1].devnum;
2291 wbio = r10_bio->devs[1].bio; 2291 wbio = r10_bio->devs[1].bio;
2292 wbio2 = r10_bio->devs[1].repl_bio; 2292 wbio2 = r10_bio->devs[1].repl_bio;
2293 /* Need to test wbio2->bi_end_io before we call
2294 * generic_make_request as if the former is NULL,
2295 * the latter is free to free wbio2.
2296 */
2297 if (wbio2 && !wbio2->bi_end_io)
2298 wbio2 = NULL;
2293 if (wbio->bi_end_io) { 2299 if (wbio->bi_end_io) {
2294 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2300 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2295 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); 2301 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2296 generic_make_request(wbio); 2302 generic_make_request(wbio);
2297 } 2303 }
2298 if (wbio2 && wbio2->bi_end_io) { 2304 if (wbio2) {
2299 atomic_inc(&conf->mirrors[d].replacement->nr_pending); 2305 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2300 md_sync_acct(conf->mirrors[d].replacement->bdev, 2306 md_sync_acct(conf->mirrors[d].replacement->bdev,
2301 bio_sectors(wbio2)); 2307 bio_sectors(wbio2));
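
The raid10 comment above pins down an ordering hazard: once generic_make_request(wbio) runs, its completion path may free wbio2, so wbio2's fields must be examined (and the pointer dropped) before the first submission. A generic fragment of the decide-before-submit pattern, with invented names:

        /* Fragment: decide the second bio's fate before submitting the
         * first, because the first completion may free the second. */
        if (b && !b->bi_end_io)
                b = NULL;                       /* examine b only while it is safe */

        if (a->bi_end_io)
                generic_make_request(a);        /* completion may free b */
        if (b)
                generic_make_request(b);
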
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2bf094a587cb..78ea44336e75 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3462,6 +3462,7 @@ static void handle_stripe(struct stripe_head *sh)
3462 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 3462 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
3463 set_bit(STRIPE_SYNCING, &sh->state); 3463 set_bit(STRIPE_SYNCING, &sh->state);
3464 clear_bit(STRIPE_INSYNC, &sh->state); 3464 clear_bit(STRIPE_INSYNC, &sh->state);
3465 clear_bit(STRIPE_REPLACED, &sh->state);
3465 } 3466 }
3466 spin_unlock(&sh->stripe_lock); 3467 spin_unlock(&sh->stripe_lock);
3467 } 3468 }
@@ -3607,19 +3608,23 @@ static void handle_stripe(struct stripe_head *sh)
3607 handle_parity_checks5(conf, sh, &s, disks); 3608 handle_parity_checks5(conf, sh, &s, disks);
3608 } 3609 }
3609 3610
3610 if (s.replacing && s.locked == 0 3611 if ((s.replacing || s.syncing) && s.locked == 0
3611 && !test_bit(STRIPE_INSYNC, &sh->state)) { 3612 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)
3613 && !test_bit(STRIPE_REPLACED, &sh->state)) {
3612 /* Write out to replacement devices where possible */ 3614 /* Write out to replacement devices where possible */
3613 for (i = 0; i < conf->raid_disks; i++) 3615 for (i = 0; i < conf->raid_disks; i++)
3614 if (test_bit(R5_UPTODATE, &sh->dev[i].flags) && 3616 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
3615 test_bit(R5_NeedReplace, &sh->dev[i].flags)) { 3617 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags));
3616 set_bit(R5_WantReplace, &sh->dev[i].flags); 3618 set_bit(R5_WantReplace, &sh->dev[i].flags);
3617 set_bit(R5_LOCKED, &sh->dev[i].flags); 3619 set_bit(R5_LOCKED, &sh->dev[i].flags);
3618 s.locked++; 3620 s.locked++;
3619 } 3621 }
3620 set_bit(STRIPE_INSYNC, &sh->state); 3622 if (s.replacing)
3623 set_bit(STRIPE_INSYNC, &sh->state);
3624 set_bit(STRIPE_REPLACED, &sh->state);
3621 } 3625 }
3622 if ((s.syncing || s.replacing) && s.locked == 0 && 3626 if ((s.syncing || s.replacing) && s.locked == 0 &&
3627 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
3623 test_bit(STRIPE_INSYNC, &sh->state)) { 3628 test_bit(STRIPE_INSYNC, &sh->state)) {
3624 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3629 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3625 clear_bit(STRIPE_SYNCING, &sh->state); 3630 clear_bit(STRIPE_SYNCING, &sh->state);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index b0b663b119a8..70c49329ca9a 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -306,6 +306,7 @@ enum {
306 STRIPE_SYNC_REQUESTED, 306 STRIPE_SYNC_REQUESTED,
307 STRIPE_SYNCING, 307 STRIPE_SYNCING,
308 STRIPE_INSYNC, 308 STRIPE_INSYNC,
309 STRIPE_REPLACED,
309 STRIPE_PREREAD_ACTIVE, 310 STRIPE_PREREAD_ACTIVE,
310 STRIPE_DELAYED, 311 STRIPE_DELAYED,
311 STRIPE_DEGRADED, 312 STRIPE_DEGRADED,
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c
index efdc873e58d1..a9857022f71d 100644
--- a/drivers/media/i2c/ml86v7667.c
+++ b/drivers/media/i2c/ml86v7667.c
@@ -117,7 +117,7 @@ static int ml86v7667_s_ctrl(struct v4l2_ctrl *ctrl)
117{ 117{
118 struct v4l2_subdev *sd = to_sd(ctrl); 118 struct v4l2_subdev *sd = to_sd(ctrl);
119 struct i2c_client *client = v4l2_get_subdevdata(sd); 119 struct i2c_client *client = v4l2_get_subdevdata(sd);
120 int ret; 120 int ret = -EINVAL;
121 121
122 switch (ctrl->id) { 122 switch (ctrl->id) {
123 case V4L2_CID_BRIGHTNESS: 123 case V4L2_CID_BRIGHTNESS:
@@ -157,7 +157,7 @@ static int ml86v7667_s_ctrl(struct v4l2_ctrl *ctrl)
157 break; 157 break;
158 } 158 }
159 159
160 return 0; 160 return ret;
161} 161}
162 162
163static int ml86v7667_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) 163static int ml86v7667_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index df4ada880e42..bd9405df1bd6 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -1987,7 +1987,7 @@ MODULE_DEVICE_TABLE(platform, coda_platform_ids);
1987 1987
1988#ifdef CONFIG_OF 1988#ifdef CONFIG_OF
1989static const struct of_device_id coda_dt_ids[] = { 1989static const struct of_device_id coda_dt_ids[] = {
1990 { .compatible = "fsl,imx27-vpu", .data = &coda_platform_ids[CODA_IMX27] }, 1990 { .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] },
1991 { .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] }, 1991 { .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] },
1992 { /* sentinel */ } 1992 { /* sentinel */ }
1993}; 1993};
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 553d87e5ceab..fd6289d60cde 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -784,6 +784,7 @@ static int g2d_probe(struct platform_device *pdev)
784 } 784 }
785 *vfd = g2d_videodev; 785 *vfd = g2d_videodev;
786 vfd->lock = &dev->mutex; 786 vfd->lock = &dev->mutex;
787 vfd->v4l2_dev = &dev->v4l2_dev;
787 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); 788 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
788 if (ret) { 789 if (ret) {
789 v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); 790 v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 5296385153d5..4f6dd42c9adb 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -344,7 +344,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
344 pix_mp->num_planes = 2; 344 pix_mp->num_planes = 2;
345 /* Set pixelformat to the format in which MFC 345 /* Set pixelformat to the format in which MFC
346 outputs the decoded frame */ 346 outputs the decoded frame */
347 pix_mp->pixelformat = V4L2_PIX_FMT_NV12MT; 347 pix_mp->pixelformat = ctx->dst_fmt->fourcc;
348 pix_mp->plane_fmt[0].bytesperline = ctx->buf_width; 348 pix_mp->plane_fmt[0].bytesperline = ctx->buf_width;
349 pix_mp->plane_fmt[0].sizeimage = ctx->luma_size; 349 pix_mp->plane_fmt[0].sizeimage = ctx->luma_size;
350 pix_mp->plane_fmt[1].bytesperline = ctx->buf_width; 350 pix_mp->plane_fmt[1].bytesperline = ctx->buf_width;
@@ -382,10 +382,16 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
382 mfc_err("Unsupported format for source.\n"); 382 mfc_err("Unsupported format for source.\n");
383 return -EINVAL; 383 return -EINVAL;
384 } 384 }
385 if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) { 385 if (fmt->codec_mode == S5P_FIMV_CODEC_NONE) {
386 mfc_err("Not supported format.\n"); 386 mfc_err("Unknown codec\n");
387 return -EINVAL; 387 return -EINVAL;
388 } 388 }
389 if (!IS_MFCV6(dev)) {
390 if (fmt->fourcc == V4L2_PIX_FMT_VP8) {
391 mfc_err("Not supported format.\n");
392 return -EINVAL;
393 }
394 }
389 } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { 395 } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
390 fmt = find_format(f, MFC_FMT_RAW); 396 fmt = find_format(f, MFC_FMT_RAW);
391 if (!fmt) { 397 if (!fmt) {
@@ -411,7 +417,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
411 struct s5p_mfc_dev *dev = video_drvdata(file); 417 struct s5p_mfc_dev *dev = video_drvdata(file);
412 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); 418 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
413 int ret = 0; 419 int ret = 0;
414 struct s5p_mfc_fmt *fmt;
415 struct v4l2_pix_format_mplane *pix_mp; 420 struct v4l2_pix_format_mplane *pix_mp;
416 421
417 mfc_debug_enter(); 422 mfc_debug_enter();
@@ -425,54 +430,32 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
425 goto out; 430 goto out;
426 } 431 }
427 if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { 432 if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
428 fmt = find_format(f, MFC_FMT_RAW); 433 /* dst_fmt is validated by call to vidioc_try_fmt */
429 if (!fmt) { 434 ctx->dst_fmt = find_format(f, MFC_FMT_RAW);
430 mfc_err("Unsupported format for source.\n"); 435 ret = 0;
431 return -EINVAL;
432 }
433 if (!IS_MFCV6(dev) && (fmt->fourcc != V4L2_PIX_FMT_NV12MT)) {
434 mfc_err("Not supported format.\n");
435 return -EINVAL;
436 } else if (IS_MFCV6(dev) &&
437 (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) {
438 mfc_err("Not supported format.\n");
439 return -EINVAL;
440 }
441 ctx->dst_fmt = fmt;
442 mfc_debug_leave();
443 return ret;
444 } else if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
445 mfc_err("Wrong type error for S_FMT : %d", f->type);
446 return -EINVAL;
447 }
448 fmt = find_format(f, MFC_FMT_DEC);
449 if (!fmt || fmt->codec_mode == S5P_MFC_CODEC_NONE) {
450 mfc_err("Unknown codec\n");
451 ret = -EINVAL;
452 goto out; 436 goto out;
453 } 437 } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
454 if (fmt->type != MFC_FMT_DEC) { 438 /* src_fmt is validated by call to vidioc_try_fmt */
455 mfc_err("Wrong format selected, you should choose " 439 ctx->src_fmt = find_format(f, MFC_FMT_DEC);
456 "format for decoding\n"); 440 ctx->codec_mode = ctx->src_fmt->codec_mode;
441 mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
442 pix_mp->height = 0;
443 pix_mp->width = 0;
444 if (pix_mp->plane_fmt[0].sizeimage)
445 ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage;
446 else
447 pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size =
448 DEF_CPB_SIZE;
449 pix_mp->plane_fmt[0].bytesperline = 0;
450 ctx->state = MFCINST_INIT;
451 ret = 0;
452 goto out;
453 } else {
454 mfc_err("Wrong type error for S_FMT : %d", f->type);
457 ret = -EINVAL; 455 ret = -EINVAL;
458 goto out; 456 goto out;
459 } 457 }
460 if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) { 458
461 mfc_err("Not supported format.\n");
462 return -EINVAL;
463 }
464 ctx->src_fmt = fmt;
465 ctx->codec_mode = fmt->codec_mode;
466 mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
467 pix_mp->height = 0;
468 pix_mp->width = 0;
469 if (pix_mp->plane_fmt[0].sizeimage)
470 ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage;
471 else
472 pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size =
473 DEF_CPB_SIZE;
474 pix_mp->plane_fmt[0].bytesperline = 0;
475 ctx->state = MFCINST_INIT;
476out: 459out:
477 mfc_debug_leave(); 460 mfc_debug_leave();
478 return ret; 461 return ret;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 2549967b2f85..59e56f4c8ce3 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -906,6 +906,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
906 906
907static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) 907static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
908{ 908{
909 struct s5p_mfc_dev *dev = video_drvdata(file);
909 struct s5p_mfc_fmt *fmt; 910 struct s5p_mfc_fmt *fmt;
910 struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; 911 struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
911 912
@@ -930,6 +931,18 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
930 return -EINVAL; 931 return -EINVAL;
931 } 932 }
932 933
934 if (!IS_MFCV6(dev)) {
935 if (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16) {
936 mfc_err("Not supported format.\n");
937 return -EINVAL;
938 }
939 } else if (IS_MFCV6(dev)) {
940 if (fmt->fourcc == V4L2_PIX_FMT_NV12MT) {
941 mfc_err("Not supported format.\n");
942 return -EINVAL;
943 }
944 }
945
933 if (fmt->num_planes != pix_fmt_mp->num_planes) { 946 if (fmt->num_planes != pix_fmt_mp->num_planes) {
934 mfc_err("failed to try output format\n"); 947 mfc_err("failed to try output format\n");
935 return -EINVAL; 948 return -EINVAL;
@@ -947,7 +960,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
947{ 960{
948 struct s5p_mfc_dev *dev = video_drvdata(file); 961 struct s5p_mfc_dev *dev = video_drvdata(file);
949 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); 962 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
950 struct s5p_mfc_fmt *fmt;
951 struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; 963 struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
952 int ret = 0; 964 int ret = 0;
953 965
@@ -960,13 +972,9 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
960 goto out; 972 goto out;
961 } 973 }
962 if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { 974 if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
963 fmt = find_format(f, MFC_FMT_ENC); 975 /* dst_fmt is validated by call to vidioc_try_fmt */
964 if (!fmt) { 976 ctx->dst_fmt = find_format(f, MFC_FMT_ENC);
965 mfc_err("failed to set capture format\n");
966 return -EINVAL;
967 }
968 ctx->state = MFCINST_INIT; 977 ctx->state = MFCINST_INIT;
969 ctx->dst_fmt = fmt;
970 ctx->codec_mode = ctx->dst_fmt->codec_mode; 978 ctx->codec_mode = ctx->dst_fmt->codec_mode;
971 ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage; 979 ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage;
972 pix_fmt_mp->plane_fmt[0].bytesperline = 0; 980 pix_fmt_mp->plane_fmt[0].bytesperline = 0;
@@ -987,28 +995,8 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
987 } 995 }
988 mfc_debug(2, "Got instance number: %d\n", ctx->inst_no); 996 mfc_debug(2, "Got instance number: %d\n", ctx->inst_no);
989 } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { 997 } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
990 fmt = find_format(f, MFC_FMT_RAW); 998 /* src_fmt is validated by call to vidioc_try_fmt */
991 if (!fmt) { 999 ctx->src_fmt = find_format(f, MFC_FMT_RAW);
992 mfc_err("failed to set output format\n");
993 return -EINVAL;
994 }
995
996 if (!IS_MFCV6(dev) &&
997 (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16)) {
998 mfc_err("Not supported format.\n");
999 return -EINVAL;
1000 } else if (IS_MFCV6(dev) &&
1001 (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) {
1002 mfc_err("Not supported format.\n");
1003 return -EINVAL;
1004 }
1005
1006 if (fmt->num_planes != pix_fmt_mp->num_planes) {
1007 mfc_err("failed to set output format\n");
1008 ret = -EINVAL;
1009 goto out;
1010 }
1011 ctx->src_fmt = fmt;
1012 ctx->img_width = pix_fmt_mp->width; 1000 ctx->img_width = pix_fmt_mp->width;
1013 ctx->img_height = pix_fmt_mp->height; 1001 ctx->img_height = pix_fmt_mp->height;
1014 mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode); 1002 mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode);
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 4851cc2e4a4d..c4ff9739a7ae 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -726,7 +726,7 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
726 726
727 *eedata = data; 727 *eedata = data;
728 *eedata_len = len; 728 *eedata_len = len;
729 dev_config = (void *)eedata; 729 dev_config = (void *)*eedata;
730 730
731 switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) { 731 switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) {
732 case 0: 732 case 0:
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index cb694055ba7d..6e5070774dc2 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -303,6 +303,11 @@ static int hdpvr_probe(struct usb_interface *interface,
303 303
304 dev->workqueue = 0; 304 dev->workqueue = 0;
305 305
306 /* init video transfer queues first of all */
307 /* to prevent oops in hdpvr_delete() on error paths */
308 INIT_LIST_HEAD(&dev->free_buff_list);
309 INIT_LIST_HEAD(&dev->rec_buff_list);
310
306 /* register v4l2_device early so it can be used for printks */ 311 /* register v4l2_device early so it can be used for printks */
307 if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) { 312 if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) {
308 dev_err(&interface->dev, "v4l2_device_register failed\n"); 313 dev_err(&interface->dev, "v4l2_device_register failed\n");
@@ -325,10 +330,6 @@ static int hdpvr_probe(struct usb_interface *interface,
325 if (!dev->workqueue) 330 if (!dev->workqueue)
326 goto error; 331 goto error;
327 332
328 /* init video transfer queues */
329 INIT_LIST_HEAD(&dev->free_buff_list);
330 INIT_LIST_HEAD(&dev->rec_buff_list);
331
332 dev->options = hdpvr_default_options; 333 dev->options = hdpvr_default_options;
333 334
334 if (default_video_input < HDPVR_VIDEO_INPUTS) 335 if (default_video_input < HDPVR_VIDEO_INPUTS)
@@ -405,7 +406,7 @@ static int hdpvr_probe(struct usb_interface *interface,
405 video_nr[atomic_inc_return(&dev_nr)]); 406 video_nr[atomic_inc_return(&dev_nr)]);
406 if (retval < 0) { 407 if (retval < 0) {
407 v4l2_err(&dev->v4l2_dev, "registering videodev failed\n"); 408 v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
408 goto error; 409 goto reg_fail;
409 } 410 }
410 411
411 /* let the user know what node this device is now attached to */ 412 /* let the user know what node this device is now attached to */
diff --git a/drivers/media/usb/usbtv/Kconfig b/drivers/media/usb/usbtv/Kconfig
index 8864436464bf..7c5b86006ee6 100644
--- a/drivers/media/usb/usbtv/Kconfig
+++ b/drivers/media/usb/usbtv/Kconfig
@@ -1,6 +1,6 @@
1config VIDEO_USBTV 1config VIDEO_USBTV
2 tristate "USBTV007 video capture support" 2 tristate "USBTV007 video capture support"
3 depends on VIDEO_DEV 3 depends on VIDEO_V4L2
4 select VIDEOBUF2_VMALLOC 4 select VIDEOBUF2_VMALLOC
5 5
6 ---help--- 6 ---help---
diff --git a/drivers/media/usb/usbtv/usbtv.c b/drivers/media/usb/usbtv/usbtv.c
index bf43f874685e..91650173941a 100644
--- a/drivers/media/usb/usbtv/usbtv.c
+++ b/drivers/media/usb/usbtv/usbtv.c
@@ -57,7 +57,7 @@
57#define USBTV_CHUNK_SIZE 256 57#define USBTV_CHUNK_SIZE 256
58#define USBTV_CHUNK 240 58#define USBTV_CHUNK 240
59#define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \ 59#define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \
60 / 2 / USBTV_CHUNK) 60 / 4 / USBTV_CHUNK)
61 61
62/* Chunk header. */ 62/* Chunk header. */
63#define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \ 63#define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \
@@ -89,6 +89,7 @@ struct usbtv {
89 /* Number of currently processed frame, useful to find 89 /* Number of currently processed frame, useful to find
90 * out when a new one begins. */ 90 * out when a new one begins. */
91 u32 frame_id; 91 u32 frame_id;
92 int chunks_done;
92 93
93 int iso_size; 94 int iso_size;
94 unsigned int sequence; 95 unsigned int sequence;
@@ -202,6 +203,26 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
202 return 0; 203 return 0;
203} 204}
204 205
206/* Copy data from chunk into a frame buffer, deinterlacing the data
207 * into every second line. Unfortunately, they don't align nicely into
208 * 720 pixel lines, as the chunk is 240 words long, which is 480 pixels.
209 * Therefore, we break down the chunk into two halves before copying,
210 * so that we can interleave a line if needed. */
211static void usbtv_chunk_to_vbuf(u32 *frame, u32 *src, int chunk_no, int odd)
212{
213 int half;
214
215 for (half = 0; half < 2; half++) {
216 int part_no = chunk_no * 2 + half;
217 int line = part_no / 3;
218 int part_index = (line * 2 + !odd) * 3 + (part_no % 3);
219
220 u32 *dst = &frame[part_index * USBTV_CHUNK/2];
221 memcpy(dst, src, USBTV_CHUNK/2 * sizeof(*src));
222 src += USBTV_CHUNK/2;
223 }
224}
225
205/* Called for each 256-byte image chunk. 226/* Called for each 256-byte image chunk.
206 * First word identifies the chunk, followed by 240 words of image 227 * First word identifies the chunk, followed by 240 words of image
207 * data and padding. */ 228 * data and padding. */
@@ -218,17 +239,17 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
218 frame_id = USBTV_FRAME_ID(chunk); 239 frame_id = USBTV_FRAME_ID(chunk);
219 odd = USBTV_ODD(chunk); 240 odd = USBTV_ODD(chunk);
220 chunk_no = USBTV_CHUNK_NO(chunk); 241 chunk_no = USBTV_CHUNK_NO(chunk);
221
222 /* Deinterlace. TODO: Use interlaced frame format. */
223 chunk_no = (chunk_no - chunk_no % 3) * 2 + chunk_no % 3;
224 chunk_no += !odd * 3;
225
226 if (chunk_no >= USBTV_CHUNKS) 242 if (chunk_no >= USBTV_CHUNKS)
227 return; 243 return;
228 244
229 /* Beginning of a frame. */ 245 /* Beginning of a frame. */
230 if (chunk_no == 0) 246 if (chunk_no == 0) {
231 usbtv->frame_id = frame_id; 247 usbtv->frame_id = frame_id;
248 usbtv->chunks_done = 0;
249 }
250
251 if (usbtv->frame_id != frame_id)
252 return;
232 253
233 spin_lock_irqsave(&usbtv->buflock, flags); 254 spin_lock_irqsave(&usbtv->buflock, flags);
234 if (list_empty(&usbtv->bufs)) { 255 if (list_empty(&usbtv->bufs)) {
@@ -241,19 +262,23 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
241 buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list); 262 buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list);
242 frame = vb2_plane_vaddr(&buf->vb, 0); 263 frame = vb2_plane_vaddr(&buf->vb, 0);
243 264
244 /* Copy the chunk. */ 265 /* Copy the chunk data. */
245 memcpy(&frame[chunk_no * USBTV_CHUNK], &chunk[1], 266 usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd);
246 USBTV_CHUNK * sizeof(chunk[1])); 267 usbtv->chunks_done++;
247 268
248 /* Last chunk in a frame, signalling an end */ 269 /* Last chunk in a frame, signalling an end */
249 if (usbtv->frame_id && chunk_no == USBTV_CHUNKS-1) { 270 if (odd && chunk_no == USBTV_CHUNKS-1) {
250 int size = vb2_plane_size(&buf->vb, 0); 271 int size = vb2_plane_size(&buf->vb, 0);
272 enum vb2_buffer_state state = usbtv->chunks_done ==
273 USBTV_CHUNKS ?
274 VB2_BUF_STATE_DONE :
275 VB2_BUF_STATE_ERROR;
251 276
252 buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED; 277 buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
253 buf->vb.v4l2_buf.sequence = usbtv->sequence++; 278 buf->vb.v4l2_buf.sequence = usbtv->sequence++;
254 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 279 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
255 vb2_set_plane_payload(&buf->vb, 0, size); 280 vb2_set_plane_payload(&buf->vb, 0, size);
256 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); 281 vb2_buffer_done(&buf->vb, state);
257 list_del(&buf->list); 282 list_del(&buf->list);
258 } 283 }
259 284
@@ -518,7 +543,7 @@ static int usbtv_queue_setup(struct vb2_queue *vq,
518 if (*nbuffers < 2) 543 if (*nbuffers < 2)
519 *nbuffers = 2; 544 *nbuffers = 2;
520 *nplanes = 1; 545 *nplanes = 1;
521 sizes[0] = USBTV_CHUNK * USBTV_CHUNKS * sizeof(u32); 546 sizes[0] = USBTV_WIDTH * USBTV_HEIGHT / 2 * sizeof(u32);
522 547
523 return 0; 548 return 0;
524} 549}
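
usbtv_chunk_to_vbuf() maps each 240-word (480-pixel) chunk onto 720-pixel interlaced lines: a chunk splits into two 240-pixel halves, three halves fill one line, and (line * 2 + !odd) interleaves the odd and even fields. For example, chunk 4 of an odd field yields part_no 8 and 9: part 8 lands at part_index (2*2+0)*3+2 = 14 and part 9 at (3*2+0)*3+0 = 18. A userspace demo of the index arithmetic alone:

        /* Userspace demo of the index arithmetic only (no data copying);
         * mirrors usbtv_chunk_to_vbuf()'s computation. */
        #include <stdio.h>

        #define USBTV_CHUNK 240                 /* words per chunk */

        int main(void)
        {
                int chunk_no = 4, odd = 1, half;

                for (half = 0; half < 2; half++) {
                        int part_no = chunk_no * 2 + half;
                        int line = part_no / 3;
                        int part_index = (line * 2 + !odd) * 3 + (part_no % 3);

                        printf("part %d -> frame offset %d words\n",
                               part_no, part_index * USBTV_CHUNK / 2);
                }
                return 0;       /* part 8 -> 1680, part 9 -> 2160 */
        }
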
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index f7b90661e321..e068a76a5f6f 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -66,14 +66,19 @@ EXPORT_SYMBOL(ssc_request);
66 66
67void ssc_free(struct ssc_device *ssc) 67void ssc_free(struct ssc_device *ssc)
68{ 68{
69 bool disable_clk = true;
70
69 spin_lock(&user_lock); 71 spin_lock(&user_lock);
70 if (ssc->user) { 72 if (ssc->user)
71 ssc->user--; 73 ssc->user--;
72 clk_disable_unprepare(ssc->clk); 74 else {
73 } else { 75 disable_clk = false;
74 dev_dbg(&ssc->pdev->dev, "device already free\n"); 76 dev_dbg(&ssc->pdev->dev, "device already free\n");
75 } 77 }
76 spin_unlock(&user_lock); 78 spin_unlock(&user_lock);
79
80 if (disable_clk)
81 clk_disable_unprepare(ssc->clk);
77} 82}
78EXPORT_SYMBOL(ssc_free); 83EXPORT_SYMBOL(ssc_free);
79 84
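
The atmel-ssc fix exists because clk_disable_unprepare() may sleep and therefore must not run under a spinlock; the decision is now recorded while the lock is held and acted on after it is dropped. The pattern in fragment form, with generic names:

        /* Fragment: record the decision under the spinlock, sleep after
         * dropping it. 'dev' and its fields are generic stand-ins. */
        bool do_disable = true;

        spin_lock(&user_lock);
        if (dev->users)
                dev->users--;
        else
                do_disable = false;     /* unbalanced free, nothing to undo */
        spin_unlock(&user_lock);

        if (do_disable)
                clk_disable_unprepare(dev->clk);        /* may sleep, lock dropped */
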
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index f9296abcf02a..6127ab64bb39 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -167,7 +167,7 @@ int mei_hbm_start_req(struct mei_device *dev)
167 167
168 dev->hbm_state = MEI_HBM_IDLE; 168 dev->hbm_state = MEI_HBM_IDLE;
169 if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { 169 if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
170 dev_err(&dev->pdev->dev, "version message writet failed\n"); 170 dev_err(&dev->pdev->dev, "version message write failed\n");
171 dev->dev_state = MEI_DEV_RESETTING; 171 dev->dev_state = MEI_DEV_RESETTING;
172 mei_reset(dev, 1); 172 mei_reset(dev, 1);
173 return -ENODEV; 173 return -ENODEV;
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index e4f8dec4dc3c..b22c7e247225 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -239,14 +239,18 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
239 if (mei_me_hw_is_ready(dev)) 239 if (mei_me_hw_is_ready(dev))
240 return 0; 240 return 0;
241 241
242 dev->recvd_hw_ready = false;
242 mutex_unlock(&dev->device_lock); 243 mutex_unlock(&dev->device_lock);
243 err = wait_event_interruptible_timeout(dev->wait_hw_ready, 244 err = wait_event_interruptible_timeout(dev->wait_hw_ready,
244 dev->recvd_hw_ready, MEI_INTEROP_TIMEOUT); 245 dev->recvd_hw_ready,
246 mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
245 mutex_lock(&dev->device_lock); 247 mutex_lock(&dev->device_lock);
246 if (!err && !dev->recvd_hw_ready) { 248 if (!err && !dev->recvd_hw_ready) {
249 if (!err)
250 err = -ETIMEDOUT;
247 dev_err(&dev->pdev->dev, 251 dev_err(&dev->pdev->dev,
248 "wait hw ready failed. status = 0x%x\n", err); 252 "wait hw ready failed. status = %d\n", err);
249 return -ETIMEDOUT; 253 return err;
250 } 254 }
251 255
252 dev->recvd_hw_ready = false; 256 dev->recvd_hw_ready = false;
@@ -483,7 +487,9 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
483 /* check if ME wants a reset */ 487 /* check if ME wants a reset */
484 if (!mei_hw_is_ready(dev) && 488 if (!mei_hw_is_ready(dev) &&
485 dev->dev_state != MEI_DEV_RESETTING && 489 dev->dev_state != MEI_DEV_RESETTING &&
486 dev->dev_state != MEI_DEV_INITIALIZING) { 490 dev->dev_state != MEI_DEV_INITIALIZING &&
491 dev->dev_state != MEI_DEV_POWER_DOWN &&
492 dev->dev_state != MEI_DEV_POWER_UP) {
487 dev_dbg(&dev->pdev->dev, "FW not ready.\n"); 493 dev_dbg(&dev->pdev->dev, "FW not ready.\n");
488 mei_reset(dev, 1); 494 mei_reset(dev, 1);
489 mutex_unlock(&dev->device_lock); 495 mutex_unlock(&dev->device_lock);
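
wait_event_interruptible_timeout() takes its timeout in jiffies and returns 0 on timeout, -ERESTATSYS becomes -ERESTARTSYS if interrupted, and the remaining jiffies otherwise; the hw-me hunk fixes both halves by converting MEI_INTEROP_TIMEOUT (seconds) with mei_secs_to_jiffies() and by propagating a real error instead of always reporting -ETIMEDOUT, and it also clears recvd_hw_ready before the wait to close a race. A fragment of the clean return-value mapping (the wait queue and condition are placeholders):

        /* Fragment: full return-value mapping for the interruptible,
         * timed wait; 'wq' and 'ready' are placeholders. */
        long err = wait_event_interruptible_timeout(wq, ready,
                        mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
        if (err == 0)
                err = -ETIMEDOUT;       /* 0 means the timeout elapsed */
        if (err < 0)
                return err;             /* -ETIMEDOUT or -ERESTARTSYS */
        /* err > 0: condition became true with 'err' jiffies to spare */
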
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index ed1d75203af6..e6f16f83ecde 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -148,7 +148,8 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
148 148
149 dev->hbm_state = MEI_HBM_IDLE; 149 dev->hbm_state = MEI_HBM_IDLE;
150 150
151 if (dev->dev_state != MEI_DEV_INITIALIZING) { 151 if (dev->dev_state != MEI_DEV_INITIALIZING &&
152 dev->dev_state != MEI_DEV_POWER_UP) {
152 if (dev->dev_state != MEI_DEV_DISABLED && 153 if (dev->dev_state != MEI_DEV_DISABLED &&
153 dev->dev_state != MEI_DEV_POWER_DOWN) 154 dev->dev_state != MEI_DEV_POWER_DOWN)
154 dev->dev_state = MEI_DEV_RESETTING; 155 dev->dev_state = MEI_DEV_RESETTING;
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 847b1996ce8e..2c5a91bb8ec3 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -128,7 +128,7 @@ static inline int pxamci_set_power(struct pxamci_host *host,
128 !!on ^ host->pdata->gpio_power_invert); 128 !!on ^ host->pdata->gpio_power_invert);
129 } 129 }
130 if (!host->vcc && host->pdata && host->pdata->setpower) 130 if (!host->vcc && host->pdata && host->pdata->setpower)
131 host->pdata->setpower(mmc_dev(host->mmc), vdd); 131 return host->pdata->setpower(mmc_dev(host->mmc), vdd);
132 132
133 return 0; 133 return 0;
134} 134}
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index a746ba272f04..a956053608f9 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -1007,7 +1007,7 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
1007 1007
1008 soft = &pkt.soft.rfc1201; 1008 soft = &pkt.soft.rfc1201;
1009 1009
1010 lp->hw.copy_from_card(dev, bufnum, 0, &pkt, sizeof(ARC_HDR_SIZE)); 1010 lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
1011 if (pkt.hard.offset[0]) { 1011 if (pkt.hard.offset[0]) {
1012 ofs = pkt.hard.offset[0]; 1012 ofs = pkt.hard.offset[0];
1013 length = 256 - ofs; 1013 length = 256 - ofs;
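
The arcnet one-liner removes a classic pitfall: ARC_HDR_SIZE is an integer constant, so sizeof(ARC_HDR_SIZE) is sizeof(int), which matches the intended 4 bytes here only by coincidence and would silently copy the wrong amount for any other header size. A two-line demonstration (WIDE_HDR_SIZE is hypothetical, added for contrast):

        #include <stdio.h>

        #define ARC_HDR_SIZE    4
        #define WIDE_HDR_SIZE   16              /* hypothetical */

        int main(void)
        {
                printf("%zu vs %d\n", sizeof(ARC_HDR_SIZE), ARC_HDR_SIZE);
                printf("%zu vs %d\n", sizeof(WIDE_HDR_SIZE), WIDE_HDR_SIZE);
                return 0;                       /* "4 vs 4" then "4 vs 16" */
        }
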
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 07f257d44a1e..e48cb339c0c6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3714,11 +3714,17 @@ static int bond_neigh_init(struct neighbour *n)
3714 * The bonding ndo_neigh_setup is called at init time beofre any 3714 * The bonding ndo_neigh_setup is called at init time beofre any
3715 * slave exists. So we must declare proxy setup function which will 3715 * slave exists. So we must declare proxy setup function which will
3716 * be used at run time to resolve the actual slave neigh param setup. 3716 * be used at run time to resolve the actual slave neigh param setup.
3717 *
3718 * It's also called by master devices (such as vlans) to setup their
3719 * underlying devices. In that case - do nothing, we're already set up from
3720 * our init.
3717 */ 3721 */
3718static int bond_neigh_setup(struct net_device *dev, 3722static int bond_neigh_setup(struct net_device *dev,
3719 struct neigh_parms *parms) 3723 struct neigh_parms *parms)
3720{ 3724{
3721 parms->neigh_setup = bond_neigh_init; 3725 /* modify only our neigh_parms */
3726 if (parms->dev == dev)
3727 parms->neigh_setup = bond_neigh_init;
3722 3728
3723 return 0; 3729 return 0;
3724} 3730}
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 6aa7b3266c80..ac6177d3befc 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -412,10 +412,20 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
412 412
413 switch (msg->msg.hdr.cmd) { 413 switch (msg->msg.hdr.cmd) {
414 case CMD_CAN_RX: 414 case CMD_CAN_RX:
415 if (msg->msg.rx.net >= dev->net_count) {
416 dev_err(dev->udev->dev.parent, "format error\n");
417 break;
418 }
419
415 esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg); 420 esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg);
416 break; 421 break;
417 422
418 case CMD_CAN_TX: 423 case CMD_CAN_TX:
424 if (msg->msg.txdone.net >= dev->net_count) {
425 dev_err(dev->udev->dev.parent, "format error\n");
426 break;
427 }
428
419 esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net], 429 esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net],
420 msg); 430 msg);
421 break; 431 break;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 25723d8ee201..925ab8ec9329 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
649 if ((mc->ptr + rec_len) > mc->end) 649 if ((mc->ptr + rec_len) > mc->end)
650 goto decode_failed; 650 goto decode_failed;
651 651
652 memcpy(cf->data, mc->ptr, rec_len); 652 memcpy(cf->data, mc->ptr, cf->can_dlc);
653 mc->ptr += rec_len; 653 mc->ptr += rec_len;
654 } 654 }
655 655
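
The pcan_usb fix bounds the memcpy() by cf->can_dlc, which the driver has already clamped to the 8-byte CAN maximum, instead of the device-reported rec_len; a malicious or malfunctioning device can no longer overflow cf->data, while rec_len still advances the parse cursor. The rule in fragment form, with the equivalent clamp spelled out:

        /* Fragment: copy no more than the destination can hold, even when
         * a device-supplied length says otherwise. The driver gets the
         * same effect from cf->can_dlc, already limited by get_can_dlc(). */
        u8 len = min_t(u8, rec_len, CAN_MAX_DLEN);      /* CAN_MAX_DLEN == 8 */

        memcpy(cf->data, mc->ptr, len);         /* bounded by the destination */
        mc->ptr += rec_len;                     /* untrusted length only moves the cursor */
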
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index cbd388eea682..8becd3d838b5 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -779,6 +779,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
779 usb_unanchor_urb(urb); 779 usb_unanchor_urb(urb);
780 usb_free_coherent(priv->udev, RX_BUFFER_SIZE, buf, 780 usb_free_coherent(priv->udev, RX_BUFFER_SIZE, buf,
781 urb->transfer_dma); 781 urb->transfer_dma);
782 usb_free_urb(urb);
782 break; 783 break;
783 } 784 }
784 785
diff --git a/drivers/net/ethernet/allwinner/Kconfig b/drivers/net/ethernet/allwinner/Kconfig
index 53ad213e865b..d8d95d4cd45a 100644
--- a/drivers/net/ethernet/allwinner/Kconfig
+++ b/drivers/net/ethernet/allwinner/Kconfig
@@ -3,19 +3,20 @@
3# 3#
4 4
5config NET_VENDOR_ALLWINNER 5config NET_VENDOR_ALLWINNER
6 bool "Allwinner devices" 6 bool "Allwinner devices"
7 default y 7 default y
8 depends on ARCH_SUNXI
9 ---help---
10 If you have a network (Ethernet) card belonging to this
11 class, say Y and read the Ethernet-HOWTO, available from
12 <http://www.tldp.org/docs.html#howto>.
13 8
14 Note that the answer to this question doesn't directly 9 depends on ARCH_SUNXI
15 affect the kernel: saying N will just cause the configurator 10 ---help---
16 to skip all the questions about Allwinner cards. If you say Y, 11 If you have a network (Ethernet) card belonging to this
17 you will be asked for your specific card in the following 12 class, say Y and read the Ethernet-HOWTO, available from
18 questions. 13 <http://www.tldp.org/docs.html#howto>.
14
15 Note that the answer to this question doesn't directly
16 affect the kernel: saying N will just cause the configurator
17 to skip all the questions about Allwinner cards. If you say Y,
18 you will be asked for your specific card in the following
19 questions.
19 20
20if NET_VENDOR_ALLWINNER 21if NET_VENDOR_ALLWINNER
21 22
@@ -26,6 +27,7 @@ config SUN4I_EMAC
26 select CRC32 27 select CRC32
27 select MII 28 select MII
28 select PHYLIB 29 select PHYLIB
30 select MDIO_SUN4I
29 ---help--- 31 ---help---
30 Support for Allwinner A10 EMAC ethernet driver. 32 Support for Allwinner A10 EMAC ethernet driver.
31 33
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index f1b121ee5525..55d79cb53a79 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -199,7 +199,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
199 struct arc_emac_priv *priv = netdev_priv(ndev); 199 struct arc_emac_priv *priv = netdev_priv(ndev);
200 unsigned int work_done; 200 unsigned int work_done;
201 201
202 for (work_done = 0; work_done <= budget; work_done++) { 202 for (work_done = 0; work_done < budget; work_done++) {
203 unsigned int *last_rx_bd = &priv->last_rx_bd; 203 unsigned int *last_rx_bd = &priv->last_rx_bd;
204 struct net_device_stats *stats = &priv->stats; 204 struct net_device_stats *stats = &priv->stats;
205 struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; 205 struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
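
The arc emac change turns "work_done <= budget" into "work_done < budget": a NAPI poll callback must consume at most budget packets and must never return more than budget, since returning exactly budget is the signal that more work remains and processing one extra packet breaks that accounting. The canonical poll shape (the per-packet helpers here are hypothetical):

        /* Canonical NAPI poll shape; foo_rx_one() and foo_enable_irq()
         * are hypothetical per-driver helpers. */
        static int foo_poll(struct napi_struct *napi, int budget)
        {
                int work_done;

                for (work_done = 0; work_done < budget; work_done++)
                        if (!foo_rx_one(napi))  /* ring empty */
                                break;

                if (work_done < budget) {
                        napi_complete(napi);
                        foo_enable_irq(napi);   /* re-arm the interrupt */
                }

                return work_done;               /* never more than budget */
        }
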
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index b2bf324631dc..0f0556526ba9 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -520,6 +520,9 @@ struct atl1c_adapter {
520 struct net_device *netdev; 520 struct net_device *netdev;
521 struct pci_dev *pdev; 521 struct pci_dev *pdev;
522 struct napi_struct napi; 522 struct napi_struct napi;
523 struct page *rx_page;
524 unsigned int rx_page_offset;
525 unsigned int rx_frag_size;
523 struct atl1c_hw hw; 526 struct atl1c_hw hw;
524 struct atl1c_hw_stats hw_stats; 527 struct atl1c_hw_stats hw_stats;
525 struct mii_if_info mii; /* MII interface info */ 528 struct mii_if_info mii; /* MII interface info */
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 786a87483298..a36a760ada28 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -481,10 +481,15 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
481static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter, 481static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
482 struct net_device *dev) 482 struct net_device *dev)
483{ 483{
484 unsigned int head_size;
484 int mtu = dev->mtu; 485 int mtu = dev->mtu;
485 486
486 adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ? 487 adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
487 roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE; 488 roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
489
490 head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD) +
491 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
492 adapter->rx_frag_size = roundup_pow_of_two(head_size);
488} 493}
489 494
490static netdev_features_t atl1c_fix_features(struct net_device *netdev, 495static netdev_features_t atl1c_fix_features(struct net_device *netdev,
@@ -952,6 +957,10 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
952 kfree(adapter->tpd_ring[0].buffer_info); 957 kfree(adapter->tpd_ring[0].buffer_info);
953 adapter->tpd_ring[0].buffer_info = NULL; 958 adapter->tpd_ring[0].buffer_info = NULL;
954 } 959 }
960 if (adapter->rx_page) {
961 put_page(adapter->rx_page);
962 adapter->rx_page = NULL;
963 }
955} 964}
956 965
957/** 966/**
@@ -1639,6 +1648,35 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
1639 skb_checksum_none_assert(skb); 1648 skb_checksum_none_assert(skb);
1640} 1649}
1641 1650
1651static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
1652{
1653 struct sk_buff *skb;
1654 struct page *page;
1655
1656 if (adapter->rx_frag_size > PAGE_SIZE)
1657 return netdev_alloc_skb(adapter->netdev,
1658 adapter->rx_buffer_len);
1659
1660 page = adapter->rx_page;
1661 if (!page) {
1662 adapter->rx_page = page = alloc_page(GFP_ATOMIC);
1663 if (unlikely(!page))
1664 return NULL;
1665 adapter->rx_page_offset = 0;
1666 }
1667
1668 skb = build_skb(page_address(page) + adapter->rx_page_offset,
1669 adapter->rx_frag_size);
1670 if (likely(skb)) {
1671 adapter->rx_page_offset += adapter->rx_frag_size;
1672 if (adapter->rx_page_offset >= PAGE_SIZE)
1673 adapter->rx_page = NULL;
1674 else
1675 get_page(page);
1676 }
1677 return skb;
1678}
1679
1642static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter) 1680static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
1643{ 1681{
1644 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; 1682 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
@@ -1660,7 +1698,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
1660 while (next_info->flags & ATL1C_BUFFER_FREE) { 1698 while (next_info->flags & ATL1C_BUFFER_FREE) {
1661 rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use); 1699 rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
1662 1700
1663 skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len); 1701 skb = atl1c_alloc_skb(adapter);
1664 if (unlikely(!skb)) { 1702 if (unlikely(!skb)) {
1665 if (netif_msg_rx_err(adapter)) 1703 if (netif_msg_rx_err(adapter))
1666 dev_warn(&pdev->dev, "alloc rx buffer failed\n"); 1704 dev_warn(&pdev->dev, "alloc rx buffer failed\n");
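
The atl1c hunks switch small receive buffers to build_skb() over fractions of a page: rx_frag_size is the buffer length plus NET_SKB_PAD plus the skb_shared_info footer, cache-aligned and rounded up to a power of two, and the remainder of the page is handed out on the next allocation with get_page()/put_page() keeping the refcount right. With typical x86-64 values (assumed here: NET_SKB_PAD 64, skb_shared_info about 320 bytes, 64-byte cache lines) a 1536-byte buffer needs a 1920-byte head and therefore a 2048-byte fragment, two per 4 KiB page. A userspace check of that arithmetic:

        /* Userspace demo of the fragment sizing; the constants are
         * typical x86-64 values and only illustrative. */
        #include <stdio.h>

        #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

        static unsigned long pow2_roundup(unsigned long x)
        {
                unsigned long p = 1;

                while (p < x)
                        p <<= 1;
                return p;
        }

        int main(void)
        {
                unsigned long buf_len = 1536, pad = 64, shinfo = 320, cache = 64;
                unsigned long head = ALIGN_UP(buf_len + pad, cache) +
                                     ALIGN_UP(shinfo, cache);
                unsigned long frag = pow2_roundup(head);

                printf("head %lu -> frag %lu -> %lu per 4K page\n",
                       head, frag, 4096 / frag);  /* head 1920 -> frag 2048 -> 2 */
                return 0;
        }
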
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index dedbd76c033e..00b88cbfde25 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -486,7 +486,7 @@ struct bnx2x_fastpath {

 	struct napi_struct napi;

-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
 	unsigned int state;
 #define BNX2X_FP_STATE_IDLE 0
 #define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
@@ -498,7 +498,7 @@ struct bnx2x_fastpath {
 #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
 	/* protect state */
 	spinlock_t lock;
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */

 	union host_hc_status_block status_blk;
 	/* chip independent shortcuts into sb structure */
@@ -572,7 +572,7 @@ struct bnx2x_fastpath {
 #define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
 #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))

-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
 static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
 {
 	spin_lock_init(&fp->lock);
@@ -680,7 +680,7 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
 	return false;
 }
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */

 /* Use 2500 as a mini-jumbo MTU for FCoE */
 #define BNX2X_FCOE_MINI_JUMBO_MTU 2500
@@ -1333,6 +1333,8 @@ enum {
 	BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
 	BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
 	BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+	BNX2X_SP_RTNL_TX_STOP,
+	BNX2X_SP_RTNL_TX_RESUME,
 };

 struct bnx2x_prev_path_list {
@@ -1502,6 +1504,7 @@ struct bnx2x {
 #define BC_SUPPORTS_DCBX_MSG_NON_PMF	(1 << 21)
 #define IS_VF_FLAG			(1 << 22)
 #define INTERRUPTS_ENABLED_FLAG		(1 << 23)
+#define BC_SUPPORTS_RMMOD_CMD		(1 << 24)

 #define BP_NOMCP(bp)			((bp)->flags & NO_MCP_FLAG)

@@ -1830,6 +1833,8 @@ struct bnx2x {

 	int fp_array_size;
 	u32 dump_preset_idx;
+	bool stats_started;
+	struct semaphore stats_sema;
 };

 /* Tx queues may be less or equal to Rx queues */
@@ -2451,4 +2456,6 @@ enum bnx2x_pci_bus_speed {
 	BNX2X_PCI_LINK_SPEED_5000 = 5000,
 	BNX2X_PCI_LINK_SPEED_8000 = 8000
 };
+
+void bnx2x_set_local_cmng(struct bnx2x *bp);
 #endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ee350bde1818..f2d1ff10054b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3117,7 +3117,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
 	return work_done;
 }

-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
 /* must be called with local_bh_disable()d */
 int bnx2x_low_latency_recv(struct napi_struct *napi)
 {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 0c94df47e0e8..fcf2761d8828 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -30,10 +30,8 @@
 #include "bnx2x_dcb.h"

 /* forward declarations of dcbx related functions */
-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
 static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
 static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
 static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
 					   u32 *set_configuration_ets_pg,
 					   u32 *pri_pg_tbl);
@@ -425,30 +423,52 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
 		bnx2x_pfc_clear(bp);
 }

-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
 {
 	struct bnx2x_func_state_params func_params = {NULL};
+	int rc;

 	func_params.f_obj = &bp->func_obj;
 	func_params.cmd = BNX2X_F_CMD_TX_STOP;

+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
 	DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n");
-	return bnx2x_func_state_change(bp, &func_params);
+
+	rc = bnx2x_func_state_change(bp, &func_params);
+	if (rc) {
+		BNX2X_ERR("Unable to hold traffic for HW configuration\n");
+		bnx2x_panic();
+	}
+
+	return rc;
 }

-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
 {
 	struct bnx2x_func_state_params func_params = {NULL};
 	struct bnx2x_func_tx_start_params *tx_params =
 		&func_params.params.tx_start;
+	int rc;

 	func_params.f_obj = &bp->func_obj;
 	func_params.cmd = BNX2X_F_CMD_TX_START;

+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
 	bnx2x_dcbx_fw_struct(bp, tx_params);

 	DP(BNX2X_MSG_DCB, "START TRAFFIC\n");
-	return bnx2x_func_state_change(bp, &func_params);
+
+	rc = bnx2x_func_state_change(bp, &func_params);
+	if (rc) {
+		BNX2X_ERR("Unable to resume traffic after HW configuration\n");
+		bnx2x_panic();
+	}
+
+	return rc;
 }

 static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
@@ -744,7 +764,9 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 		if (IS_MF(bp))
 			bnx2x_link_sync_notify(bp);

-		bnx2x_dcbx_stop_hw_tx(bp);
+		set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
+
+		schedule_delayed_work(&bp->sp_rtnl_task, 0);

 		return;
 	}
@@ -753,7 +775,13 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 		bnx2x_pfc_set_pfc(bp);

 		bnx2x_dcbx_update_ets_params(bp);
-		bnx2x_dcbx_resume_hw_tx(bp);
+
+		/* ets may affect cmng configuration: reinit it in hw */
+		bnx2x_set_local_cmng(bp);
+
+		set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state);
+
+		schedule_delayed_work(&bp->sp_rtnl_task, 0);

 		return;
 	case BNX2X_DCBX_STATE_TX_RELEASED:
@@ -2363,21 +2391,24 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
 	case DCB_FEATCFG_ATTR_PG:
 		if (bp->dcbx_local_feat.ets.enabled)
 			*flags |= DCB_FEATCFG_ENABLE;
-		if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR)
+		if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR |
+				      DCBX_REMOTE_MIB_ERROR))
 			*flags |= DCB_FEATCFG_ERROR;
 		break;
 	case DCB_FEATCFG_ATTR_PFC:
 		if (bp->dcbx_local_feat.pfc.enabled)
 			*flags |= DCB_FEATCFG_ENABLE;
 		if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
-				      DCBX_LOCAL_PFC_MISMATCH))
+				      DCBX_LOCAL_PFC_MISMATCH |
+				      DCBX_REMOTE_MIB_ERROR))
 			*flags |= DCB_FEATCFG_ERROR;
 		break;
 	case DCB_FEATCFG_ATTR_APP:
 		if (bp->dcbx_local_feat.app.enabled)
 			*flags |= DCB_FEATCFG_ENABLE;
 		if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
-				      DCBX_LOCAL_APP_MISMATCH))
+				      DCBX_LOCAL_APP_MISMATCH |
+				      DCBX_REMOTE_MIB_ERROR))
 			*flags |= DCB_FEATCFG_ERROR;
 		break;
 	default:
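The bnx2x_dcb.c hunks above stop issuing the TX stop/resume ramrods from the DCBX notification path; the notifier now only sets BNX2X_SP_RTNL_TX_STOP/TX_RESUME in bp->sp_rtnl_state and kicks the sp_rtnl delayed work, which consumes each request with test_and_clear_bit() in a sleepable, rtnl-held context. A minimal userspace sketch of that set-a-bit-and-kick pattern, with a polling thread standing in for the kernel workqueue (all names here are invented for the example):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

enum { WORK_TX_STOP, WORK_TX_RESUME };	/* analogous to the BNX2X_SP_RTNL_* bits */

static atomic_uint pending;		/* analogous to bp->sp_rtnl_state */

/* test_and_clear_bit() analogue: atomically consume one request bit */
static int test_and_clear(unsigned int bit)
{
	unsigned int mask = 1u << bit;
	return (atomic_fetch_and(&pending, ~mask) & mask) != 0;
}

static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		if (test_and_clear(WORK_TX_STOP))
			printf("worker: stop HW tx (slow, sleepable context)\n");
		if (test_and_clear(WORK_TX_RESUME))
			printf("worker: resume HW tx\n");
		usleep(1000);		/* stand-in for the workqueue wakeup */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);

	/* "interrupt" path: just record the request and return immediately */
	atomic_fetch_or(&pending, 1u << WORK_TX_STOP);
	usleep(10000);
	atomic_fetch_or(&pending, 1u << WORK_TX_RESUME);
	usleep(10000);
	return 0;
}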
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 125bd1b6586f..804b8f64463e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -199,4 +199,7 @@ extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
 int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
 #endif /* BCM_DCBNL */

+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
+
 #endif /* BNX2X_DCB_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 5018e52ae2ad..32767f6aa33f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1300,6 +1300,9 @@ struct drv_func_mb {

 	#define DRV_MSG_CODE_EEE_RESULTS_ACK		0xda000000

+	#define DRV_MSG_CODE_RMMOD			0xdb000000
+	#define REQ_BC_VER_4_RMMOD_CMD			0x0007080f
+
 	#define DRV_MSG_CODE_SET_MF_BW			0xe0000000
 	#define REQ_BC_VER_4_SET_MF_BW			0x00060202
 	#define DRV_MSG_CODE_SET_MF_BW_ACK		0xe1000000
@@ -1372,6 +1375,8 @@ struct drv_func_mb {

 	#define FW_MSG_CODE_EEE_RESULS_ACK		0xda100000

+	#define FW_MSG_CODE_RMMOD_ACK			0xdb100000
+
 	#define FW_MSG_CODE_SET_MF_BW_SENT		0xe0000000
 	#define FW_MSG_CODE_SET_MF_BW_DONE		0xe1000000

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e5da07858a2f..8bdc8b973007 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2261,6 +2261,23 @@ static void bnx2x_set_requested_fc(struct bnx2x *bp)
 		bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
 }

+static void bnx2x_init_dropless_fc(struct bnx2x *bp)
+{
+	u32 pause_enabled = 0;
+
+	if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
+		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+			pause_enabled = 1;
+
+		REG_WR(bp, BAR_USTRORM_INTMEM +
+			   USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
+		       pause_enabled);
+	}
+
+	DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
+	   pause_enabled ? "enabled" : "disabled");
+}
+
 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
 {
 	int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
@@ -2294,6 +2311,8 @@ int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)

 		bnx2x_release_phy_lock(bp);

+		bnx2x_init_dropless_fc(bp);
+
 		bnx2x_calc_fc_adv(bp);

 		if (bp->link_vars.link_up) {
@@ -2315,6 +2334,8 @@ void bnx2x_link_set(struct bnx2x *bp)
 		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 		bnx2x_release_phy_lock(bp);

+		bnx2x_init_dropless_fc(bp);
+
 		bnx2x_calc_fc_adv(bp);
 	} else
 		BNX2X_ERR("Bootcode is missing - can not set link\n");
@@ -2476,7 +2497,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)

 	input.port_rate = bp->link_vars.line_speed;

-	if (cmng_type == CMNG_FNS_MINMAX) {
+	if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
 		int vn;

 		/* read mf conf from shmem */
@@ -2533,6 +2554,21 @@ static void storm_memset_cmng(struct bnx2x *bp,
 	}
 }

+/* init cmng mode in HW according to local configuration */
+void bnx2x_set_local_cmng(struct bnx2x *bp)
+{
+	int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
+
+	if (cmng_fns != CMNG_FNS_NONE) {
+		bnx2x_cmng_fns_init(bp, false, cmng_fns);
+		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+	} else {
+		/* rate shaping and fairness are disabled */
+		DP(NETIF_MSG_IFUP,
+		   "single function mode without fairness\n");
+	}
+}
+
 /* This function is called upon link interrupt */
 static void bnx2x_link_attn(struct bnx2x *bp)
 {
@@ -2541,20 +2577,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)

 	bnx2x_link_update(&bp->link_params, &bp->link_vars);

-	if (bp->link_vars.link_up) {
+	bnx2x_init_dropless_fc(bp);

-		/* dropless flow control */
-		if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
-			int port = BP_PORT(bp);
-			u32 pause_enabled = 0;
-
-			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
-				pause_enabled = 1;
-
-			REG_WR(bp, BAR_USTRORM_INTMEM +
-			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
-			       pause_enabled);
-		}
+	if (bp->link_vars.link_up) {

 		if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
 			struct host_port_stats *pstats;
@@ -2568,17 +2593,8 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 	}

-	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
-		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
-
-		if (cmng_fns != CMNG_FNS_NONE) {
-			bnx2x_cmng_fns_init(bp, false, cmng_fns);
-			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
-		} else
-			/* rate shaping and fairness are disabled */
-			DP(NETIF_MSG_IFUP,
-			   "single function mode without fairness\n");
-	}
+	if (bp->link_vars.link_up && bp->link_vars.line_speed)
+		bnx2x_set_local_cmng(bp);

 	__bnx2x_link_report(bp);

@@ -9639,6 +9655,12 @@ sp_rtnl_not_reset:
 			       &bp->sp_rtnl_state))
 		bnx2x_pf_set_vfs_vlan(bp);

+	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state))
+		bnx2x_dcbx_stop_hw_tx(bp);
+
+	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state))
+		bnx2x_dcbx_resume_hw_tx(bp);
+
 	/* work which needs rtnl lock not-taken (as it takes the lock itself and
 	 * can be called from other contexts as well)
 	 */
@@ -10362,6 +10384,10 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)

 	bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
 			BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
+
+	bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
+			BC_SUPPORTS_RMMOD_CMD : 0;
+
 	boot_mode = SHMEM_RD(bp,
 			dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
 			PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -11137,6 +11163,9 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp)
 	int tmp;
 	u32 cfg;

+	if (IS_VF(bp))
+		return 0;
+
 	if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
 		/* Take function: tmp = func */
 		tmp = BP_ABS_FUNC(bp);
@@ -11524,6 +11553,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 	mutex_init(&bp->port.phy_mutex);
 	mutex_init(&bp->fw_mb_mutex);
 	spin_lock_init(&bp->stats_lock);
+	sema_init(&bp->stats_sema, 1);

 	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
 	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -12026,7 +12056,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
 	.ndo_fcoe_get_wwn	= bnx2x_fcoe_get_wwn,
 #endif

-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
 	.ndo_busy_poll		= bnx2x_low_latency_recv,
 #endif
 };
@@ -12817,13 +12847,17 @@ static void __bnx2x_remove(struct pci_dev *pdev,
 	bnx2x_dcbnl_update_applist(bp, true);
 #endif

+	if (IS_PF(bp) &&
+	    !BP_NOMCP(bp) &&
+	    (bp->flags & BC_SUPPORTS_RMMOD_CMD))
+		bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
+
 	/* Close the interface - either directly or implicitly */
 	if (remove_netdev) {
 		unregister_netdev(dev);
 	} else {
 		rtnl_lock();
-		if (netif_running(dev))
-			bnx2x_close(dev);
+		dev_close(dev);
 		rtnl_unlock();
 	}

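bnx2x_get_common_hwinfo() latches BC_SUPPORTS_RMMOD_CMD once at probe time from the bootcode version, and __bnx2x_remove() later gates the DRV_MSG_CODE_RMMOD mailbox write on that flag. A compilable sketch of the same latch-then-gate pattern; the threshold constant matches the bnx2x_hsi.h hunk above, but the sample version values fed to it are made up for the example:

#include <stdio.h>

#define BC_SUPPORTS_RMMOD_CMD	(1u << 24)
#define REQ_BC_VER_4_RMMOD_CMD	0x0007080f	/* minimum bootcode: 7.8.15 */

struct dev_caps {
	unsigned int flags;
};

/* Latch capabilities once at probe time from the firmware version. */
static void caps_init(struct dev_caps *c, unsigned int bc_ver)
{
	c->flags |= (bc_ver >= REQ_BC_VER_4_RMMOD_CMD) ?
			BC_SUPPORTS_RMMOD_CMD : 0;
}

static void driver_remove(const struct dev_caps *c)
{
	/* Only firmware new enough understands the RMMOD mailbox command. */
	if (c->flags & BC_SUPPORTS_RMMOD_CMD)
		printf("sending RMMOD notification to firmware\n");
	else
		printf("old firmware: skipping RMMOD notification\n");
}

int main(void)
{
	struct dev_caps oldfw = { 0 }, newfw = { 0 };

	caps_init(&oldfw, 0x00070614);	/* hypothetical 7.6.20: predates the command */
	caps_init(&newfw, 0x00070810);	/* hypothetical 7.8.16 bootcode */
	driver_remove(&oldfw);
	driver_remove(&newfw);
	return 0;
}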
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 95861efb5051..ad83f4b48777 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1747,11 +1747,8 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)

 void bnx2x_iov_init_dmae(struct bnx2x *bp)
 {
-	DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
-	if (!IS_SRIOV(bp))
-		return;
-
-	REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
+	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
+		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
 }

 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
@@ -3084,8 +3081,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
 	pci_disable_sriov(bp->pdev);
 }

-static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
-			       struct bnx2x_virtf *vf)
+static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
+			     struct bnx2x_virtf **vf,
+			     struct pf_vf_bulletin_content **bulletin)
 {
 	if (bp->state != BNX2X_STATE_OPEN) {
 		BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3103,12 +3101,22 @@ static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
 		return -EINVAL;
 	}

-	if (!vf) {
+	/* init members */
+	*vf = BP_VF(bp, vfidx);
+	*bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+	if (!*vf) {
 		BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
 			  vfidx);
 		return -EINVAL;
 	}

+	if (!*bulletin) {
+		BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
+			  vfidx);
+		return -EINVAL;
+	}
+
 	return 0;
 }

@@ -3116,17 +3124,19 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
 			struct ifla_vf_info *ivi)
 {
 	struct bnx2x *bp = netdev_priv(dev);
-	struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
-	struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
-	struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
-	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+	struct bnx2x_virtf *vf = NULL;
+	struct pf_vf_bulletin_content *bulletin = NULL;
+	struct bnx2x_vlan_mac_obj *mac_obj;
+	struct bnx2x_vlan_mac_obj *vlan_obj;
 	int rc;

-	/* sanity */
-	rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+	/* sanity and init */
+	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
 	if (rc)
 		return rc;
-	if (!mac_obj || !vlan_obj || !bulletin) {
+	mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+	vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
+	if (!mac_obj || !vlan_obj) {
 		BNX2X_ERR("VF partially initialized\n");
 		return -EINVAL;
 	}
@@ -3183,11 +3193,11 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	int rc, q_logical_state;
-	struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
-	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+	struct bnx2x_virtf *vf = NULL;
+	struct pf_vf_bulletin_content *bulletin = NULL;

-	/* sanity */
-	rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+	/* sanity and init */
+	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
 	if (rc)
 		return rc;
 	if (!is_valid_ether_addr(mac)) {
@@ -3249,11 +3259,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	int rc, q_logical_state;
-	struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
-	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+	struct bnx2x_virtf *vf = NULL;
+	struct pf_vf_bulletin_content *bulletin = NULL;

-	/* sanity */
-	rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+	/* sanity and init */
+	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
 	if (rc)
 		return rc;

@@ -3463,7 +3473,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
 alloc_mem_err:
 	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
 		       sizeof(struct bnx2x_vf_mbx_msg));
-	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
 		       sizeof(union pf_vf_bulletin));
 	return -ENOMEM;
 }
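The bnx2x_vf_ndo_prep() change fixes an ordering bug: the old callers computed vf, bulletin, and even mac_obj/vlan_obj from a possibly out-of-range vfidx before any validation ran. The renamed helper validates first and only then fills the caller's pointers through double pointers, so derived lookups happen on a known-good VF. A small userspace sketch of that validate-then-fill out-parameter shape, with invented types:

#include <errno.h>
#include <stdio.h>

struct vf { int id; };
struct board {
	int state;
	struct vf *vfs;
	int num_vfs;
};

#define STATE_OPEN 1

/* Validate common preconditions and hand back the looked-up object,
 * so every caller shares one copy of the sanity checks. */
static int vf_prep(struct board *bp, int vfidx, struct vf **vf)
{
	if (bp->state != STATE_OPEN) {
		fprintf(stderr, "ndo called though PF is down\n");
		return -EINVAL;
	}
	if (vfidx < 0 || vfidx >= bp->num_vfs) {
		fprintf(stderr, "bad vf index %d\n", vfidx);
		return -EINVAL;
	}
	*vf = &bp->vfs[vfidx];
	return 0;
}

static int set_vf_mac(struct board *bp, int vfidx)
{
	struct vf *vf = NULL;
	int rc = vf_prep(bp, vfidx, &vf);

	if (rc)
		return rc;
	printf("configuring MAC for vf %d\n", vf->id);
	return 0;
}

int main(void)
{
	struct vf vfs[2] = { { 0 }, { 1 } };
	struct board bp = { STATE_OPEN, vfs, 2 };

	set_vf_mac(&bp, 1);	/* ok */
	set_vf_mac(&bp, 7);	/* rejected by the shared prep helper */
	return 0;
}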
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 98366abd02bd..d63d1327b051 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -221,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
 * Statistics service functions
 */

-static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
 {
 	struct dmae_command *dmae;
 	u32 opcode;
@@ -518,7 +519,8 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
 	*stats_comp = 0;
 }

-static void bnx2x_stats_start(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_start(struct bnx2x *bp)
 {
 	/* vfs travel through here as part of the statistics FSM, but no action
 	 * is required
@@ -534,13 +536,34 @@ static void bnx2x_stats_start(struct bnx2x *bp)

 	bnx2x_hw_stats_post(bp);
 	bnx2x_storm_stats_post(bp);
+
+	bp->stats_started = true;
+}
+
+static void bnx2x_stats_start(struct bnx2x *bp)
+{
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
+	__bnx2x_stats_start(bp);
+	up(&bp->stats_sema);
 }

 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
 {
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
 	bnx2x_stats_comp(bp);
-	bnx2x_stats_pmf_update(bp);
-	bnx2x_stats_start(bp);
+	__bnx2x_stats_pmf_update(bp);
+	__bnx2x_stats_start(bp);
+	up(&bp->stats_sema);
+}
+
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+{
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
+	__bnx2x_stats_pmf_update(bp);
+	up(&bp->stats_sema);
 }

 static void bnx2x_stats_restart(struct bnx2x *bp)
@@ -550,8 +573,11 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
 	 */
 	if (IS_VF(bp))
 		return;
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
 	bnx2x_stats_comp(bp);
-	bnx2x_stats_start(bp);
+	__bnx2x_stats_start(bp);
+	up(&bp->stats_sema);
 }

 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@@ -888,9 +914,7 @@ static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
 	/* Make sure we use the value of the counter
 	 * used for sending the last stats ramrod.
 	 */
-	spin_lock_bh(&bp->stats_lock);
 	cur_stats_counter = bp->stats_counter - 1;
-	spin_unlock_bh(&bp->stats_lock);

 	/* are storm stats valid? */
 	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
@@ -1227,12 +1251,18 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 {
 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

-	if (bnx2x_edebug_stats_stopped(bp))
+	/* we run update from timer context, so give up
+	 * if somebody is in the middle of transition
+	 */
+	if (down_trylock(&bp->stats_sema))
 		return;

+	if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
+		goto out;
+
 	if (IS_PF(bp)) {
 		if (*stats_comp != DMAE_COMP_VAL)
-			return;
+			goto out;

 		if (bp->port.pmf)
 			bnx2x_hw_stats_update(bp);
@@ -1242,7 +1272,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 			BNX2X_ERR("storm stats were not updated for 3 times\n");
 			bnx2x_panic();
 		}
-			return;
+			goto out;
 		}
 	} else {
 		/* vf doesn't collect HW statistics, and doesn't get completions
@@ -1256,7 +1286,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)

 	/* vf is done */
 	if (IS_VF(bp))
-		return;
+		goto out;

 	if (netif_msg_timer(bp)) {
 		struct bnx2x_eth_stats *estats = &bp->eth_stats;
@@ -1267,6 +1297,9 @@ static void bnx2x_stats_update(struct bnx2x *bp)

 	bnx2x_hw_stats_post(bp);
 	bnx2x_storm_stats_post(bp);
+
+out:
+	up(&bp->stats_sema);
 }

 static void bnx2x_port_stats_stop(struct bnx2x *bp)
@@ -1332,6 +1365,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
 {
 	int update = 0;

+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
+
+	bp->stats_started = false;
+
 	bnx2x_stats_comp(bp);

 	if (bp->port.pmf)
@@ -1348,6 +1386,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
 		bnx2x_hw_stats_post(bp);
 		bnx2x_stats_comp(bp);
 	}
+
+	up(&bp->stats_sema);
 }

 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@@ -1376,15 +1416,17 @@ static const struct {
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
 {
 	enum bnx2x_stats_state state;
+	void (*action)(struct bnx2x *bp);
 	if (unlikely(bp->panic))
 		return;

 	spin_lock_bh(&bp->stats_lock);
 	state = bp->stats_state;
 	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+	action = bnx2x_stats_stm[state][event].action;
 	spin_unlock_bh(&bp->stats_lock);

-	bnx2x_stats_stm[state][event].action(bp);
+	action(bp);

 	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
 		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
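Two patterns in the bnx2x_stats.c hunks are worth isolating. The timer-driven bnx2x_stats_update() uses down_trylock() so it skips a tick rather than block, while the slow transitions use down_timeout(); and bnx2x_stats_handle() now snapshots the FSM action pointer while still holding stats_lock, then invokes it outside the lock so the action itself may sleep on the semaphore. A userspace sketch of both, assuming a POSIX semaphore and mutex as stand-ins for the kernel primitives:

#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

static sem_t stats_sem;			/* analogous to bp->stats_sema */
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int stats_state;
static bool stats_started;

static void do_update(void) { printf("update in state %d\n", stats_state); }
static void do_nothing(void) { }

static void (*actions[2])(void) = { do_nothing, do_update };

/* Timer-context path: never block, just skip this tick if busy. */
static void stats_timer_tick(void)
{
	if (sem_trywait(&stats_sem))	/* like down_trylock() */
		return;
	if (stats_started)
		do_update();
	sem_post(&stats_sem);		/* like up() */
}

/* Event path: pick next state and action under the lock, call outside it,
 * so the action may itself take the semaphore without deadlocking. */
static void stats_handle(int event)
{
	void (*action)(void);

	pthread_mutex_lock(&state_lock);
	stats_state = event;		/* trivial stand-in for the FSM table */
	action = actions[event & 1];
	pthread_mutex_unlock(&state_lock);

	action();
}

int main(void)
{
	sem_init(&stats_sem, 0, 1);
	stats_started = true;
	stats_handle(1);
	stats_timer_tick();
	sem_destroy(&stats_sem);
	return 0;
}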
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index d964f302ac94..0da2214ef1b9 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -17625,7 +17625,8 @@ err_out_free_res:
 	pci_release_regions(pdev);

 err_out_disable_pdev:
-	pci_disable_device(pdev);
+	if (pci_is_enabled(pdev))
+		pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 	return err;
 }
@@ -17773,7 +17774,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,

 	rtnl_lock();

-	if (!netif_running(netdev))
+	/* We probably don't have netdev yet */
+	if (!netdev || !netif_running(netdev))
 		goto done;

 	tg3_phy_stop(tp);
@@ -17794,8 +17796,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,

 done:
 	if (state == pci_channel_io_perm_failure) {
-		tg3_napi_enable(tp);
-		dev_close(netdev);
+		if (netdev) {
+			tg3_napi_enable(tp);
+			dev_close(netdev);
+		}
 		err = PCI_ERS_RESULT_DISCONNECT;
 	} else {
 		pci_disable_device(pdev);
@@ -17825,7 +17829,8 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
 	rtnl_lock();

 	if (pci_enable_device(pdev)) {
-		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
+		dev_err(&pdev->dev,
+			"Cannot re-enable PCI device after reset.\n");
 		goto done;
 	}

@@ -17833,7 +17838,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
 	pci_restore_state(pdev);
 	pci_save_state(pdev);

-	if (!netif_running(netdev)) {
+	if (!netdev || !netif_running(netdev)) {
 		rc = PCI_ERS_RESULT_RECOVERED;
 		goto done;
 	}
@@ -17845,7 +17850,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
 	rc = PCI_ERS_RESULT_RECOVERED;

 done:
-	if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) {
+	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
 		tg3_napi_enable(tp);
 		dev_close(netdev);
 	}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 687ec4a8bb48..9c89dc8fe105 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -455,11 +455,6 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
 		q->pg_chunk.offset = 0;
 		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
 				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
-		if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
-			__free_pages(q->pg_chunk.page, order);
-			q->pg_chunk.page = NULL;
-			return -EIO;
-		}
 		q->pg_chunk.mapping = mapping;
 	}
 	sd->pg_chunk = q->pg_chunk;
@@ -954,75 +949,40 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
 	return flits_to_desc(flits);
 }

-
-/*	map_skb - map a packet main body and its page fragments
- *	@pdev: the PCI device
- *	@skb: the packet
- *	@addr: placeholder to save the mapped addresses
- *
- *	map the main body of an sk_buff and its page fragments, if any.
- */
-static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
-		   dma_addr_t *addr)
-{
-	const skb_frag_t *fp, *end;
-	const struct skb_shared_info *si;
-
-	*addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
-			       PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(pdev, *addr))
-		goto out_err;
-
-	si = skb_shinfo(skb);
-	end = &si->frags[si->nr_frags];
-
-	for (fp = si->frags; fp < end; fp++) {
-		*++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
-					   DMA_TO_DEVICE);
-		if (pci_dma_mapping_error(pdev, *addr))
-			goto unwind;
-	}
-	return 0;
-
-unwind:
-	while (fp-- > si->frags)
-		dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
-			       DMA_TO_DEVICE);
-
-	pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
-out_err:
-	return -ENOMEM;
-}
-
 /**
- *	write_sgl - populate a scatter/gather list for a packet
+ *	make_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@sgp: the SGL to populate
 *	@start: start address of skb main body data to include in the SGL
 *	@len: length of skb main body data to include in the SGL
- *	@addr: the list of the mapped addresses
+ *	@pdev: the PCI device
 *
- *	Copies the scatter/gather list for the buffers that make up a packet
+ *	Generates a scatter/gather list for the buffers that make up a packet
 *	and returns the SGL size in 8-byte words.  The caller must size the SGL
 *	appropriately.
 */
-static inline unsigned int write_sgl(const struct sk_buff *skb,
+static inline unsigned int make_sgl(const struct sk_buff *skb,
 				    struct sg_ent *sgp, unsigned char *start,
-				    unsigned int len, const dma_addr_t *addr)
+				    unsigned int len, struct pci_dev *pdev)
 {
-	unsigned int i, j = 0, k = 0, nfrags;
+	dma_addr_t mapping;
+	unsigned int i, j = 0, nfrags;

 	if (len) {
+		mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
 		sgp->len[0] = cpu_to_be32(len);
-		sgp->addr[j++] = cpu_to_be64(addr[k++]);
+		sgp->addr[0] = cpu_to_be64(mapping);
+		j = 1;
 	}

 	nfrags = skb_shinfo(skb)->nr_frags;
 	for (i = 0; i < nfrags; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

+		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+					   DMA_TO_DEVICE);
 		sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
-		sgp->addr[j] = cpu_to_be64(addr[k++]);
+		sgp->addr[j] = cpu_to_be64(mapping);
 		j ^= 1;
 		if (j == 0)
 			++sgp;
@@ -1178,7 +1138,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
 			    const struct port_info *pi,
 			    unsigned int pidx, unsigned int gen,
 			    struct sge_txq *q, unsigned int ndesc,
-			    unsigned int compl, const dma_addr_t *addr)
+			    unsigned int compl)
 {
 	unsigned int flits, sgl_flits, cntrl, tso_info;
 	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
@@ -1236,7 +1196,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
 	}

 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
-	sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
+	sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);

 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
 			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
@@ -1267,7 +1227,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct netdev_queue *txq;
 	struct sge_qset *qs;
 	struct sge_txq *q;
-	dma_addr_t addr[MAX_SKB_FRAGS + 1];

 	/*
 	 * The chip min packet length is 9 octets but play safe and reject
@@ -1296,11 +1255,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}

-	if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
-		dev_kfree_skb(skb);
-		return NETDEV_TX_OK;
-	}
-
 	q->in_use += ndesc;
 	if (unlikely(credits - ndesc < q->stop_thres)) {
 		t3_stop_tx_queue(txq, qs, q);
@@ -1358,7 +1312,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (likely(!skb_shared(skb)))
 		skb_orphan(skb);

-	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
+	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
 	check_ring_tx_db(adap, q);
 	return NETDEV_TX_OK;
 }
@@ -1623,8 +1577,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
 */
 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
 			  struct sge_txq *q, unsigned int pidx,
-			  unsigned int gen, unsigned int ndesc,
-			  const dma_addr_t *addr)
+			  unsigned int gen, unsigned int ndesc)
 {
 	unsigned int sgl_flits, flits;
 	struct work_request_hdr *from;
@@ -1645,9 +1598,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,

 	flits = skb_transport_offset(skb) / 8;
 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
-	sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
-			      skb_tail_pointer(skb) -
-			      skb_transport_header(skb), addr);
+	sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+			     skb->tail - skb->transport_header,
+			     adap->pdev);
 	if (need_skb_unmap()) {
 		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
 		skb->destructor = deferred_unmap_destructor;
@@ -1705,11 +1658,6 @@ again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 		goto again;
 	}

-	if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
-		spin_unlock(&q->lock);
-		return NET_XMIT_SUCCESS;
-	}
-
 	gen = q->gen;
 	q->in_use += ndesc;
 	pidx = q->pidx;
@@ -1720,7 +1668,7 @@ again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 	}
 	spin_unlock(&q->lock);

-	write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
+	write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
 	check_ring_tx_db(adap, q);
 	return NET_XMIT_SUCCESS;
 }
@@ -1738,7 +1686,6 @@ static void restart_offloadq(unsigned long data)
 	struct sge_txq *q = &qs->txq[TXQ_OFLD];
 	const struct port_info *pi = netdev_priv(qs->netdev);
 	struct adapter *adap = pi->adapter;
-	unsigned int written = 0;

 	spin_lock(&q->lock);
 again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
@@ -1758,14 +1705,10 @@ again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 			break;
 		}

-		if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
-			break;
-
 		gen = q->gen;
 		q->in_use += ndesc;
 		pidx = q->pidx;
 		q->pidx += ndesc;
-		written += ndesc;
 		if (q->pidx >= q->size) {
 			q->pidx -= q->size;
 			q->gen ^= 1;
@@ -1773,8 +1716,7 @@ again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 		__skb_unlink(skb, &q->sendq);
 		spin_unlock(&q->lock);

-		write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
-			      (dma_addr_t *)skb->head);
+		write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
 		spin_lock(&q->lock);
 	}
 	spin_unlock(&q->lock);
@@ -1784,9 +1726,8 @@ again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 	set_bit(TXQ_LAST_PKT_DB, &q->flags);
 #endif
 	wmb();
-	if (likely(written))
-		t3_write_reg(adap, A_SG_KDOORBELL,
-			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+	t3_write_reg(adap, A_SG_KDOORBELL,
+		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 }

 /**
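make_sgl() folds the DMA mapping back into SGL construction (this change also drops the separate map_skb() pass along with its mapping-error unwinding). The packing itself hinges on each struct sg_ent holding two len/addr pairs, which is what the j ^= 1 toggle walks: slot 0, slot 1, then advance to the next entry. A self-contained sketch of just that packing arithmetic, with a simplified sg_ent invented for the example:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Each hardware SG entry holds two length/address pairs, as in
 * cxgb3's struct sg_ent. */
struct sg_ent {
	uint32_t len[2];
	uint64_t addr[2];
};

/* Pack n buffers two-per-entry; returns how many entries were touched. */
static unsigned int pack_sgl(struct sg_ent *sgp, const uint64_t *bufs,
			     const uint32_t *lens, unsigned int n)
{
	struct sg_ent *start = sgp;
	unsigned int i, j = 0;

	for (i = 0; i < n; i++) {
		sgp->len[j] = lens[i];
		sgp->addr[j] = bufs[i];
		j ^= 1;			/* toggle between the two slots */
		if (j == 0)
			++sgp;		/* both slots used: move to next entry */
	}
	if (j)
		sgp->len[j] = 0;	/* mark the unused second slot */
	return (unsigned int)(sgp - start) + (j ? 1 : 0);
}

int main(void)
{
	struct sg_ent sgl[4];
	uint64_t bufs[3] = { 0x1000, 0x2000, 0x3000 };
	uint32_t lens[3] = { 64, 128, 256 };
	unsigned int used;

	memset(sgl, 0, sizeof(sgl));
	used = pack_sgl(sgl, bufs, lens, 3);
	printf("%u entries used; entry0 = {%u,%u}, entry1 = {%u,-}\n",
	       used, sgl[0].len[0], sgl[0].len[1], sgl[1].len[0]);
	return 0;
}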
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 6e6e0a117ee2..8ec5d74ad44d 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -3048,6 +3048,9 @@ int be_cmd_get_func_config(struct be_adapter *adapter)

 		adapter->max_event_queues = le16_to_cpu(desc->eq_count);
 		adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
+
+		/* Clear flags that driver is not interested in */
+		adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT;
 	}
 err:
 	mutex_unlock(&adapter->mbox_lock);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 5228d88c5a02..1b3b9e886412 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -563,6 +563,12 @@ enum be_if_flags {
 	BE_IF_FLAGS_MULTICAST = 0x1000
 };

+#define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\
+			 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\
+			 BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\
+			 BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\
+			 BE_IF_FLAGS_UNTAGGED)
+
 /* An RX interface is an object with one or more MAC addresses and
 * filtering capabilities. */
 struct be_cmd_req_if_create {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 181edb522450..4559c35eea13 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2563,8 +2563,8 @@ static int be_close(struct net_device *netdev)
 	/* Wait for all pending tx completions to arrive so that
 	 * all tx skbs are freed.
 	 */
-	be_tx_compl_clean(adapter);
 	netif_tx_disable(netdev);
+	be_tx_compl_clean(adapter);

 	be_rx_qs_destroy(adapter);

diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 2b0a0ea4f8e7..ae236009f1a8 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -259,6 +259,7 @@ struct bufdesc_ex {
 struct fec_enet_delayed_work {
 	struct delayed_work delay_work;
 	bool timeout;
+	bool trig_tx;
 };

 /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d3ad5ea711d3..77ea0db0bbfc 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -93,6 +93,20 @@ static void set_multicast_list(struct net_device *ndev);
 #define FEC_QUIRK_HAS_CSUM		(1 << 5)
 /* Controller has hardware vlan support */
 #define FEC_QUIRK_HAS_VLAN		(1 << 6)
+/* ENET IP errata ERR006358
+ *
+ * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
+ * detected as not set during a prior frame transmission, then the
+ * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
+ * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
+ * frames not being transmitted until there is a 0-to-1 transition on
+ * ENET_TDAR[TDAR].
+ */
+#define FEC_QUIRK_ERR006358		(1 << 7)

 static struct platform_device_id fec_devtype[] = {
 	{
@@ -112,7 +126,7 @@ static struct platform_device_id fec_devtype[] = {
 		.name = "imx6q-fec",
 		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
 				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
-				FEC_QUIRK_HAS_VLAN,
+				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
 	}, {
 		.name = "mvf600-fec",
 		.driver_data = FEC_QUIRK_ENET_MAC,
@@ -275,16 +289,11 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
 				platform_get_device_id(fep->pdev);
-	struct bufdesc *bdp;
+	struct bufdesc *bdp, *bdp_pre;
 	void *bufaddr;
 	unsigned short status;
 	unsigned int index;

-	if (!fep->link) {
-		/* Link is down or auto-negotiation is in progress. */
-		return NETDEV_TX_BUSY;
-	}
-
 	/* Fill in a Tx ring entry */
 	bdp = fep->cur_tx;

@@ -370,6 +379,15 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			ebdp->cbd_esc |= BD_ENET_TX_PINS;
 		}
 	}
+
+	bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
+	    !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
+		fep->delay_work.trig_tx = true;
+		schedule_delayed_work(&(fep->delay_work.delay_work),
+					msecs_to_jiffies(1));
+	}
+
 	/* If this was the last BD in the ring, start at the beginning again. */
 	if (status & BD_ENET_TX_WRAP)
 		bdp = fep->tx_bd_base;
@@ -689,6 +707,11 @@ static void fec_enet_work(struct work_struct *work)
 		fec_restart(fep->netdev, fep->full_duplex);
 		netif_wake_queue(fep->netdev);
 	}
+
+	if (fep->delay_work.trig_tx) {
+		fep->delay_work.trig_tx = false;
+		writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+	}
 }

 static void
@@ -2279,4 +2302,5 @@ static struct platform_driver fec_driver = {

 module_platform_driver(fec_driver);

+MODULE_ALIAS("platform:"DRIVER_NAME);
 MODULE_LICENSE("GPL");
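The ERR006358 workaround keys off the previous ring descriptor: if its READY bit was already observed clear when a new frame is queued, the TDAR doorbell write can be swallowed, so the driver arms delayed work that rewrites FEC_X_DES_ACTIVE about 1 ms later. A compilable sketch of the wrap-aware previous-descriptor check that drives this decision, with ring constants invented for the example:

#include <stdio.h>

#define RING_SIZE 8
#define TX_READY  0x8000

struct bufdesc { unsigned short sc; };

static struct bufdesc ring[RING_SIZE];

/* Previous descriptor in the ring, accounting for wrap-around,
 * in the spirit of fec_enet_get_prevdesc(). */
static struct bufdesc *prevdesc(struct bufdesc *bdp)
{
	return (bdp == &ring[0]) ? &ring[RING_SIZE - 1] : bdp - 1;
}

/* Decide whether the ERR006358 workaround must be armed: if the
 * previous descriptor was already consumed (READY clear), a plain
 * doorbell write may be lost, so a delayed re-kick is scheduled. */
static int needs_rekick(struct bufdesc *cur)
{
	return !(prevdesc(cur)->sc & TX_READY);
}

int main(void)
{
	ring[2].sc = TX_READY;	/* descriptor 2 still owned by hardware */

	printf("desc 3 needs re-kick: %d\n", needs_rekick(&ring[3]));	/* 0 */
	printf("desc 0 needs re-kick: %d\n", needs_rekick(&ring[0]));	/* 1 */
	return 0;
}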
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 6a0c1b66ce54..c1d72c03cb59 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3739,9 +3739,8 @@ static void igb_set_rx_mode(struct net_device *netdev)
 	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

 	if (netdev->flags & IFF_PROMISC) {
-		u32 mrqc = rd32(E1000_MRQC);
 		/* retain VLAN HW filtering if in VT mode */
-		if (mrqc & E1000_MRQC_ENABLE_VMDQ)
+		if (adapter->vfs_allocated_count)
 			rctl |= E1000_RCTL_VFE;
 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
 		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 7be725cdfea8..a6494e5daffe 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -54,7 +54,7 @@

 #include <net/busy_poll.h>

-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
 #define LL_EXTENDED_STATS
 #endif
 /* common prefix used by pr_<> macros */
@@ -366,7 +366,7 @@ struct ixgbe_q_vector {
 	struct rcu_head rcu;	/* to avoid race with update stats on free */
 	char name[IFNAMSIZ + 9];

-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
 	unsigned int state;
 #define IXGBE_QV_STATE_IDLE 0
 #define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
@@ -377,12 +377,12 @@ struct ixgbe_q_vector {
 #define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
 #define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
 	spinlock_t lock;
-#endif  /* CONFIG_NET_LL_RX_POLL */
+#endif  /* CONFIG_NET_RX_BUSY_POLL */

 	/* for dynamic allocation of rings associated with this q_vector */
 	struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
 };
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
 static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
 {

@@ -462,7 +462,7 @@ static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
 	WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
 	return q_vector->state & IXGBE_QV_USER_PEND;
 }
-#else /* CONFIG_NET_LL_RX_POLL */
+#else /* CONFIG_NET_RX_BUSY_POLL */
 static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
 {
 }
@@ -491,7 +491,7 @@ static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
 {
 	return false;
 }
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */

 #ifdef CONFIG_IXGBE_HWMON

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index ac780770863d..7a77f37a7cbc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -108,9 +108,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
 
 	/* Enable arbiter */
 	reg &= ~IXGBE_DPMCS_ARBDIS;
-	/* Enable DFP and Recycle mode */
-	reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
 	reg |= IXGBE_DPMCS_TSOEF;
+
 	/* Configure Max TSO packet size 34KB including payload and headers */
 	reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bad8f14b1941..be4b1fb3d0d2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1998,7 +1998,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	return total_rx_packets;
 }
 
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
 /* must be called with local_bh_disable()d */
 static int ixgbe_low_latency_recv(struct napi_struct *napi)
 {
@@ -2030,7 +2030,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi)
 
 	return found;
 }
-#endif	/* CONFIG_NET_LL_RX_POLL */
+#endif	/* CONFIG_NET_RX_BUSY_POLL */
 
 /**
  * ixgbe_configure_msix - Configure MSI-X hardware
@@ -7227,7 +7227,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ixgbe_netpoll,
 #endif
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
 	.ndo_busy_poll		= ixgbe_low_latency_recv,
 #endif
 #ifdef IXGBE_FCOE
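
All the ixgbe, mlx4 and qlcnic guard changes in this series belong to the tree-wide rename of CONFIG_NET_LL_RX_POLL to CONFIG_NET_RX_BUSY_POLL; only the config symbol changes, not the low-latency receive logic. For kernels of this vintage the wiring pattern looks like the hedged sketch below (driver names hypothetical; .ndo_busy_poll is the hook of that era):

    #include <linux/netdevice.h>

    #ifdef CONFIG_NET_RX_BUSY_POLL
    /* must be called with local_bh_disable()d, like the handlers above */
    static int hypo_busy_poll(struct napi_struct *napi)
    {
            /* poll one RX ring directly instead of waiting for the NAPI
             * softirq; return the number of packets processed (0: sketch)
             */
            return 0;
    }
    #endif

    static const struct net_device_ops hypo_netdev_ops = {
    #ifdef CONFIG_NET_RX_BUSY_POLL
            .ndo_busy_poll = hypo_busy_poll,
    #endif
    };
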
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 712779fb12b7..b017818bccae 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -88,6 +88,8 @@
 #define MVNETA_TX_IN_PRGRS                   BIT(1)
 #define MVNETA_TX_FIFO_EMPTY                 BIT(8)
 #define MVNETA_RX_MIN_FRAME_SIZE             0x247c
+#define MVNETA_SGMII_SERDES_CFG              0x24A0
+#define MVNETA_SGMII_SERDES_PROTO            0x0cc7
 #define MVNETA_TYPE_PRIO                     0x24bc
 #define   MVNETA_FORCE_UNI                   BIT(21)
 #define MVNETA_TXQ_CMD_1                     0x24e4
@@ -655,6 +657,8 @@ static void mvneta_port_sgmii_config(struct mvneta_port *pp)
 	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
 	val |= MVNETA_GMAC2_PSC_ENABLE;
 	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+
+	mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
 }
 
 /* Start the Ethernet port RX and TX activity */
@@ -2728,28 +2732,24 @@ static int mvneta_probe(struct platform_device *pdev)
 
 	pp = netdev_priv(dev);
 
-	pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
-	init_timer(&pp->tx_done_timer);
-	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
-
 	pp->weight = MVNETA_RX_POLL_WEIGHT;
 	pp->phy_node = phy_node;
 	pp->phy_interface = phy_mode;
 
-	pp->base = of_iomap(dn, 0);
-	if (pp->base == NULL) {
-		err = -ENOMEM;
-		goto err_free_irq;
-	}
-
 	pp->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(pp->clk)) {
 		err = PTR_ERR(pp->clk);
-		goto err_unmap;
+		goto err_free_irq;
 	}
 
 	clk_prepare_enable(pp->clk);
 
+	pp->base = of_iomap(dn, 0);
+	if (pp->base == NULL) {
+		err = -ENOMEM;
+		goto err_clk;
+	}
+
 	dt_mac_addr = of_get_mac_address(dn);
 	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
 		mac_from = "device tree";
@@ -2766,6 +2766,9 @@ static int mvneta_probe(struct platform_device *pdev)
 	}
 
 	pp->tx_done_timer.data = (unsigned long)dev;
+	pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
+	init_timer(&pp->tx_done_timer);
+	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
 
 	pp->tx_ring_size = MVNETA_MAX_TXD;
 	pp->rx_ring_size = MVNETA_MAX_RXD;
@@ -2776,7 +2779,7 @@ static int mvneta_probe(struct platform_device *pdev)
 	err = mvneta_init(pp, phy_addr);
 	if (err < 0) {
 		dev_err(&pdev->dev, "can't init eth hal\n");
-		goto err_clk;
+		goto err_unmap;
 	}
 	mvneta_port_power_up(pp, phy_mode);
 
@@ -2806,10 +2809,10 @@ static int mvneta_probe(struct platform_device *pdev)
 
 err_deinit:
 	mvneta_deinit(pp);
-err_clk:
-	clk_disable_unprepare(pp->clk);
 err_unmap:
 	iounmap(pp->base);
+err_clk:
+	clk_disable_unprepare(pp->clk);
 err_free_irq:
 	irq_dispose_mapping(dev->irq);
 err_free_netdev:
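
The mvneta probe rework is a standard acquisition-order fix: of_iomap() now happens after clk_prepare_enable(), so each failure jumps to a label that undoes exactly what was already acquired, and the err_unmap/err_clk labels swap to keep teardown in reverse order. A hedged sketch of the idiom (helper name hypothetical):

    #include <linux/clk.h>
    #include <linux/io.h>
    #include <linux/of_address.h>
    #include <linux/platform_device.h>

    static int hypo_init_hw(void __iomem *base) { return 0; } /* stub */

    static int hypo_probe(struct platform_device *pdev)
    {
            struct clk *clk;
            void __iomem *base;
            int err;

            clk = devm_clk_get(&pdev->dev, NULL);   /* acquire A */
            if (IS_ERR(clk))
                    return PTR_ERR(clk);
            clk_prepare_enable(clk);

            base = of_iomap(pdev->dev.of_node, 0);  /* acquire B */
            if (!base) {
                    err = -ENOMEM;
                    goto err_clk;                   /* undo A only */
            }

            err = hypo_init_hw(base);
            if (err)
                    goto err_unmap;                 /* undo B, then A */

            return 0;

    err_unmap:
            iounmap(base);
    err_clk:
            clk_disable_unprepare(clk);
            return err;
    }
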
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index c896079728e1..ef94a591f9e5 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
 }
 
 /* Allocate and setup a new buffer for receiving */
-static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+static int skge_rx_setup(struct skge_port *skge, struct skge_element *e,
 			  struct sk_buff *skb, unsigned int bufsize)
 {
 	struct skge_rx_desc *rd = e->desc;
-	u64 map;
+	dma_addr_t map;
 
 	map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
 			     PCI_DMA_FROMDEVICE);
 
-	rd->dma_lo = map;
-	rd->dma_hi = map >> 32;
+	if (pci_dma_mapping_error(skge->hw->pdev, map))
+		return -1;
+
+	rd->dma_lo = lower_32_bits(map);
+	rd->dma_hi = upper_32_bits(map);
 	e->skb = skb;
 	rd->csum1_start = ETH_HLEN;
 	rd->csum2_start = ETH_HLEN;
@@ -953,6 +956,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
 	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
 	dma_unmap_addr_set(e, mapaddr, map);
 	dma_unmap_len_set(e, maplen, bufsize);
+	return 0;
 }
 
 /* Resume receiving using existing skb,
@@ -1014,7 +1018,10 @@ static int skge_rx_fill(struct net_device *dev)
 			return -ENOMEM;
 
 		skb_reserve(skb, NET_IP_ALIGN);
-		skge_rx_setup(skge, e, skb, skge->rx_buf_size);
+		if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) {
+			dev_kfree_skb(skb);
+			return -EIO;
+		}
 	} while ((e = e->next) != ring->start);
 
 	ring->to_clean = ring->start;
@@ -2544,7 +2551,7 @@ static int skge_up(struct net_device *dev)
 
 	BUG_ON(skge->dma & 7);
 
-	if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
+	if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) {
 		dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
 		err = -EINVAL;
 		goto free_pci_mem;
@@ -2729,7 +2736,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	struct skge_tx_desc *td;
 	int i;
 	u32 control, len;
-	u64 map;
+	dma_addr_t map;
 
 	if (skb_padto(skb, ETH_ZLEN))
 		return NETDEV_TX_OK;
@@ -2743,11 +2750,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	e->skb = skb;
 	len = skb_headlen(skb);
 	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(hw->pdev, map))
+		goto mapping_error;
+
 	dma_unmap_addr_set(e, mapaddr, map);
 	dma_unmap_len_set(e, maplen, len);
 
-	td->dma_lo = map;
-	td->dma_hi = map >> 32;
+	td->dma_lo = lower_32_bits(map);
+	td->dma_hi = upper_32_bits(map);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		const int offset = skb_checksum_start_offset(skb);
@@ -2778,14 +2788,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 
 			map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
 					       skb_frag_size(frag), DMA_TO_DEVICE);
+			if (dma_mapping_error(&hw->pdev->dev, map))
+				goto mapping_unwind;
 
 			e = e->next;
 			e->skb = skb;
 			tf = e->desc;
 			BUG_ON(tf->control & BMU_OWN);
 
-			tf->dma_lo = map;
-			tf->dma_hi = (u64) map >> 32;
+			tf->dma_lo = lower_32_bits(map);
+			tf->dma_hi = upper_32_bits(map);
 			dma_unmap_addr_set(e, mapaddr, map);
 			dma_unmap_len_set(e, maplen, skb_frag_size(frag));
 
@@ -2815,6 +2827,26 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	}
 
 	return NETDEV_TX_OK;
+
+mapping_unwind:
+	e = skge->tx_ring.to_use;
+	pci_unmap_single(hw->pdev,
+			 dma_unmap_addr(e, mapaddr),
+			 dma_unmap_len(e, maplen),
+			 PCI_DMA_TODEVICE);
+	while (i-- > 0) {
+		e = e->next;
+		pci_unmap_page(hw->pdev,
+			       dma_unmap_addr(e, mapaddr),
+			       dma_unmap_len(e, maplen),
+			       PCI_DMA_TODEVICE);
+	}
+
+mapping_error:
+	if (net_ratelimit())
+		dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 
@@ -3045,11 +3077,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 
 		pci_dma_sync_single_for_cpu(skge->hw->pdev,
 					    dma_unmap_addr(e, mapaddr),
-					    len, PCI_DMA_FROMDEVICE);
+					    dma_unmap_len(e, maplen),
+					    PCI_DMA_FROMDEVICE);
 		skb_copy_from_linear_data(e->skb, skb->data, len);
 		pci_dma_sync_single_for_device(skge->hw->pdev,
 					       dma_unmap_addr(e, mapaddr),
-					       len, PCI_DMA_FROMDEVICE);
+					       dma_unmap_len(e, maplen),
+					       PCI_DMA_FROMDEVICE);
 		skge_rx_reuse(e, skge->rx_buf_size);
 	} else {
 		struct sk_buff *nskb;
@@ -3058,13 +3092,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 		if (!nskb)
 			goto resubmit;
 
+		if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
+			dev_kfree_skb(nskb);
+			goto resubmit;
+		}
+
 		pci_unmap_single(skge->hw->pdev,
 				 dma_unmap_addr(e, mapaddr),
 				 dma_unmap_len(e, maplen),
 				 PCI_DMA_FROMDEVICE);
 		skb = e->skb;
 		prefetch(skb->data);
-		skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
 	}
 
 	skb_put(skb, len);
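
The skge series above is a compact catalogue of DMA-API hygiene: bus addresses held in dma_addr_t rather than u64, 64-bit addresses split with lower_32_bits()/upper_32_bits() instead of raw shifts, every pci_map_single()/skb_frag_dma_map() result checked, and a transmit unwind path that unmaps whatever was already mapped before dropping the skb. The core check-and-store pattern, as a hedged sketch against the generic DMA API (descriptor layout hypothetical):

    #include <linux/dma-mapping.h>
    #include <linux/kernel.h>

    struct hypo_desc {
            u32 dma_lo;
            u32 dma_hi;
    };

    /* Sketch: store a possibly-64-bit bus address as two 32-bit halves,
     * and never touch the descriptor if the mapping failed.
     */
    static int hypo_fill_desc(struct device *dev, struct hypo_desc *d,
                              void *buf, size_t len)
    {
            dma_addr_t map = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            if (dma_mapping_error(dev, map))
                    return -EIO;            /* caller keeps/frees the buffer */

            d->dma_lo = lower_32_bits(map);
            d->dma_hi = upper_32_bits(map);
            return 0;
    }
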
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 727874f575ce..a28cd801a236 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -223,7 +223,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
 	case ETH_SS_STATS:
 		return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
 			(priv->tx_ring_num * 2) +
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
 			(priv->rx_ring_num * 5);
 #else
 			(priv->rx_ring_num * 2);
@@ -276,7 +276,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		data[index++] = priv->rx_ring[i].packets;
 		data[index++] = priv->rx_ring[i].bytes;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
 		data[index++] = priv->rx_ring[i].yields;
 		data[index++] = priv->rx_ring[i].misses;
 		data[index++] = priv->rx_ring[i].cleaned;
@@ -344,7 +344,7 @@ static void mlx4_en_get_strings(struct net_device *dev,
 				"rx%d_packets", i);
 			sprintf(data + (index++) * ETH_GSTRING_LEN,
 				"rx%d_bytes", i);
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
 			sprintf(data + (index++) * ETH_GSTRING_LEN,
 				"rx%d_napi_yield", i);
 			sprintf(data + (index++) * ETH_GSTRING_LEN,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 5eac871399d8..fa37b7a61213 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -68,7 +68,7 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
 	return 0;
 }
 
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
 /* must be called with local_bh_disable()d */
 static int mlx4_en_low_latency_recv(struct napi_struct *napi)
 {
@@ -94,7 +94,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi)
 
 	return done;
 }
-#endif	/* CONFIG_NET_LL_RX_POLL */
+#endif	/* CONFIG_NET_RX_BUSY_POLL */
 
 #ifdef CONFIG_RFS_ACCEL
 
@@ -2140,7 +2140,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
 #endif
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
 	.ndo_busy_poll		= mlx4_en_low_latency_recv,
 #endif
 };
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 8873d6802c80..6fc6dabc78d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -845,16 +845,7 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
 			   MLX4_CMD_NATIVE);
 
 	if (!err && dev->caps.function != slave) {
-		/* if config MAC in DB use it */
-		if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac)
-			def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
-		else {
-			/* set slave default_mac address */
-			MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
-			def_mac += slave << 8;
-			priv->mfunc.master.vf_admin[slave].vport[vhcr->in_modifier].mac = def_mac;
-		}
-
+		def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
 		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
 
 		/* get port type - currently only eth is enabled */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index e85af922dcdc..36be3208786a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -371,7 +371,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
 	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
 
-	if (!enable_64b_cqe_eqe) {
+	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
 		if (dev_cap->flags &
 		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
 			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 35fb60e2320c..5e0aa569306a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -292,7 +292,7 @@ struct mlx4_en_rx_ring {
 	void *rx_info;
 	unsigned long bytes;
 	unsigned long packets;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
 	unsigned long yields;
 	unsigned long misses;
 	unsigned long cleaned;
@@ -318,7 +318,7 @@ struct mlx4_en_cq {
 	struct mlx4_cqe *buf;
 #define MLX4_EN_OPCODE_ERROR	0x1e
 
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
 	unsigned int state;
 #define MLX4_EN_CQ_STATE_IDLE	0
 #define MLX4_EN_CQ_STATE_NAPI	1    /* NAPI owns this CQ */
@@ -329,7 +329,7 @@ struct mlx4_en_cq {
 #define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD)
 #define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
 	spinlock_t poll_lock; /* protects from LLS/napi conflicts */
-#endif  /* CONFIG_NET_LL_RX_POLL */
+#endif  /* CONFIG_NET_RX_BUSY_POLL */
 };
 
 struct mlx4_en_port_profile {
@@ -580,7 +580,7 @@ struct mlx4_mac_entry {
 	struct rcu_head rcu;
 };
 
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
 static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
 {
 	spin_lock_init(&cq->poll_lock);
@@ -687,7 +687,7 @@ static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
 {
 	return false;
 }
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 
 #define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 205753a04cfc..5472cbd34028 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -46,7 +46,7 @@
 #include "mlx5_core.h"
 
 enum {
-	CMD_IF_REV = 3,
+	CMD_IF_REV = 5,
 };
 
 enum {
@@ -282,6 +282,12 @@ const char *mlx5_command_str(int command)
 	case MLX5_CMD_OP_TEARDOWN_HCA:
 		return "TEARDOWN_HCA";
 
+	case MLX5_CMD_OP_ENABLE_HCA:
+		return "MLX5_CMD_OP_ENABLE_HCA";
+
+	case MLX5_CMD_OP_DISABLE_HCA:
+		return "MLX5_CMD_OP_DISABLE_HCA";
+
 	case MLX5_CMD_OP_QUERY_PAGES:
 		return "QUERY_PAGES";
 
@@ -1113,7 +1119,13 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
 
 	for (i = 0; i < (1 << cmd->log_sz); i++) {
 		if (test_bit(i, &vector)) {
+			struct semaphore *sem;
+
 			ent = cmd->ent_arr[i];
+			if (ent->page_queue)
+				sem = &cmd->pages_sem;
+			else
+				sem = &cmd->sem;
 			ktime_get_ts(&ent->ts2);
 			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
 			dump_command(dev, ent, 0);
@@ -1136,10 +1148,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
 			} else {
 				complete(&ent->done);
 			}
-			if (ent->page_queue)
-				up(&cmd->pages_sem);
-			else
-				up(&cmd->sem);
+			up(sem);
 		}
 	}
 }
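
In the completion-handler hunks, the semaphore to release is now chosen from ent->page_queue before the entry is completed, and only the cached pointer is used afterwards; this reads like a defensive fix for the case where the waiter woken by complete() releases the command entry before the handler reaches the old ent->page_queue test. The shape of the pattern, hedged (types abridged from the driver):

    /* Sketch: capture everything needed for cleanup before waking the
     * waiter; after complete(), 'ent' may already be gone.
     */
    static void finish_entry(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
    {
            struct semaphore *sem = ent->page_queue ? &cmd->pages_sem
                                                    : &cmd->sem;

            complete(&ent->done);   /* waiter may free 'ent' from here on */
            up(sem);                /* safe: no further dereference of 'ent' */
    }
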
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index c02cbcfd0fb8..443cc4d7b024 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 		case MLX5_EVENT_TYPE_PAGE_REQUEST:
 			{
 				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
-				s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
+				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
 
 				mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
 				mlx5_core_req_pages_handler(dev, func_id, npages);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 72a5222447f5..f012658b6a92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
 	caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
 	caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
 	caps->log_max_mcg = out->hca_cap.log_max_mcg;
-	caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg);
+	caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
 	caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
 	caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
 	caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 748f10a155c4..3e6670c4a7cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -55,33 +55,9 @@ enum {
 };
 
 static DEFINE_SPINLOCK(health_lock);
-
 static LIST_HEAD(health_list);
 static struct work_struct health_work;
 
-static health_handler_t reg_handler;
-int mlx5_register_health_report_handler(health_handler_t handler)
-{
-	spin_lock_irq(&health_lock);
-	if (reg_handler) {
-		spin_unlock_irq(&health_lock);
-		return -EEXIST;
-	}
-	reg_handler = handler;
-	spin_unlock_irq(&health_lock);
-
-	return 0;
-}
-EXPORT_SYMBOL(mlx5_register_health_report_handler);
-
-void mlx5_unregister_health_report_handler(void)
-{
-	spin_lock_irq(&health_lock);
-	reg_handler = NULL;
-	spin_unlock_irq(&health_lock);
-}
-EXPORT_SYMBOL(mlx5_unregister_health_report_handler);
-
 static void health_care(struct work_struct *work)
 {
 	struct mlx5_core_health *health, *n;
@@ -98,11 +74,8 @@ static void health_care(struct work_struct *work)
 		priv = container_of(health, struct mlx5_priv, health);
 		dev = container_of(priv, struct mlx5_core_dev, priv);
 		mlx5_core_warn(dev, "handling bad device here\n");
+		/* nothing yet */
 		spin_lock_irq(&health_lock);
-		if (reg_handler)
-			reg_handler(dev->pdev, health->health,
-				    sizeof(health->health));
-
 		list_del_init(&health->list);
 		spin_unlock_irq(&health_lock);
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 12242de2b0e3..b47739b0b5f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -249,6 +249,44 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev)
 	return err;
 }
 
+static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
+{
+	int err;
+	struct mlx5_enable_hca_mbox_in in;
+	struct mlx5_enable_hca_mbox_out out;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);
+	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	if (err)
+		return err;
+
+	if (out.hdr.status)
+		return mlx5_cmd_status_to_err(&out.hdr);
+
+	return 0;
+}
+
+static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
+{
+	int err;
+	struct mlx5_disable_hca_mbox_in in;
+	struct mlx5_disable_hca_mbox_out out;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DISABLE_HCA);
+	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	if (err)
+		return err;
+
+	if (out.hdr.status)
+		return mlx5_cmd_status_to_err(&out.hdr);
+
+	return 0;
+}
+
 int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
 	struct mlx5_priv *priv = &dev->priv;
@@ -304,28 +342,41 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 	}
 
 	mlx5_pagealloc_init(dev);
+
+	err = mlx5_core_enable_hca(dev);
+	if (err) {
+		dev_err(&pdev->dev, "enable hca failed\n");
+		goto err_pagealloc_cleanup;
+	}
+
+	err = mlx5_satisfy_startup_pages(dev, 1);
+	if (err) {
+		dev_err(&pdev->dev, "failed to allocate boot pages\n");
+		goto err_disable_hca;
+	}
+
 	err = set_hca_ctrl(dev);
 	if (err) {
 		dev_err(&pdev->dev, "set_hca_ctrl failed\n");
-		goto err_pagealloc_cleanup;
+		goto reclaim_boot_pages;
 	}
 
 	err = handle_hca_cap(dev);
 	if (err) {
 		dev_err(&pdev->dev, "handle_hca_cap failed\n");
-		goto err_pagealloc_cleanup;
+		goto reclaim_boot_pages;
 	}
 
-	err = mlx5_satisfy_startup_pages(dev);
+	err = mlx5_satisfy_startup_pages(dev, 0);
 	if (err) {
-		dev_err(&pdev->dev, "failed to allocate startup pages\n");
-		goto err_pagealloc_cleanup;
+		dev_err(&pdev->dev, "failed to allocate init pages\n");
+		goto reclaim_boot_pages;
 	}
 
 	err = mlx5_pagealloc_start(dev);
 	if (err) {
 		dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
-		goto err_reclaim_pages;
+		goto reclaim_boot_pages;
 	}
 
 	err = mlx5_cmd_init_hca(dev);
@@ -396,9 +447,12 @@ err_stop_poll:
 err_pagealloc_stop:
 	mlx5_pagealloc_stop(dev);
 
-err_reclaim_pages:
+reclaim_boot_pages:
 	mlx5_reclaim_startup_pages(dev);
 
+err_disable_hca:
+	mlx5_core_disable_hca(dev);
+
 err_pagealloc_cleanup:
 	mlx5_pagealloc_cleanup(dev);
 	mlx5_cmd_cleanup(dev);
@@ -434,6 +488,7 @@ void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
 	mlx5_cmd_teardown_hca(dev);
 	mlx5_pagealloc_stop(dev);
 	mlx5_reclaim_startup_pages(dev);
+	mlx5_core_disable_hca(dev);
 	mlx5_pagealloc_cleanup(dev);
 	mlx5_cmd_cleanup(dev);
 	iounmap(dev->iseg);
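
Together with the CMD_IF_REV bump to 5 in cmd.c, these main.c hunks split firmware page donation into two stages: ENABLE_HCA runs first, boot pages are supplied before the capability commands, and only afterwards are the init pages requested; DISABLE_HCA mirrors it on teardown. A condensed sketch of the ordering this enforces, reusing the helper names from the diff (error handling abridged):

    static int hypo_bringup(struct mlx5_core_dev *dev)
    {
            int err;

            err = mlx5_core_enable_hca(dev);          /* ENABLE_HCA command */
            if (err)
                    return err;

            err = mlx5_satisfy_startup_pages(dev, 1); /* boot pages */
            if (err)
                    goto err_disable;

            err = set_hca_ctrl(dev);                  /* caps need boot pages */
            if (err)
                    goto err_reclaim;

            err = mlx5_satisfy_startup_pages(dev, 0); /* init pages */
            if (err)
                    goto err_reclaim;

            return 0;

    err_reclaim:
            mlx5_reclaim_startup_pages(dev);
    err_disable:
            mlx5_core_disable_hca(dev);
            return err;
    }
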
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index f0bf46339b28..3a2408d44820 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -43,10 +43,16 @@ enum {
 	MLX5_PAGES_TAKE		= 2
 };
 
+enum {
+	MLX5_BOOT_PAGES		= 1,
+	MLX5_INIT_PAGES		= 2,
+	MLX5_POST_INIT_PAGES	= 3
+};
+
 struct mlx5_pages_req {
 	struct mlx5_core_dev *dev;
 	u32	func_id;
-	s16	npages;
+	s32	npages;
 	struct work_struct work;
 };
 
@@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox {
 
 struct mlx5_query_pages_outbox {
 	struct mlx5_outbox_hdr	hdr;
-	u8			reserved[2];
+	__be16			rsvd;
 	__be16			func_id;
-	__be16			init_pages;
-	__be16			num_pages;
+	__be32			num_pages;
 };
 
 struct mlx5_manage_pages_inbox {
 	struct mlx5_inbox_hdr	hdr;
-	__be16			rsvd0;
+	__be16			rsvd;
 	__be16			func_id;
-	__be16			rsvd1;
-	__be16			num_entries;
-	u8			rsvd2[16];
+	__be32			num_entries;
 	__be64			pas[0];
 };
 
 struct mlx5_manage_pages_outbox {
 	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd0[2];
-	__be16			num_entries;
-	u8			rsvd1[20];
+	__be32			num_entries;
+	u8			rsvd[4];
 	__be64			pas[0];
 };
 
@@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
 }
 
 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
-				s16 *pages, s16 *init_pages)
+				s32 *npages, int boot)
 {
 	struct mlx5_query_pages_inbox	in;
 	struct mlx5_query_pages_outbox	out;
@@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
 	memset(&in, 0, sizeof(in));
 	memset(&out, 0, sizeof(out));
 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
+	in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);
+
 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
 	if (err)
 		return err;
@@ -162,10 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
 	if (out.hdr.status)
 		return mlx5_cmd_status_to_err(&out.hdr);
 
-	if (pages)
-		*pages = be16_to_cpu(out.num_pages);
-	if (init_pages)
-		*init_pages = be16_to_cpu(out.init_pages);
+	*npages = be32_to_cpu(out.num_pages);
 	*func_id = be16_to_cpu(out.func_id);
 
 	return err;
@@ -219,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
 	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
 	in->func_id = cpu_to_be16(func_id);
-	in->num_entries = cpu_to_be16(npages);
+	in->num_entries = cpu_to_be32(npages);
 	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
 	mlx5_core_dbg(dev, "err %d\n", err);
 	if (err) {
@@ -287,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
 	in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
 	in.func_id = cpu_to_be16(func_id);
-	in.num_entries = cpu_to_be16(npages);
+	in.num_entries = cpu_to_be32(npages);
 	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
 	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
 	if (err) {
@@ -301,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
 		goto out_free;
 	}
 
-	num_claimed = be16_to_cpu(out->num_entries);
+	num_claimed = be32_to_cpu(out->num_entries);
 	if (nclaimed)
 		*nclaimed = num_claimed;
 
@@ -340,7 +341,7 @@ static void pages_work_handler(struct work_struct *work)
 }
 
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-				 s16 npages)
+				 s32 npages)
 {
 	struct mlx5_pages_req *req;
 
@@ -357,19 +358,20 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
 	queue_work(dev->priv.pg_wq, &req->work);
 }
 
-int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev)
+int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
 {
-	s16 uninitialized_var(init_pages);
 	u16 uninitialized_var(func_id);
+	s32 uninitialized_var(npages);
 	int err;
 
-	err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages);
+	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
 	if (err)
 		return err;
 
-	mlx5_core_dbg(dev, "requested %d init pages for func_id 0x%x\n", init_pages, func_id);
+	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
+		      npages, boot ? "boot" : "init", func_id);
 
-	return give_pages(dev, func_id, init_pages, 0);
+	return give_pages(dev, func_id, npages, 0);
 }
 
 static int optimal_reclaimed_pages(void)
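
The pagealloc rework is the data-path half of the interface-revision bump: page counts on the wire grow from 16 to 32 bits, so the mailbox structs, the s16-to-s32 plumbing (here and in eq.c), and every byte-order accessor change together. When widening a wire field like this, the layout and both conversion directions must move in lockstep or large counts silently truncate. A hedged sketch (struct name hypothetical, layout modeled on the diff):

    #include <linux/types.h>

    /* Sketch: 32-bit big-endian page count on the wire; treated as signed
     * by the driver, where a negative count appears to mean "reclaim".
     */
    struct hypo_pages_wire {
            __be16 rsvd;
            __be16 func_id;
            __be32 num_pages;
    };

    static s32 hypo_num_pages(const struct hypo_pages_wire *w)
    {
            return (s32)be32_to_cpu(w->num_pages);  /* not be16_to_cpu() */
    }
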
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 71d4a3937200..68f5d9c77c7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -164,6 +164,7 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
 		uuari->uars[i].map = ioremap(addr, PAGE_SIZE);
 		if (!uuari->uars[i].map) {
 			mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+			err = -ENOMEM;
 			goto out_count;
 		}
 		mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n",
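
The one-line uar.c change fixes a classic error-path bug: jumping to the cleanup label without setting err means the function returns whatever err last held (often 0) after an ioremap() failure, so the caller proceeds as if allocation succeeded. The corrected shape, sketched with hypothetical types:

    #include <linux/io.h>

    struct hypo_dev {
            void __iomem *map[8];
            phys_addr_t addr[8];
    };

    /* Sketch: record the failure *before* jumping to the unwind path */
    static int hypo_map_uars(struct hypo_dev *dev, int count)
    {
            int i, err = 0;

            for (i = 0; i < count; i++) {
                    dev->map[i] = ioremap(dev->addr[i], PAGE_SIZE);
                    if (!dev->map[i]) {
                            err = -ENOMEM;  /* the fix: make failure visible */
                            goto out_unwind;
                    }
            }
            return 0;

    out_unwind:
            while (i--)
                    iounmap(dev->map[i]);
            return err;
    }
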
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index cb22341a14a8..a588ffde9700 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -4,7 +4,7 @@
 
 config PCH_GBE
 	tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
-	depends on PCI
+	depends on PCI && (X86 || COMPILE_TEST)
 	select MII
 	select PTP_1588_CLOCK_PCH
 	---help---
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index b00cf5665eab..221645e9f182 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1400,8 +1400,8 @@ void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
 #define ADDR_IN_RANGE(addr, low, high)	\
 	(((addr) < (high)) && ((addr) >= (low)))
 
-#define QLCRD32(adapter, off) \
-	(adapter->ahw->hw_ops->read_reg)(adapter, off)
+#define QLCRD32(adapter, off, err) \
+	(adapter->ahw->hw_ops->read_reg)(adapter, off, err)
 
 #define QLCWR32(adapter, off, val) \
 	adapter->ahw->hw_ops->write_reg(adapter, off, val)
@@ -1604,7 +1604,7 @@ struct qlcnic_nic_template {
 struct qlcnic_hardware_ops {
 	void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
 	void (*write_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
-	int (*read_reg) (struct qlcnic_adapter *, ulong);
+	int (*read_reg) (struct qlcnic_adapter *, ulong, int *);
 	int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
 	void (*get_ocm_win) (struct qlcnic_hardware_context *);
 	int (*get_mac_address) (struct qlcnic_adapter *, u8 *);
@@ -1662,12 +1662,6 @@ static inline void qlcnic_write_crb(struct qlcnic_adapter *adapter, char *buf,
 	adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size);
 }
 
-static inline int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter,
-				       ulong off)
-{
-	return adapter->ahw->hw_ops->read_reg(adapter, off);
-}
-
 static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
 					ulong off, u32 data)
 {
@@ -1869,7 +1863,8 @@ static inline void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
 
 static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
 {
-	adapter->ahw->hw_ops->set_mac_filter_count(adapter);
+	if (adapter->ahw->hw_ops->set_mac_filter_count)
+		adapter->ahw->hw_ops->set_mac_filter_count(adapter);
 }
 
 static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
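
The QLCRD32/read_reg signature change is the theme of the remaining qlcnic hunks: the old call returned the register value directly, so a register whose contents happened to equal an errno value was indistinguishable from a failed read. Threading an int *err out-parameter separates the data path from the error path. A hedged sketch of the convention (names hypothetical):

    #include <linux/io.h>

    struct hypo_adapter {
            void __iomem *regs;
    };

    static int hypo_set_window(struct hypo_adapter *a, unsigned long off)
    {
            return 0;               /* stub; real one may return -EIO */
    }

    /* Sketch: return the full 32-bit value, report failure separately */
    static u32 hypo_read_reg(struct hypo_adapter *adapter, unsigned long off,
                             int *err)
    {
            *err = hypo_set_window(adapter, off);
            if (*err)
                    return 0;       /* value is meaningless on error */
            return readl(adapter->regs + off);
    }

Caller pattern used throughout the hunks that follow: initialize err to 0, read, then test err (not the value) against -EIO before using the result.
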
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 0913c623a67e..9d4bb7f83904 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -228,17 +228,17 @@ static int __qlcnic_set_win_base(struct qlcnic_adapter *adapter, u32 addr)
228 return 0; 228 return 0;
229} 229}
230 230
231int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr) 231int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
232 int *err)
232{ 233{
233 int ret;
234 struct qlcnic_hardware_context *ahw = adapter->ahw; 234 struct qlcnic_hardware_context *ahw = adapter->ahw;
235 235
236 ret = __qlcnic_set_win_base(adapter, (u32) addr); 236 *err = __qlcnic_set_win_base(adapter, (u32) addr);
237 if (!ret) { 237 if (!*err) {
238 return QLCRDX(ahw, QLCNIC_WILDCARD); 238 return QLCRDX(ahw, QLCNIC_WILDCARD);
239 } else { 239 } else {
240 dev_err(&adapter->pdev->dev, 240 dev_err(&adapter->pdev->dev,
241 "%s failed, addr = 0x%x\n", __func__, (int)addr); 241 "%s failed, addr = 0x%lx\n", __func__, addr);
242 return -EIO; 242 return -EIO;
243 } 243 }
244} 244}
@@ -561,7 +561,7 @@ void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter)
561void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf, 561void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
562 loff_t offset, size_t size) 562 loff_t offset, size_t size)
563{ 563{
564 int ret; 564 int ret = 0;
565 u32 data; 565 u32 data;
566 566
567 if (qlcnic_api_lock(adapter)) { 567 if (qlcnic_api_lock(adapter)) {
@@ -571,7 +571,7 @@ void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
571 return; 571 return;
572 } 572 }
573 573
574 ret = qlcnic_83xx_rd_reg_indirect(adapter, (u32) offset); 574 data = QLCRD32(adapter, (u32) offset, &ret);
575 qlcnic_api_unlock(adapter); 575 qlcnic_api_unlock(adapter);
576 576
577 if (ret == -EIO) { 577 if (ret == -EIO) {
@@ -580,7 +580,6 @@ void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
580 __func__, (u32)offset); 580 __func__, (u32)offset);
581 return; 581 return;
582 } 582 }
583 data = ret;
584 memcpy(buf, &data, size); 583 memcpy(buf, &data, size);
585} 584}
586 585
@@ -2075,18 +2074,25 @@ void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter)
2075static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, 2074static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
2076 u32 data[]) 2075 u32 data[])
2077{ 2076{
2077 struct qlcnic_hardware_context *ahw = adapter->ahw;
2078 u8 link_status, duplex; 2078 u8 link_status, duplex;
2079 /* link speed */ 2079 /* link speed */
2080 link_status = LSB(data[3]) & 1; 2080 link_status = LSB(data[3]) & 1;
2081 adapter->ahw->link_speed = MSW(data[2]); 2081 if (link_status) {
2082 adapter->ahw->link_autoneg = MSB(MSW(data[3])); 2082 ahw->link_speed = MSW(data[2]);
2083 adapter->ahw->module_type = MSB(LSW(data[3])); 2083 duplex = LSB(MSW(data[3]));
2084 duplex = LSB(MSW(data[3])); 2084 if (duplex)
2085 if (duplex) 2085 ahw->link_duplex = DUPLEX_FULL;
2086 adapter->ahw->link_duplex = DUPLEX_FULL; 2086 else
2087 else 2087 ahw->link_duplex = DUPLEX_HALF;
2088 adapter->ahw->link_duplex = DUPLEX_HALF; 2088 } else {
2089 adapter->ahw->has_link_events = 1; 2089 ahw->link_speed = SPEED_UNKNOWN;
2090 ahw->link_duplex = DUPLEX_UNKNOWN;
2091 }
2092
2093 ahw->link_autoneg = MSB(MSW(data[3]));
2094 ahw->module_type = MSB(LSW(data[3]));
2095 ahw->has_link_events = 1;
2090 qlcnic_advert_link_change(adapter, link_status); 2096 qlcnic_advert_link_change(adapter, link_status);
2091} 2097}
2092 2098
@@ -2384,9 +2390,9 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
2384 u32 flash_addr, u8 *p_data, 2390 u32 flash_addr, u8 *p_data,
2385 int count) 2391 int count)
2386{ 2392{
2387 int i, ret; 2393 u32 word, range, flash_offset, addr = flash_addr, ret;
2388 u32 word, range, flash_offset, addr = flash_addr;
2389 ulong indirect_add, direct_window; 2394 ulong indirect_add, direct_window;
2395 int i, err = 0;
2390 2396
2391 flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1); 2397 flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1);
2392 if (addr & 0x3) { 2398 if (addr & 0x3) {
@@ -2404,10 +2410,9 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
2404 /* Multi sector read */ 2410 /* Multi sector read */
2405 for (i = 0; i < count; i++) { 2411 for (i = 0; i < count; i++) {
2406 indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr); 2412 indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
2407 ret = qlcnic_83xx_rd_reg_indirect(adapter, 2413 ret = QLCRD32(adapter, indirect_add, &err);
2408 indirect_add); 2414 if (err == -EIO)
2409 if (ret == -EIO) 2415 return err;
2410 return -EIO;
2411 2416
2412 word = ret; 2417 word = ret;
2413 *(u32 *)p_data = word; 2418 *(u32 *)p_data = word;
@@ -2428,10 +2433,9 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
2428 /* Single sector read */ 2433 /* Single sector read */
2429 for (i = 0; i < count; i++) { 2434 for (i = 0; i < count; i++) {
2430 indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr); 2435 indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
2431 ret = qlcnic_83xx_rd_reg_indirect(adapter, 2436 ret = QLCRD32(adapter, indirect_add, &err);
2432 indirect_add); 2437 if (err == -EIO)
2433 if (ret == -EIO) 2438 return err;
2434 return -EIO;
2435 2439
2436 word = ret; 2440 word = ret;
2437 *(u32 *)p_data = word; 2441 *(u32 *)p_data = word;
@@ -2447,10 +2451,13 @@ static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
2447{ 2451{
2448 u32 status; 2452 u32 status;
2449 int retries = QLC_83XX_FLASH_READ_RETRY_COUNT; 2453 int retries = QLC_83XX_FLASH_READ_RETRY_COUNT;
2454 int err = 0;
2450 2455
2451 do { 2456 do {
2452 status = qlcnic_83xx_rd_reg_indirect(adapter, 2457 status = QLCRD32(adapter, QLC_83XX_FLASH_STATUS, &err);
2453 QLC_83XX_FLASH_STATUS); 2458 if (err == -EIO)
2459 return err;
2460
2454 if ((status & QLC_83XX_FLASH_STATUS_READY) == 2461 if ((status & QLC_83XX_FLASH_STATUS_READY) ==
2455 QLC_83XX_FLASH_STATUS_READY) 2462 QLC_83XX_FLASH_STATUS_READY)
2456 break; 2463 break;
@@ -2502,7 +2509,8 @@ int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter)
2502 2509
2503int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter) 2510int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
2504{ 2511{
2505 int ret, mfg_id; 2512 int ret, err = 0;
2513 u32 mfg_id;
2506 2514
2507 if (qlcnic_83xx_lock_flash(adapter)) 2515 if (qlcnic_83xx_lock_flash(adapter))
2508 return -EIO; 2516 return -EIO;
@@ -2517,9 +2525,11 @@ int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
2517 return -EIO; 2525 return -EIO;
2518 } 2526 }
2519 2527
2520 mfg_id = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA); 2528 mfg_id = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
2521 if (mfg_id == -EIO) 2529 if (err == -EIO) {
2522 return -EIO; 2530 qlcnic_83xx_unlock_flash(adapter);
2531 return err;
2532 }
2523 2533
2524 adapter->flash_mfg_id = (mfg_id & 0xFF); 2534 adapter->flash_mfg_id = (mfg_id & 0xFF);
2525 qlcnic_83xx_unlock_flash(adapter); 2535 qlcnic_83xx_unlock_flash(adapter);
@@ -2636,7 +2646,7 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
2636 u32 *p_data, int count) 2646 u32 *p_data, int count)
2637{ 2647{
2638 u32 temp; 2648 u32 temp;
2639 int ret = -EIO; 2649 int ret = -EIO, err = 0;
2640 2650
2641 if ((count < QLC_83XX_FLASH_WRITE_MIN) || 2651 if ((count < QLC_83XX_FLASH_WRITE_MIN) ||
2642 (count > QLC_83XX_FLASH_WRITE_MAX)) { 2652 (count > QLC_83XX_FLASH_WRITE_MAX)) {
@@ -2645,8 +2655,10 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
2645 return -EIO; 2655 return -EIO;
2646 } 2656 }
2647 2657
2648 temp = qlcnic_83xx_rd_reg_indirect(adapter, 2658 temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
2649 QLC_83XX_FLASH_SPI_CONTROL); 2659 if (err == -EIO)
2660 return err;
2661
2650 qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL, 2662 qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL,
2651 (temp | QLC_83XX_FLASH_SPI_CTRL)); 2663 (temp | QLC_83XX_FLASH_SPI_CTRL));
2652 qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, 2664 qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
@@ -2695,13 +2707,18 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
2695 return -EIO; 2707 return -EIO;
2696 } 2708 }
2697 2709
2698 ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_SPI_STATUS); 2710 ret = QLCRD32(adapter, QLC_83XX_FLASH_SPI_STATUS, &err);
2711 if (err == -EIO)
2712 return err;
2713
2699 if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) { 2714 if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) {
2700 dev_err(&adapter->pdev->dev, "%s: failed at %d\n", 2715 dev_err(&adapter->pdev->dev, "%s: failed at %d\n",
2701 __func__, __LINE__); 2716 __func__, __LINE__);
2702 /* Operation failed, clear error bit */ 2717 /* Operation failed, clear error bit */
2703 temp = qlcnic_83xx_rd_reg_indirect(adapter, 2718 temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
2704 QLC_83XX_FLASH_SPI_CONTROL); 2719 if (err == -EIO)
2720 return err;
2721
2705 qlcnic_83xx_wrt_reg_indirect(adapter, 2722 qlcnic_83xx_wrt_reg_indirect(adapter,
2706 QLC_83XX_FLASH_SPI_CONTROL, 2723 QLC_83XX_FLASH_SPI_CONTROL,
2707 (temp | QLC_83XX_FLASH_SPI_CTRL)); 2724 (temp | QLC_83XX_FLASH_SPI_CTRL));
@@ -2823,6 +2840,7 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
2823{ 2840{
2824 int i, j, ret = 0; 2841 int i, j, ret = 0;
2825 u32 temp; 2842 u32 temp;
2843 int err = 0;
2826 2844
2827 /* Check alignment */ 2845 /* Check alignment */
2828 if (addr & 0xF) 2846 if (addr & 0xF)
@@ -2855,8 +2873,12 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
2855 QLCNIC_TA_WRITE_START); 2873 QLCNIC_TA_WRITE_START);
2856 2874
2857 for (j = 0; j < MAX_CTL_CHECK; j++) { 2875 for (j = 0; j < MAX_CTL_CHECK; j++) {
2858 temp = qlcnic_83xx_rd_reg_indirect(adapter, 2876 temp = QLCRD32(adapter, QLCNIC_MS_CTRL, &err);
2859 QLCNIC_MS_CTRL); 2877 if (err == -EIO) {
2878 mutex_unlock(&adapter->ahw->mem_lock);
2879 return err;
2880 }
2881
2860 if ((temp & TA_CTL_BUSY) == 0) 2882 if ((temp & TA_CTL_BUSY) == 0)
2861 break; 2883 break;
2862 } 2884 }
@@ -2878,9 +2900,9 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
2878int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr, 2900int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
2879 u8 *p_data, int count) 2901 u8 *p_data, int count)
2880{ 2902{
2881 int i, ret; 2903 u32 word, addr = flash_addr, ret;
2882 u32 word, addr = flash_addr;
2883 ulong indirect_addr; 2904 ulong indirect_addr;
2905 int i, err = 0;
2884 2906
2885 if (qlcnic_83xx_lock_flash(adapter) != 0) 2907 if (qlcnic_83xx_lock_flash(adapter) != 0)
2886 return -EIO; 2908 return -EIO;
@@ -2900,10 +2922,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
2900 } 2922 }
2901 2923
2902 indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr); 2924 indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
2903 ret = qlcnic_83xx_rd_reg_indirect(adapter, 2925 ret = QLCRD32(adapter, indirect_addr, &err);
2904 indirect_addr); 2926 if (err == -EIO)
2905 if (ret == -EIO) 2927 return err;
2906 return -EIO; 2928
2907 word = ret; 2929 word = ret;
2908 *(u32 *)p_data = word; 2930 *(u32 *)p_data = word;
2909 p_data = p_data + 4; 2931 p_data = p_data + 4;
@@ -3014,8 +3036,8 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
3014 } 3036 }
3015 3037
3016 if (ahw->port_type == QLCNIC_XGBE) { 3038 if (ahw->port_type == QLCNIC_XGBE) {
3017 ecmd->supported = SUPPORTED_1000baseT_Full; 3039 ecmd->supported = SUPPORTED_10000baseT_Full;
3018 ecmd->advertising = ADVERTISED_1000baseT_Full; 3040 ecmd->advertising = ADVERTISED_10000baseT_Full;
3019 } else { 3041 } else {
3020 ecmd->supported = (SUPPORTED_10baseT_Half | 3042 ecmd->supported = (SUPPORTED_10baseT_Half |
3021 SUPPORTED_10baseT_Full | 3043 SUPPORTED_10baseT_Full |
@@ -3244,6 +3266,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
3244 u8 val; 3266 u8 val;
3245 int ret, max_sds_rings = adapter->max_sds_rings; 3267 int ret, max_sds_rings = adapter->max_sds_rings;
3246 3268
3269 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
3270 netdev_info(netdev, "Device is resetting\n");
3271 return -EBUSY;
3272 }
3273
3247 if (qlcnic_get_diag_lock(adapter)) { 3274 if (qlcnic_get_diag_lock(adapter)) {
3248 netdev_info(netdev, "Device in diagnostics mode\n"); 3275 netdev_info(netdev, "Device in diagnostics mode\n");
3249 return -EBUSY; 3276 return -EBUSY;
@@ -3369,7 +3396,8 @@ int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter,
 
 static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
 {
-    int ret;
+    int ret, err = 0;
+    u32 temp;
 
     qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
                                  QLC_83XX_FLASH_OEM_READ_SIG);
@@ -3379,8 +3407,11 @@ static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
     if (ret)
         return -EIO;
 
-    ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA);
-    return ret & 0xFF;
+    temp = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
+    if (err == -EIO)
+        return err;
+
+    return temp & 0xFF;
 }
 
 int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 2548d1403d75..272f56a2e14b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -508,7 +508,7 @@ void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *);
 void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *);
 void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
 void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
-int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong);
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong, int *);
 int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
 void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *, int, u64 []);
 int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index f41dfab1e9a3..345d987aede4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -629,7 +629,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
         return -EIO;
     }
 
-    qlcnic_set_drv_version(adapter);
+    if (adapter->portnum == 0)
+        qlcnic_set_drv_version(adapter);
     qlcnic_83xx_idc_attach_driver(adapter);
 
     return 0;
@@ -1303,8 +1304,11 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
 {
     int i, j;
     u32 val = 0, val1 = 0, reg = 0;
+    int err = 0;
 
-    val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG);
+    val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG, &err);
+    if (err == -EIO)
+        return;
     dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val);
 
     for (j = 0; j < 2; j++) {
@@ -1318,7 +1322,9 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
             reg = QLC_83XX_PORT1_THRESHOLD;
         }
         for (i = 0; i < 8; i++) {
-            val = QLCRD32(adapter, reg + (i * 0x4));
+            val = QLCRD32(adapter, reg + (i * 0x4), &err);
+            if (err == -EIO)
+                return;
             dev_info(&adapter->pdev->dev, "0x%x ", val);
         }
         dev_info(&adapter->pdev->dev, "\n");
@@ -1335,8 +1341,10 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
             reg = QLC_83XX_PORT1_TC_MC_REG;
         }
         for (i = 0; i < 4; i++) {
-            val = QLCRD32(adapter, reg + (i * 0x4));
+            val = QLCRD32(adapter, reg + (i * 0x4), &err);
+            if (err == -EIO)
+                return;
             dev_info(&adapter->pdev->dev, "0x%x ", val);
         }
         dev_info(&adapter->pdev->dev, "\n");
     }
@@ -1352,17 +1360,25 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
             reg = QLC_83XX_PORT1_TC_STATS;
         }
         for (i = 7; i >= 0; i--) {
-            val = QLCRD32(adapter, reg);
+            val = QLCRD32(adapter, reg, &err);
+            if (err == -EIO)
+                return;
             val &= ~(0x7 << 29);    /* Reset bits 29 to 31 */
             QLCWR32(adapter, reg, (val | (i << 29)));
-            val = QLCRD32(adapter, reg);
+            val = QLCRD32(adapter, reg, &err);
+            if (err == -EIO)
+                return;
             dev_info(&adapter->pdev->dev, "0x%x ", val);
         }
         dev_info(&adapter->pdev->dev, "\n");
     }
 
-    val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD);
-    val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD);
+    val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, &err);
+    if (err == -EIO)
+        return;
+    val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, &err);
+    if (err == -EIO)
+        return;
     dev_info(&adapter->pdev->dev,
              "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
              val, val1);
@@ -1425,7 +1441,7 @@ static void qlcnic_83xx_take_eport_out_of_reset(struct qlcnic_adapter *adapter)
 static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
 {
     u32 heartbeat, peg_status;
-    int retries, ret = -EIO;
+    int retries, ret = -EIO, err = 0;
 
     retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
     p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev,
@@ -1453,11 +1469,11 @@ static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
1453 "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" 1469 "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
1454 "PEG_NET_4_PC: 0x%x\n", peg_status, 1470 "PEG_NET_4_PC: 0x%x\n", peg_status,
1455 QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2), 1471 QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2),
1456 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0), 1472 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0, &err),
1457 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1), 1473 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1, &err),
1458 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2), 1474 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2, &err),
1459 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3), 1475 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3, &err),
1460 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4)); 1476 QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4, &err));
1461 1477
1462 if (QLCNIC_FWERROR_CODE(peg_status) == 0x67) 1478 if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
1463 dev_err(&p_dev->pdev->dev, 1479 dev_err(&p_dev->pdev->dev,
@@ -1501,18 +1517,22 @@ int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
 static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr,
                                 int duration, u32 mask, u32 status)
 {
+    int timeout_error, err = 0;
     u32 value;
-    int timeout_error;
     u8 retries;
 
-    value = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+    value = QLCRD32(p_dev, addr, &err);
+    if (err == -EIO)
+        return err;
     retries = duration / 10;
 
     do {
         if ((value & mask) != status) {
             timeout_error = 1;
             msleep(duration / 10);
-            value = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+            value = QLCRD32(p_dev, addr, &err);
+            if (err == -EIO)
+                return err;
         } else {
             timeout_error = 0;
             break;
@@ -1606,9 +1626,12 @@ int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
 static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev,
                                            u32 raddr, u32 waddr)
 {
-    int value;
+    int err = 0;
+    u32 value;
 
-    value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr);
+    value = QLCRD32(p_dev, raddr, &err);
+    if (err == -EIO)
+        return;
     qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
 }
 
@@ -1617,12 +1640,16 @@ static void qlcnic_83xx_rmw_crb_reg(struct qlcnic_adapter *p_dev,
                                     u32 raddr, u32 waddr,
                                     struct qlc_83xx_rmw *p_rmw_hdr)
 {
-    int value;
+    int err = 0;
+    u32 value;
 
-    if (p_rmw_hdr->index_a)
+    if (p_rmw_hdr->index_a) {
         value = p_dev->ahw->reset.array[p_rmw_hdr->index_a];
-    else
-        value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr);
+    } else {
+        value = QLCRD32(p_dev, raddr, &err);
+        if (err == -EIO)
+            return;
+    }
 
     value &= p_rmw_hdr->mask;
     value <<= p_rmw_hdr->shl;
@@ -1675,7 +1702,7 @@ static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev,
     long delay;
     struct qlc_83xx_entry *entry;
     struct qlc_83xx_poll *poll;
-    int i;
+    int i, err = 0;
     unsigned long arg1, arg2;
 
     poll = (struct qlc_83xx_poll *)((char *)p_hdr +
@@ -1699,10 +1726,12 @@ static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev,
                                         arg1, delay,
                                         poll->mask,
                                         poll->status)){
-                qlcnic_83xx_rd_reg_indirect(p_dev,
-                                            arg1);
-                qlcnic_83xx_rd_reg_indirect(p_dev,
-                                            arg2);
+                QLCRD32(p_dev, arg1, &err);
+                if (err == -EIO)
+                    return;
+                QLCRD32(p_dev, arg2, &err);
+                if (err == -EIO)
+                    return;
             }
         }
     }
@@ -1768,7 +1797,7 @@ static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev,
                                     struct qlc_83xx_entry_hdr *p_hdr)
 {
     long delay;
-    int index, i, j;
+    int index, i, j, err;
     struct qlc_83xx_quad_entry *entry;
     struct qlc_83xx_poll *poll;
     unsigned long addr;
@@ -1788,7 +1817,10 @@ static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev,
                                   poll->mask, poll->status)){
             index = p_dev->ahw->reset.array_index;
             addr = entry->dr_addr;
-            j = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+            j = QLCRD32(p_dev, addr, &err);
+            if (err == -EIO)
+                return;
+
             p_dev->ahw->reset.array[index++] = j;
 
             if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES)
@@ -2123,6 +2155,8 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
     set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
     qlcnic_83xx_clear_function_resources(adapter);
 
+    INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
     /* register for NIC IDC AEN Events */
     qlcnic_83xx_register_nic_idc_func(adapter, 1);
 
@@ -2140,8 +2174,6 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
     if (adapter->nic_ops->init_driver(adapter))
         return -EIO;
 
-    INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
-
     /* Periodically monitor device status */
     qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 0581a484ceb5..d09389b33474 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -104,7 +104,7 @@ static u32
 qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
 {
     u32 rsp;
-    int timeout = 0;
+    int timeout = 0, err = 0;
 
     do {
         /* give atleast 1ms for firmware to respond */
@@ -113,7 +113,7 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
         if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
             return QLCNIC_CDRP_RSP_TIMEOUT;
 
-        rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
+        rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET, &err);
     } while (!QLCNIC_CDRP_IS_RSP(rsp));
 
     return rsp;
@@ -122,7 +122,7 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
 int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
                           struct qlcnic_cmd_args *cmd)
 {
-    int i;
+    int i, err = 0;
     u32 rsp;
     u32 signature;
     struct pci_dev *pdev = adapter->pdev;
@@ -148,7 +148,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
         dev_err(&pdev->dev, "card response timeout.\n");
         cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
     } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
-        cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1));
+        cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err);
         switch (cmd->rsp.arg[0]) {
         case QLCNIC_RCODE_INVALID_ARGS:
             fmt = "CDRP invalid args: [%d]\n";
@@ -175,7 +175,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
         cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
 
     for (i = 1; i < cmd->rsp.num; i++)
-        cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i));
+        cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i), &err);
 
     /* Release semaphore */
     qlcnic_api_unlock(adapter);
@@ -210,10 +210,10 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd)
     if (err) {
         dev_info(&adapter->pdev->dev,
                  "Failed to set driver version in firmware\n");
-        return -EIO;
+        err = -EIO;
     }
-
-    return 0;
+    qlcnic_free_mbx_args(&cmd);
+    return err;
 }
 
 int
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 700a46324d09..7aac23ab31d1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -150,6 +150,7 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
150 "Link_Test_on_offline", 150 "Link_Test_on_offline",
151 "Interrupt_Test_offline", 151 "Interrupt_Test_offline",
152 "Internal_Loopback_offline", 152 "Internal_Loopback_offline",
153 "External_Loopback_offline",
153 "EEPROM_Test_offline" 154 "EEPROM_Test_offline"
154}; 155};
155 156
@@ -266,7 +267,7 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
 {
     struct qlcnic_hardware_context *ahw = adapter->ahw;
     u32 speed, reg;
-    int check_sfp_module = 0;
+    int check_sfp_module = 0, err = 0;
     u16 pcifn = ahw->pci_func;
 
     /* read which mode */
@@ -289,7 +290,7 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
 
     } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
         u32 val = 0;
-        val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
+        val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR, &err);
 
         if (val == QLCNIC_PORT_MODE_802_3_AP) {
             ecmd->supported = SUPPORTED_1000baseT_Full;
@@ -300,9 +301,13 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
     }
 
     if (netif_running(adapter->netdev) && ahw->has_link_events) {
-        reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
-        speed = P3P_LINK_SPEED_VAL(pcifn, reg);
-        ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+        if (ahw->linkup) {
+            reg = QLCRD32(adapter,
+                          P3P_LINK_SPEED_REG(pcifn), &err);
+            speed = P3P_LINK_SPEED_VAL(pcifn, reg);
+            ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+        }
+
         ethtool_cmd_speed_set(ecmd, ahw->link_speed);
         ecmd->autoneg = ahw->link_autoneg;
         ecmd->duplex = ahw->link_duplex;
@@ -463,13 +468,14 @@ static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter,
                                      u32 *regs_buff)
 {
-    int i, j = 0;
+    int i, j = 0, err = 0;
 
     for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
         regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]);
     j = 0;
     while (ext_diag_registers[j] != -1)
-        regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++]);
+        regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++],
+                                 &err);
     return i;
 }
 
@@ -519,13 +525,16 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
 static u32 qlcnic_test_link(struct net_device *dev)
 {
     struct qlcnic_adapter *adapter = netdev_priv(dev);
+    int err = 0;
     u32 val;
 
     if (qlcnic_83xx_check(adapter)) {
         val = qlcnic_83xx_test_link(adapter);
         return (val & 1) ? 0 : 1;
     }
-    val = QLCRD32(adapter, CRB_XG_STATE_P3P);
+    val = QLCRD32(adapter, CRB_XG_STATE_P3P, &err);
+    if (err == -EIO)
+        return err;
     val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
     return (val == XG_LINK_UP_P3P) ? 0 : 1;
 }
@@ -658,6 +667,7 @@ qlcnic_get_pauseparam(struct net_device *netdev,
 {
     struct qlcnic_adapter *adapter = netdev_priv(netdev);
     int port = adapter->ahw->physical_port;
+    int err = 0;
     __u32 val;
 
     if (qlcnic_83xx_check(adapter)) {
@@ -668,9 +678,13 @@ qlcnic_get_pauseparam(struct net_device *netdev,
         if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
             return;
         /* get flow control settings */
-        val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+        val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
+        if (err == -EIO)
+            return;
         pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
-        val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+        val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
+        if (err == -EIO)
+            return;
         switch (port) {
         case 0:
             pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
@@ -690,7 +704,9 @@ qlcnic_get_pauseparam(struct net_device *netdev,
         if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
             return;
         pause->rx_pause = 1;
-        val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+        val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
+        if (err == -EIO)
+            return;
         if (port == 0)
             pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
         else
@@ -707,6 +723,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
 {
     struct qlcnic_adapter *adapter = netdev_priv(netdev);
     int port = adapter->ahw->physical_port;
+    int err = 0;
     __u32 val;
 
     if (qlcnic_83xx_check(adapter))
@@ -717,7 +734,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
         if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
             return -EIO;
         /* set flow control */
-        val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+        val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
+        if (err == -EIO)
+            return err;
 
         if (pause->rx_pause)
             qlcnic_gb_rx_flowctl(val);
@@ -728,7 +747,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
                       val);
         QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val);
         /* set autoneg */
-        val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+        val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
+        if (err == -EIO)
+            return err;
         switch (port) {
         case 0:
             if (pause->tx_pause)
@@ -764,7 +785,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
         if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
             return -EIO;
 
-        val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+        val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
+        if (err == -EIO)
+            return err;
         if (port == 0) {
             if (pause->tx_pause)
                 qlcnic_xg_unset_xg0_mask(val);
@@ -788,11 +811,14 @@ static int qlcnic_reg_test(struct net_device *dev)
 {
     struct qlcnic_adapter *adapter = netdev_priv(dev);
     u32 data_read;
+    int err = 0;
 
     if (qlcnic_83xx_check(adapter))
         return qlcnic_83xx_reg_test(adapter);
 
-    data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
+    data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0), &err);
+    if (err == -EIO)
+        return err;
     if ((data_read & 0xffff) != adapter->pdev->vendor)
         return 1;
 
@@ -1026,8 +1052,15 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
         if (data[3])
             eth_test->flags |= ETH_TEST_FL_FAILED;
 
-        data[4] = qlcnic_eeprom_test(dev);
-        if (data[4])
+        if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+            data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
+            if (data[4])
+                eth_test->flags |= ETH_TEST_FL_FAILED;
+            eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+        }
+
+        data[5] = qlcnic_eeprom_test(dev);
+        if (data[5])
             eth_test->flags |= ETH_TEST_FL_FAILED;
     }
 }
@@ -1257,17 +1290,20 @@ qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
     struct qlcnic_adapter *adapter = netdev_priv(dev);
     u32 wol_cfg;
+    int err = 0;
 
     if (qlcnic_83xx_check(adapter))
         return;
     wol->supported = 0;
     wol->wolopts = 0;
 
-    wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+    wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+    if (err == -EIO)
+        return;
     if (wol_cfg & (1UL << adapter->portnum))
         wol->supported |= WAKE_MAGIC;
 
-    wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+    wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
     if (wol_cfg & (1UL << adapter->portnum))
         wol->wolopts |= WAKE_MAGIC;
 }
@@ -1277,17 +1313,22 @@ qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
     struct qlcnic_adapter *adapter = netdev_priv(dev);
     u32 wol_cfg;
+    int err = 0;
 
     if (qlcnic_83xx_check(adapter))
         return -EOPNOTSUPP;
     if (wol->wolopts & ~WAKE_MAGIC)
         return -EINVAL;
 
-    wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+    wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+    if (err == -EIO)
+        return err;
     if (!(wol_cfg & (1 << adapter->portnum)))
         return -EOPNOTSUPP;
 
-    wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+    wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
+    if (err == -EIO)
+        return err;
     if (wol->wolopts & WAKE_MAGIC)
         wol_cfg |= 1UL << adapter->portnum;
     else
@@ -1540,7 +1581,7 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
         return 0;
     case QLCNIC_SET_QUIESCENT:
     case QLCNIC_RESET_QUIESCENT:
-        state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+        state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
         if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
             netdev_info(netdev, "Device in FAILED state\n");
         return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 5b5d2edf125d..4d5f59b2d153 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -317,16 +317,20 @@ static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data)
 int
 qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
 {
-    int done = 0, timeout = 0;
+    int timeout = 0;
+    int err = 0;
+    u32 done = 0;
 
     while (!done) {
-        done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
+        done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)),
+                       &err);
         if (done == 1)
             break;
         if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
             dev_err(&adapter->pdev->dev,
                     "Failed to acquire sem=%d lock; holdby=%d\n",
-                    sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
+                    sem,
+                    id_reg ? QLCRD32(adapter, id_reg, &err) : -1);
             return -EIO;
         }
         msleep(1);
@@ -341,19 +345,22 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
 void
 qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
 {
-    QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
+    int err = 0;
+
+    QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)), &err);
 }
 
 int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
 {
+    int err = 0;
     u32 data;
 
     if (qlcnic_82xx_check(adapter))
         qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data);
     else {
-        data = qlcnic_83xx_rd_reg_indirect(adapter, addr);
-        if (data == -EIO)
-            return -EIO;
+        data = QLCRD32(adapter, addr, &err);
+        if (err == -EIO)
+            return err;
     }
     return data;
 }
@@ -516,20 +523,18 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
     if (netdev->flags & IFF_PROMISC) {
         if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
             mode = VPORT_MISS_MODE_ACCEPT_ALL;
-    } else if (netdev->flags & IFF_ALLMULTI) {
-        if (netdev_mc_count(netdev) > ahw->max_mc_count) {
-            mode = VPORT_MISS_MODE_ACCEPT_MULTI;
-        } else if (!netdev_mc_empty(netdev) &&
-               !qlcnic_sriov_vf_check(adapter)) {
-            netdev_for_each_mc_addr(ha, netdev)
-                qlcnic_nic_add_mac(adapter, ha->addr,
-                           vlan);
-        }
-        if (mode != VPORT_MISS_MODE_ACCEPT_MULTI &&
-            qlcnic_sriov_vf_check(adapter))
-            qlcnic_vf_add_mc_list(netdev, vlan);
+    } else if ((netdev->flags & IFF_ALLMULTI) ||
+               (netdev_mc_count(netdev) > ahw->max_mc_count)) {
+        mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+    } else if (!netdev_mc_empty(netdev) &&
+               !qlcnic_sriov_vf_check(adapter)) {
+        netdev_for_each_mc_addr(ha, netdev)
+            qlcnic_nic_add_mac(adapter, ha->addr, vlan);
     }
 
+    if (qlcnic_sriov_vf_check(adapter))
+        qlcnic_vf_add_mc_list(netdev, vlan);
+
     /* configure unicast MAC address, if there is not sufficient space
      * to store all the unicast addresses then enable promiscuous mode
      */
@@ -1161,7 +1166,8 @@ int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off,
     return -EIO;
 }
 
-int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off,
+                              int *err)
 {
     unsigned long flags;
     int rv;
@@ -1417,7 +1423,7 @@ int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
 
 int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
 {
-    int offset, board_type, magic;
+    int offset, board_type, magic, err = 0;
     struct pci_dev *pdev = adapter->pdev;
 
     offset = QLCNIC_FW_MAGIC_OFFSET;
@@ -1437,7 +1443,9 @@ int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
     adapter->ahw->board_type = board_type;
 
     if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
-        u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
+        u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I, &err);
+        if (err == -EIO)
+            return err;
         if ((gpio & 0x8000) == 0)
             board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
     }
@@ -1477,10 +1485,13 @@ int
 qlcnic_wol_supported(struct qlcnic_adapter *adapter)
 {
     u32 wol_cfg;
+    int err = 0;
 
-    wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+    wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
     if (wol_cfg & (1UL << adapter->portnum)) {
-        wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+        wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
+        if (err == -EIO)
+            return err;
         if (wol_cfg & (1 << adapter->portnum))
             return 1;
     }
@@ -1541,6 +1552,7 @@ void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter)
 void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
                           loff_t offset, size_t size)
 {
+    int err = 0;
     u32 data;
     u64 qmdata;
 
@@ -1548,7 +1560,7 @@ void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
         qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
         memcpy(buf, &qmdata, size);
     } else {
-        data = QLCRD32(adapter, offset);
+        data = QLCRD32(adapter, offset, &err);
         memcpy(buf, &data, size);
     }
 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 2c22504f57aa..4a71b28effcb 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -154,7 +154,7 @@ struct qlcnic_hardware_context;
 struct qlcnic_adapter;
 
 int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
-int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong);
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
 int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
 int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int);
 int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index d28336fc65ab..974d62607e13 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -142,7 +142,7 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
                          buffrag->length, PCI_DMA_TODEVICE);
             buffrag->dma = 0ULL;
         }
-        for (j = 0; j < cmd_buf->frag_count; j++) {
+        for (j = 1; j < cmd_buf->frag_count; j++) {
             buffrag++;
             if (buffrag->dma) {
                 pci_unmap_page(adapter->pdev, buffrag->dma,
@@ -286,10 +286,11 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
 {
     long timeout = 0;
     long done = 0;
+    int err = 0;
 
     cond_resched();
     while (done == 0) {
-        done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
+        done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS, &err);
         done &= 2;
         if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
             dev_err(&adapter->pdev->dev,
@@ -304,6 +305,8 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
 static int do_rom_fast_read(struct qlcnic_adapter *adapter,
                             u32 addr, u32 *valp)
 {
+    int err = 0;
+
     QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
     QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
     QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
@@ -317,7 +320,9 @@ static int do_rom_fast_read(struct qlcnic_adapter *adapter,
     udelay(10);
     QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
 
-    *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
+    *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA, &err);
+    if (err == -EIO)
+        return err;
     return 0;
 }
 
@@ -369,11 +374,11 @@ int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp)
 
 int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
 {
-    int addr, val;
+    int addr, err = 0;
     int i, n, init_delay;
     struct crb_addr_pair *buf;
     unsigned offset;
-    u32 off;
+    u32 off, val;
     struct pci_dev *pdev = adapter->pdev;
 
     QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, 0);
@@ -402,7 +407,9 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
     QLCWR32(adapter, QLCNIC_CRB_NIU + 0xb0000, 0x00);
 
     /* halt sre */
-    val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000);
+    val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000, &err);
+    if (err == -EIO)
+        return err;
     QLCWR32(adapter, QLCNIC_CRB_SRE + 0x1000, val & (~(0x1)));
 
     /* halt epg */
@@ -719,10 +726,12 @@ qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
 static int
 qlcnic_has_mn(struct qlcnic_adapter *adapter)
 {
-    u32 capability;
-    capability = 0;
+    u32 capability = 0;
+    int err = 0;
 
-    capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
+    capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY, &err);
+    if (err == -EIO)
+        return err;
     if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
         return 1;
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index d3f8797efcc3..6946d354f44f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -161,36 +161,68 @@ static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
     return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
 }
 
+static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
+                                      struct qlcnic_filter *fil,
+                                      void *addr, u16 vlan_id)
+{
+    int ret;
+    u8 op;
+
+    op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+    ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
+    if (ret)
+        return;
+
+    op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
+    ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
+    if (!ret) {
+        hlist_del(&fil->fnode);
+        adapter->rx_fhash.fnum--;
+    }
+}
+
+static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
+                                                    void *addr, u16 vlan_id)
+{
+    struct qlcnic_filter *tmp_fil = NULL;
+    struct hlist_node *n;
+
+    hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+        if (!memcmp(tmp_fil->faddr, addr, ETH_ALEN) &&
+            tmp_fil->vlan_id == vlan_id)
+            return tmp_fil;
+    }
+
+    return NULL;
+}
+
 void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
                           int loopback_pkt, u16 vlan_id)
 {
     struct ethhdr *phdr = (struct ethhdr *)(skb->data);
     struct qlcnic_filter *fil, *tmp_fil;
-    struct hlist_node *n;
     struct hlist_head *head;
     unsigned long time;
     u64 src_addr = 0;
-    u8 hindex, found = 0, op;
+    u8 hindex, op;
     int ret;
 
     memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+    hindex = qlcnic_mac_hash(src_addr) &
+             (adapter->fhash.fbucket_size - 1);
 
     if (loopback_pkt) {
         if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
             return;
 
-        hindex = qlcnic_mac_hash(src_addr) &
-                 (adapter->fhash.fbucket_size - 1);
         head = &(adapter->rx_fhash.fhead[hindex]);
 
-        hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
-            if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
-                tmp_fil->vlan_id == vlan_id) {
-                time = tmp_fil->ftime;
-                if (jiffies > (QLCNIC_READD_AGE * HZ + time))
-                    tmp_fil->ftime = jiffies;
-                return;
-            }
+        tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+        if (tmp_fil) {
+            time = tmp_fil->ftime;
+            if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
+                tmp_fil->ftime = jiffies;
+            return;
         }
 
         fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
@@ -205,36 +237,37 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
         adapter->rx_fhash.fnum++;
         spin_unlock(&adapter->rx_mac_learn_lock);
     } else {
-        hindex = qlcnic_mac_hash(src_addr) &
-                 (adapter->fhash.fbucket_size - 1);
-        head = &(adapter->rx_fhash.fhead[hindex]);
-        spin_lock(&adapter->rx_mac_learn_lock);
-        hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
-            if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
-                tmp_fil->vlan_id == vlan_id) {
-                found = 1;
-                break;
-            }
-        }
+        head = &adapter->fhash.fhead[hindex];
 
-        if (!found) {
-            spin_unlock(&adapter->rx_mac_learn_lock);
-            return;
-        }
+        spin_lock(&adapter->mac_learn_lock);
 
-        op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
-        ret = qlcnic_sre_macaddr_change(adapter, (u8 *)&src_addr,
-                                        vlan_id, op);
-        if (!ret) {
+        tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+        if (tmp_fil) {
             op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
             ret = qlcnic_sre_macaddr_change(adapter,
                                             (u8 *)&src_addr,
                                             vlan_id, op);
             if (!ret) {
-                hlist_del(&(tmp_fil->fnode));
-                adapter->rx_fhash.fnum--;
+                hlist_del(&tmp_fil->fnode);
+                adapter->fhash.fnum--;
             }
+
+            spin_unlock(&adapter->mac_learn_lock);
+
+            return;
         }
+
+        spin_unlock(&adapter->mac_learn_lock);
+
+        head = &adapter->rx_fhash.fhead[hindex];
+
+        spin_lock(&adapter->rx_mac_learn_lock);
+
+        tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+        if (tmp_fil)
+            qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
+                                      vlan_id);
+
         spin_unlock(&adapter->rx_mac_learn_lock);
     }
 }
@@ -262,7 +295,7 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
 
     mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
     mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
-    memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
+    memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
 
     vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
     vlan_req->vlan_id = cpu_to_le16(vlan_id);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 4528f8ec333b..bc05d016c859 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -977,8 +977,8 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
 static int
 qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
 {
-    int err;
     struct qlcnic_info nic_info;
+    int err = 0;
 
     memset(&nic_info, 0, sizeof(struct qlcnic_info));
     err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
@@ -993,7 +993,9 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
 
     if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
         u32 temp;
-        temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
+        temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2, &err);
+        if (err == -EIO)
+            return err;
         adapter->ahw->extra_capability[0] = temp;
     }
     adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
@@ -1383,6 +1385,8 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
     if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
         if (qlcnic_82xx_check(adapter))
             handler = qlcnic_tmp_intr;
+        else
+            handler = qlcnic_83xx_tmp_intr;
         if (!QLCNIC_IS_MSI_FAMILY(adapter))
             flags |= IRQF_SHARED;
 
@@ -1531,12 +1535,12 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
     if (netdev->features & NETIF_F_LRO)
         qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
 
+    set_bit(__QLCNIC_DEV_UP, &adapter->state);
     qlcnic_napi_enable(adapter);
 
     qlcnic_linkevent_request(adapter, 1);
 
     adapter->ahw->reset_context = 0;
-    set_bit(__QLCNIC_DEV_UP, &adapter->state);
     return 0;
 }
 
@@ -2139,7 +2143,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x &&
         !!qlcnic_use_msi)
         dev_warn(&pdev->dev,
-                 "83xx adapter do not support MSI interrupts\n");
+                 "Device does not support MSI interrupts\n");
 
     err = qlcnic_setup_intr(adapter, 0);
     if (err) {
@@ -2161,7 +2165,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     if (err)
         goto err_out_disable_mbx_intr;
 
-    qlcnic_set_drv_version(adapter);
+    if (adapter->portnum == 0)
+        qlcnic_set_drv_version(adapter);
 
     pci_set_drvdata(pdev, adapter);
 
@@ -3081,7 +3086,8 @@ done:
     adapter->fw_fail_cnt = 0;
     adapter->flags &= ~QLCNIC_FW_HANG;
     clear_bit(__QLCNIC_RESETTING, &adapter->state);
-    qlcnic_set_drv_version(adapter);
+    if (adapter->portnum == 0)
+        qlcnic_set_drv_version(adapter);
 
     if (!qlcnic_clr_drv_state(adapter))
         qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
@@ -3093,6 +3099,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
 {
     u32 state = 0, heartbeat;
     u32 peg_status;
+    int err = 0;
 
     if (qlcnic_check_temp(adapter))
         goto detach;
@@ -3139,11 +3146,11 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
3139 "PEG_NET_4_PC: 0x%x\n", 3146 "PEG_NET_4_PC: 0x%x\n",
3140 peg_status, 3147 peg_status,
3141 QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2), 3148 QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2),
3142 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c), 3149 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, &err),
3143 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c), 3150 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, &err),
3144 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c), 3151 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, &err),
3145 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c), 3152 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, &err),
3146 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c)); 3153 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, &err));
3147 if (QLCNIC_FWERROR_CODE(peg_status) == 0x67) 3154 if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
3148 dev_err(&adapter->pdev->dev, 3155 dev_err(&adapter->pdev->dev,
3149 "Firmware aborted with error code 0x00006700. " 3156 "Firmware aborted with error code 0x00006700. "
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index ab8a6744d402..79e54efe07b9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1084,7 +1084,7 @@ flash_temp:
     tmpl_hdr = ahw->fw_dump.tmpl_hdr;
     tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
 
-    if ((tmpl_hdr->version & 0xffffff) >= 0x20001)
+    if ((tmpl_hdr->version & 0xfffff) >= 0x20001)
         ahw->fw_dump.use_pex_dma = true;
     else
         ahw->fw_dump.use_pex_dma = false;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 62380ce89905..5d40045b3cea 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -562,7 +562,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
     INIT_LIST_HEAD(&adapter->vf_mc_list);
     if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
         dev_warn(&adapter->pdev->dev,
-                 "83xx adapter do not support MSI interrupts\n");
+                 "Device does not support MSI interrupts\n");
 
     err = qlcnic_setup_intr(adapter, 1);
     if (err) {
@@ -762,6 +762,7 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
         memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
         mbx->req.arg[0] = (type | (mbx->req.num << 16) |
                            (3 << 29));
+        mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
         return 0;
     }
 }
@@ -813,6 +814,7 @@ static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
         cmd->req.num = trans->req_pay_size / 4;
         cmd->rsp.num = trans->rsp_pay_size / 4;
         hdr = trans->rsp_hdr;
+        cmd->op_type = trans->req_hdr->op_type;
     }
 
     trans->trans_id = seq;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index ee0c1d307966..eb49cd65378c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -635,12 +635,12 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
                                            struct qlcnic_cmd_args *cmd)
 {
     struct qlcnic_vf_info *vf = trans->vf;
-    struct qlcnic_adapter *adapter = vf->adapter;
-    int err;
+    struct qlcnic_vport *vp = vf->vp;
+    struct qlcnic_adapter *adapter;
     u16 func = vf->pci_func;
+    int err;
 
-    cmd->rsp.arg[0] = trans->req_hdr->cmd_op;
-    cmd->rsp.arg[0] |= (1 << 16);
+    adapter = vf->adapter;
 
     if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
646 err = qlcnic_sriov_pf_config_vport(adapter, 1, func); 646 err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
@@ -650,6 +650,8 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
 			qlcnic_sriov_pf_config_vport(adapter, 0, func);
 		}
 	} else {
+		if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
+			vp->vlan = 0;
 		err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
 	}
 
@@ -1183,7 +1185,7 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
 	u8 cmd_op, mode = vp->vlan_mode;
 
 	cmd_op = trans->req_hdr->cmd_op;
-	cmd->rsp.arg[0] = (cmd_op & 0xffff) | 14 << 16 | 1 << 25;
+	cmd->rsp.arg[0] |= 1 << 25;
 
 	switch (mode) {
 	case QLC_GUEST_VLAN_MODE:
@@ -1561,6 +1563,7 @@ void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
 				   struct qlcnic_vf_info *vf)
 {
 	struct net_device *dev = vf->adapter->netdev;
+	struct qlcnic_vport *vp = vf->vp;
 
 	if (!test_and_clear_bit(QLC_BC_VF_STATE, &vf->state)) {
 		clear_bit(QLC_BC_VF_FLR, &vf->state);
@@ -1573,6 +1576,9 @@ void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
 		return;
 	}
 
+	if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
+		vp->vlan = 0;
+
 	qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
 	netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func);
 }
@@ -1621,13 +1627,15 @@ int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
-	int i, num_vfs = sriov->num_vfs;
+	int i, num_vfs;
 	struct qlcnic_vf_info *vf_info;
 	u8 *curr_mac;
 
 	if (!qlcnic_sriov_pf_check(adapter))
 		return -EOPNOTSUPP;
 
+	num_vfs = sriov->num_vfs;
+
 	if (!is_valid_ether_addr(mac) || vf >= num_vfs)
 		return -EINVAL;
 
@@ -1741,6 +1749,7 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
 
 	switch (vlan) {
 	case 4095:
+		vp->vlan = 0;
 		vp->vlan_mode = QLC_GUEST_VLAN_MODE;
 		break;
 	case 0:
@@ -1759,6 +1768,29 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
 	return 0;
 }
 
+static inline __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
+					     struct qlcnic_vport *vp, int vf)
+{
+	__u32 vlan = 0;
+
+	switch (vp->vlan_mode) {
+	case QLC_PVID_MODE:
+		vlan = vp->vlan;
+		break;
+	case QLC_GUEST_VLAN_MODE:
+		vlan = MAX_VLAN_ID;
+		break;
+	case QLC_NO_VLAN_MODE:
+		vlan = 0;
+		break;
+	default:
+		netdev_info(adapter->netdev, "Invalid VLAN mode = %d for VF %d\n",
+			    vp->vlan_mode, vf);
+	}
+
+	return vlan;
+}
+
 int qlcnic_sriov_get_vf_config(struct net_device *netdev,
 			       int vf, struct ifla_vf_info *ivi)
 {
@@ -1774,7 +1806,7 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev,
 
 	vp = sriov->vf_info[vf].vp;
 	memcpy(&ivi->mac, vp->mac, ETH_ALEN);
-	ivi->vlan = vp->vlan;
+	ivi->vlan = qlcnic_sriov_get_vf_vlan(adapter, vp, vf);
 	ivi->qos = vp->qos;
 	ivi->spoofchk = vp->spoofchk;
 	if (vp->max_tx_bw == MAX_BW)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 10ed82b3baca..660c3f5b2237 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -170,9 +170,9 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
 
 	if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
 		err = qlcnic_get_beacon_state(adapter, &h_beacon_state);
-		if (!err) {
-			dev_info(&adapter->pdev->dev,
+		if (err) {
+			netdev_err(adapter->netdev,
 				 "Failed to get current beacon state\n");
 		} else {
 			if (h_beacon_state == QLCNIC_BEACON_DISABLE)
 				ahw->beacon_state = 0;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index e6acb9fa5767..d2e591955bdd 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -478,7 +478,7 @@ rx_status_loop:
 
 	while (1) {
 		u32 status, len;
-		dma_addr_t mapping;
+		dma_addr_t mapping, new_mapping;
 		struct sk_buff *skb, *new_skb;
 		struct cp_desc *desc;
 		const unsigned buflen = cp->rx_buf_sz;
@@ -520,6 +520,14 @@ rx_status_loop:
 			goto rx_next;
 		}
 
+		new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
+					     PCI_DMA_FROMDEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
+			dev->stats.rx_dropped++;
+			kfree_skb(new_skb);
+			goto rx_next;
+		}
+
 		dma_unmap_single(&cp->pdev->dev, mapping,
 				 buflen, PCI_DMA_FROMDEVICE);
 
@@ -531,12 +539,11 @@ rx_status_loop:
 
 		skb_put(skb, len);
 
-		mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
-					 PCI_DMA_FROMDEVICE);
 		cp->rx_skb[rx_tail] = new_skb;
 
 		cp_rx_skb(cp, skb, desc);
 		rx++;
+		mapping = new_mapping;
 
 rx_next:
 		cp->rx_ring[rx_tail].opts2 = 0;
@@ -716,6 +723,22 @@ static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
 		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
 }
 
+static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
+				   int first, int entry_last)
+{
+	int frag, index;
+	struct cp_desc *txd;
+	skb_frag_t *this_frag;
+	for (frag = 0; frag+first < entry_last; frag++) {
+		index = first+frag;
+		cp->tx_skb[index] = NULL;
+		txd = &cp->tx_ring[index];
+		this_frag = &skb_shinfo(skb)->frags[frag];
+		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
+				 skb_frag_size(this_frag), PCI_DMA_TODEVICE);
+	}
+}
+
 static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 				  struct net_device *dev)
 {
@@ -749,6 +772,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 
 		len = skb->len;
 		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, mapping))
+			goto out_dma_error;
+
 		txd->opts2 = opts2;
 		txd->addr = cpu_to_le64(mapping);
 		wmb();
@@ -786,6 +812,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 		first_len = skb_headlen(skb);
 		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
 					       first_len, PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, first_mapping))
+			goto out_dma_error;
+
 		cp->tx_skb[entry] = skb;
 		entry = NEXT_TX(entry);
 
@@ -799,6 +828,11 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 			mapping = dma_map_single(&cp->pdev->dev,
 						 skb_frag_address(this_frag),
 						 len, PCI_DMA_TODEVICE);
+			if (dma_mapping_error(&cp->pdev->dev, mapping)) {
+				unwind_tx_frag_mapping(cp, skb, first_entry, entry);
+				goto out_dma_error;
+			}
+
 			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 
 			ctrl = eor | len | DescOwn;
@@ -859,11 +893,16 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
 		netif_stop_queue(dev);
 
+out_unlock:
 	spin_unlock_irqrestore(&cp->lock, intr_flags);
 
 	cpw8(TxPoll, NormalTxPoll);
 
 	return NETDEV_TX_OK;
+out_dma_error:
+	kfree_skb(skb);
+	cp->dev->stats.tx_dropped++;
+	goto out_unlock;
 }
 
 /* Set or clear the multicast filter for this adaptor.
@@ -1054,6 +1093,10 @@ static int cp_refill_rx(struct cp_private *cp)
 
 		mapping = dma_map_single(&cp->pdev->dev, skb->data,
 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, mapping)) {
+			kfree_skb(skb);
+			goto err_out;
+		}
 		cp->rx_skb[i] = skb;
 
 		cp->rx_ring[i].opts2 = 0;
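
Aside: the 8139cp hunks above all apply one rule: an address returned by dma_map_single() must be checked with dma_mapping_error() before it reaches the hardware, and a failed mapping ends in a dropped packet plus an unwind of any fragment mappings already programmed. A minimal sketch of the rule in isolation follows; the function and its caller are hypothetical, and <linux/dma-mapping.h> plus <linux/skbuff.h> are assumed.

static int queue_tx_buffer(struct device *d, struct sk_buff *skb)
{
	dma_addr_t mapping;

	mapping = dma_map_single(d, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(d, mapping)) {
		/* Invalid address: it must never be written into a
		 * descriptor.  Drop the packet instead.
		 */
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}
	/* ... write 'mapping' into a TX descriptor and kick the NIC ... */
	return 0;
}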
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 4106a743ca74..85e5c97191dd 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3689,7 +3689,7 @@ static void rtl_phy_work(struct rtl8169_private *tp)
 	if (tp->link_ok(ioaddr))
 		return;
 
-	netif_warn(tp, link, tp->dev, "PHY reset until link up\n");
+	netif_dbg(tp, link, tp->dev, "PHY reset until link up\n");
 
 	tp->phy_reset_enable(tp);
 
@@ -6468,6 +6468,8 @@ static int rtl8169_close(struct net_device *dev)
 	rtl8169_down(dev);
 	rtl_unlock_work(tp);
 
+	cancel_work_sync(&tp->wk.work);
+
 	free_irq(pdev->irq, dev);
 
 	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
@@ -6793,8 +6795,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
 		rtl8168_driver_stop(tp);
 	}
 
-	cancel_work_sync(&tp->wk.work);
-
 	netif_napi_del(&tp->napi);
 
 	unregister_netdev(dev);
@@ -7088,7 +7088,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	RTL_W8(Cfg9346, Cfg9346_Unlock);
 	RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
-	RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
+	RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
 	if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
 		tp->features |= RTL_FEATURE_WOL;
 	if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index b74a60ab9ac7..30d744235d27 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -675,7 +675,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
 		BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
 		BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
 			     EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
-		rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF;
+		rep_index = spec->type - EFX_FILTER_UC_DEF;
 		ins_index = rep_index;
 
 		spin_lock_bh(&state->lock);
@@ -1209,7 +1209,9 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
 	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
 
-	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
+	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
+			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
+			   rxq_index);
 	rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
 				      ip->daddr, ports[1], ip->saddr, ports[0]);
 	if (rc)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index eb4aea3fe793..f5d7ad75e479 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1318,7 +1318,7 @@ static void sis900_timer(unsigned long data)
 		if (duplex){
 			sis900_set_mode(sis_priv, speed, duplex);
 			sis630_set_eq(net_dev, sis_priv->chipset_rev);
-			netif_start_queue(net_dev);
+			netif_carrier_on(net_dev);
 		}
 
 		sis_priv->timer.expires = jiffies + HZ;
@@ -1336,10 +1336,8 @@ static void sis900_timer(unsigned long data)
 		status = sis900_default_phy(net_dev);
 		mii_phy = sis_priv->mii;
 
-		if (status & MII_STAT_LINK){
+		if (status & MII_STAT_LINK)
 			sis900_check_mode(net_dev, mii_phy);
-			netif_carrier_on(net_dev);
-		}
 	} else {
 	/* Link ON -> OFF */
 		if (!(status & MII_STAT_LINK)){
@@ -1612,12 +1610,6 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 	unsigned int index_cur_tx, index_dirty_tx;
 	unsigned int count_dirty_tx;
 
-	/* Don't transmit data before the complete of auto-negotiation */
-	if(!sis_priv->autong_complete){
-		netif_stop_queue(net_dev);
-		return NETDEV_TX_BUSY;
-	}
-
 	spin_lock_irqsave(&sis_priv->lock, flags);
 
 	/* Calculate the next Tx descriptor entry. */
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index c9d942a5c335..1ef9d8a555aa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -33,10 +33,15 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 	struct stmmac_priv *priv = (struct stmmac_priv *)p;
 	unsigned int txsize = priv->dma_tx_size;
 	unsigned int entry = priv->cur_tx % txsize;
-	struct dma_desc *desc = priv->dma_tx + entry;
+	struct dma_desc *desc;
 	unsigned int nopaged_len = skb_headlen(skb);
 	unsigned int bmax, len;
 
+	if (priv->extend_desc)
+		desc = (struct dma_desc *)(priv->dma_etx + entry);
+	else
+		desc = priv->dma_tx + entry;
+
 	if (priv->plat->enh_desc)
 		bmax = BUF_SIZE_8KiB;
 	else
@@ -54,7 +59,11 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 						STMMAC_RING_MODE);
 		wmb();
 		entry = (++priv->cur_tx) % txsize;
-		desc = priv->dma_tx + entry;
+
+		if (priv->extend_desc)
+			desc = (struct dma_desc *)(priv->dma_etx + entry);
+		else
+			desc = priv->dma_tx + entry;
 
 		desc->des2 = dma_map_single(priv->device, skb->data + bmax,
 					    len, DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f2ccb36e8685..0a9bb9d30c3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -939,15 +939,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 
 	skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
 				 GFP_KERNEL);
-	if (unlikely(skb == NULL)) {
+	if (!skb) {
 		pr_err("%s: Rx init fails; skb is NULL\n", __func__);
-		return 1;
+		return -ENOMEM;
 	}
 	skb_reserve(skb, NET_IP_ALIGN);
 	priv->rx_skbuff[i] = skb;
 	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
 						priv->dma_buf_sz,
 						DMA_FROM_DEVICE);
+	if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
+		pr_err("%s: DMA mapping error\n", __func__);
+		dev_kfree_skb_any(skb);
+		return -EINVAL;
+	}
 
 	p->des2 = priv->rx_skbuff_dma[i];
 
@@ -958,6 +963,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 	return 0;
 }
 
+static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
+{
+	if (priv->rx_skbuff[i]) {
+		dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+				 priv->dma_buf_sz, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(priv->rx_skbuff[i]);
+	}
+	priv->rx_skbuff[i] = NULL;
+}
+
 /**
  * init_dma_desc_rings - init the RX/TX descriptor rings
  * @dev: net device structure
@@ -965,13 +980,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
  * and allocates the socket buffers. It suppors the chained and ring
  * modes.
  */
-static void init_dma_desc_rings(struct net_device *dev)
+static int init_dma_desc_rings(struct net_device *dev)
 {
 	int i;
 	struct stmmac_priv *priv = netdev_priv(dev);
 	unsigned int txsize = priv->dma_tx_size;
 	unsigned int rxsize = priv->dma_rx_size;
 	unsigned int bfsize = 0;
+	int ret = -ENOMEM;
 
 	/* Set the max buffer size according to the DESC mode
 	 * and the MTU. Note that RING mode allows 16KiB bsize.
@@ -992,34 +1008,60 @@ static void init_dma_desc_rings(struct net_device *dev)
 							  dma_extended_desc),
 						   &priv->dma_rx_phy,
 						   GFP_KERNEL);
+		if (!priv->dma_erx)
+			goto err_dma;
+
 		priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
 						   sizeof(struct
 							  dma_extended_desc),
 						   &priv->dma_tx_phy,
 						   GFP_KERNEL);
-		if ((!priv->dma_erx) || (!priv->dma_etx))
-			return;
+		if (!priv->dma_etx) {
+			dma_free_coherent(priv->device, priv->dma_rx_size *
+					  sizeof(struct dma_extended_desc),
+					  priv->dma_erx, priv->dma_rx_phy);
+			goto err_dma;
+		}
 	} else {
 		priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
 						  sizeof(struct dma_desc),
 						  &priv->dma_rx_phy,
 						  GFP_KERNEL);
+		if (!priv->dma_rx)
+			goto err_dma;
+
 		priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
 						  sizeof(struct dma_desc),
 						  &priv->dma_tx_phy,
 						  GFP_KERNEL);
-		if ((!priv->dma_rx) || (!priv->dma_tx))
-			return;
+		if (!priv->dma_tx) {
+			dma_free_coherent(priv->device, priv->dma_rx_size *
+					  sizeof(struct dma_desc),
+					  priv->dma_rx, priv->dma_rx_phy);
+			goto err_dma;
+		}
 	}
 
 	priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
 					    GFP_KERNEL);
+	if (!priv->rx_skbuff_dma)
+		goto err_rx_skbuff_dma;
+
 	priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
 					GFP_KERNEL);
+	if (!priv->rx_skbuff)
+		goto err_rx_skbuff;
+
 	priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
 					    GFP_KERNEL);
+	if (!priv->tx_skbuff_dma)
+		goto err_tx_skbuff_dma;
+
 	priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
 					GFP_KERNEL);
+	if (!priv->tx_skbuff)
+		goto err_tx_skbuff;
+
 	if (netif_msg_probe(priv)) {
 		pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
 			 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
@@ -1034,8 +1076,9 @@ static void init_dma_desc_rings(struct net_device *dev)
 		else
 			p = priv->dma_rx + i;
 
-		if (stmmac_init_rx_buffers(priv, p, i))
-			break;
+		ret = stmmac_init_rx_buffers(priv, p, i);
+		if (ret)
+			goto err_init_rx_buffers;
 
 		if (netif_msg_probe(priv))
 			pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
@@ -1081,20 +1124,44 @@ static void init_dma_desc_rings(struct net_device *dev)
 
 	if (netif_msg_hw(priv))
 		stmmac_display_rings(priv);
+
+	return 0;
+err_init_rx_buffers:
+	while (--i >= 0)
+		stmmac_free_rx_buffers(priv, i);
+	kfree(priv->tx_skbuff);
+err_tx_skbuff:
+	kfree(priv->tx_skbuff_dma);
+err_tx_skbuff_dma:
+	kfree(priv->rx_skbuff);
+err_rx_skbuff:
+	kfree(priv->rx_skbuff_dma);
+err_rx_skbuff_dma:
+	if (priv->extend_desc) {
+		dma_free_coherent(priv->device, priv->dma_tx_size *
+				  sizeof(struct dma_extended_desc),
+				  priv->dma_etx, priv->dma_tx_phy);
+		dma_free_coherent(priv->device, priv->dma_rx_size *
+				  sizeof(struct dma_extended_desc),
+				  priv->dma_erx, priv->dma_rx_phy);
+	} else {
+		dma_free_coherent(priv->device,
+				  priv->dma_tx_size * sizeof(struct dma_desc),
+				  priv->dma_tx, priv->dma_tx_phy);
+		dma_free_coherent(priv->device,
+				  priv->dma_rx_size * sizeof(struct dma_desc),
+				  priv->dma_rx, priv->dma_rx_phy);
+	}
+err_dma:
+	return ret;
 }
 
 static void dma_free_rx_skbufs(struct stmmac_priv *priv)
 {
 	int i;
 
-	for (i = 0; i < priv->dma_rx_size; i++) {
-		if (priv->rx_skbuff[i]) {
-			dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
-					 priv->dma_buf_sz, DMA_FROM_DEVICE);
-			dev_kfree_skb_any(priv->rx_skbuff[i]);
-		}
-		priv->rx_skbuff[i] = NULL;
-	}
+	for (i = 0; i < priv->dma_rx_size; i++)
+		stmmac_free_rx_buffers(priv, i);
 }
 
 static void dma_free_tx_skbufs(struct stmmac_priv *priv)
@@ -1560,12 +1627,17 @@ static int stmmac_open(struct net_device *dev)
 	priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
 	priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
-	init_dma_desc_rings(dev);
+
+	ret = init_dma_desc_rings(dev);
+	if (ret < 0) {
+		pr_err("%s: DMA descriptors initialization failed\n", __func__);
+		goto dma_desc_error;
+	}
 
 	/* DMA initialization and SW reset */
 	ret = stmmac_init_dma_engine(priv);
 	if (ret < 0) {
-		pr_err("%s: DMA initialization failed\n", __func__);
+		pr_err("%s: DMA engine initialization failed\n", __func__);
 		goto init_error;
 	}
 
@@ -1672,6 +1744,7 @@ wolirq_error:
 
 init_error:
 	free_dma_desc_resources(priv);
+dma_desc_error:
 	if (priv->phydev)
 		phy_disconnect(priv->phydev);
 phy_error:
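
Aside: the init_dma_desc_rings() conversion above is the canonical goto-unwind shape: each allocation gets a label, and a failure jumps to the label that releases everything allocated so far, in reverse order, so the caller sees a single error return. A reduced, self-contained sketch (plain malloc stands in for the coherent-DMA and array allocations):

#include <stdlib.h>

static int init_rings(void **a, void **b, void **c)
{
	*a = malloc(64);
	if (!*a)
		goto err_a;
	*b = malloc(64);
	if (!*b)
		goto err_b;
	*c = malloc(64);
	if (!*c)
		goto err_c;
	return 0;

err_c:			/* c failed: undo b, then fall through to undo a */
	free(*b);
err_b:
	free(*a);
err_a:
	return -1;
}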
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 05a1674e204f..22a7a4336211 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1867,7 +1867,7 @@ static int cpsw_probe(struct platform_device *pdev)
 
 	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
 		for (i = res->start; i <= res->end; i++) {
-			if (request_irq(i, cpsw_interrupt, IRQF_DISABLED,
+			if (request_irq(i, cpsw_interrupt, 0,
 					dev_name(&pdev->dev), priv)) {
 				dev_err(priv->dev, "error attaching irq\n");
 				goto clean_ale_ret;
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 07b176bcf929..1a222bce4bd7 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1568,8 +1568,7 @@ static int emac_dev_open(struct net_device *ndev)
 	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
 		for (i = res->start; i <= res->end; i++) {
 			if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
-					     IRQF_DISABLED,
-					     ndev->name, ndev))
+					     0, ndev->name, ndev))
 				goto rollback;
 		}
 		k++;
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 1d6dc41f755d..d01cacf8a7c2 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2100,7 +2100,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 		}
-		netif_rx(skb);
+		netif_receive_skb(skb);
 
 		stats->rx_bytes += pkt_len;
 		stats->rx_packets++;
@@ -2884,6 +2884,7 @@ out:
 	return ret;
 
 err_iounmap:
+	netif_napi_del(&vptr->napi);
 	iounmap(regs);
 err_free_dev:
 	free_netdev(netdev);
@@ -2904,6 +2905,7 @@ static int velocity_remove(struct device *dev)
 	struct velocity_info *vptr = netdev_priv(netdev);
 
 	unregister_netdev(netdev);
+	netif_napi_del(&vptr->napi);
 	iounmap(vptr->mac_regs);
 	free_netdev(netdev);
 	velocity_nics--;
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 51f2bc376101..2dcc60fb37f1 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -210,8 +210,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
 		pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
 		pci_write_config_byte(pcidev,0x5a,0xc0);
 		WriteLPCReg(0x28, 0x70 );
-		if (via_ircc_open(pcidev, &info, 0x3076) == 0)
-			rc=0;
+		rc = via_ircc_open(pcidev, &info, 0x3076);
 	} else
 		rc = -ENODEV; //IR not turn on
 	} else { //Not VT1211
@@ -249,8 +248,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
 			info.irq=FirIRQ;
 			info.dma=FirDRQ1;
 			info.dma2=FirDRQ0;
-			if (via_ircc_open(pcidev, &info, 0x3096) == 0)
-				rc=0;
+			rc = via_ircc_open(pcidev, &info, 0x3096);
 		} else
 			rc = -ENODEV; //IR not turn on !!!!!
 	}//Not VT1211
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 18373b6ae37d..16b43bf544b7 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -337,8 +337,11 @@ static int macvlan_open(struct net_device *dev)
 	int err;
 
 	if (vlan->port->passthru) {
-		if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
-			dev_set_promiscuity(lowerdev, 1);
+		if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) {
+			err = dev_set_promiscuity(lowerdev, 1);
+			if (err < 0)
+				goto out;
+		}
 		goto hash_add;
 	}
 
@@ -736,6 +739,10 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
 			return -EADDRNOTAVAIL;
 	}
 
+	if (data && data[IFLA_MACVLAN_FLAGS] &&
+	    nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
+		return -EINVAL;
+
 	if (data && data[IFLA_MACVLAN_MODE]) {
 		switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
 		case MACVLAN_MODE_PRIVATE:
@@ -863,6 +870,18 @@ static int macvlan_changelink(struct net_device *dev,
 			      struct nlattr *tb[], struct nlattr *data[])
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
+	enum macvlan_mode mode;
+	bool set_mode = false;
+
+	/* Validate mode, but don't set yet: setting flags may fail. */
+	if (data && data[IFLA_MACVLAN_MODE]) {
+		set_mode = true;
+		mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+		/* Passthrough mode can't be set or cleared dynamically */
+		if ((mode == MACVLAN_MODE_PASSTHRU) !=
+		    (vlan->mode == MACVLAN_MODE_PASSTHRU))
+			return -EINVAL;
+	}
 
 	if (data && data[IFLA_MACVLAN_FLAGS]) {
 		__u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
@@ -879,8 +898,8 @@ static int macvlan_changelink(struct net_device *dev,
 		}
 		vlan->flags = flags;
 	}
-	if (data && data[IFLA_MACVLAN_MODE])
-		vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+	if (set_mode)
+		vlan->mode = mode;
 	return 0;
 }
 
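Aside: the macvlan_changelink() rework demonstrates validate-then-commit for netlink handlers: every attribute is checked before any device state is written, so a rejected request can never leave the interface half-configured. A self-contained sketch of the shape, with hypothetical request/state types standing in for the netlink attributes:

#include <errno.h>
#include <stdbool.h>

#define SUPPORTED_FLAGS 0x1	/* hypothetical flag mask */

struct request { bool has_mode, has_flags; int mode; unsigned int flags; };
struct state   { int mode; unsigned int flags; };

static bool mode_is_valid(int m) { return m >= 0 && m <= 3; }

static int changelink(const struct request *req, struct state *st)
{
	int mode = st->mode;

	if (req->has_mode) {
		if (!mode_is_valid(req->mode))
			return -EINVAL;		/* nothing written yet */
		mode = req->mode;
	}
	if (req->has_flags && (req->flags & ~SUPPORTED_FLAGS))
		return -EINVAL;			/* still nothing written */

	st->mode = mode;			/* all checks passed: commit */
	if (req->has_flags)
		st->flags = req->flags;
	return 0;
}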
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index a98fb0ed6aef..ea53abb20988 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -68,6 +68,8 @@ static const struct proto_ops macvtap_socket_ops;
 #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
 		      NETIF_F_TSO6 | NETIF_F_UFO)
 #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
+#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
+
 /*
  * RCU usage:
  * The macvtap_queue and the macvlan_dev are loosely coupled, the
@@ -278,7 +280,8 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct macvtap_queue *q = macvtap_get_queue(dev, skb);
-	netdev_features_t features;
+	netdev_features_t features = TAP_FEATURES;
+
 	if (!q)
 		goto drop;
 
@@ -287,9 +290,11 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
 
 	skb->dev = dev;
 	/* Apply the forward feature mask so that we perform segmentation
-	 * according to users wishes.
+	 * according to users wishes. This only works if VNET_HDR is
+	 * enabled.
 	 */
-	features = netif_skb_features(skb) & vlan->tap_features;
+	if (q->flags & IFF_VNET_HDR)
+		features |= vlan->tap_features;
 	if (netif_needs_gso(skb, features)) {
 		struct sk_buff *segs = __skb_gso_segment(skb, features, false);
 
@@ -818,10 +823,13 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
 	}
-	if (vlan)
+	if (vlan) {
+		local_bh_disable();
 		macvlan_start_xmit(skb, vlan->dev);
-	else
+		local_bh_enable();
+	} else {
 		kfree_skb(skb);
+	}
 	rcu_read_unlock();
 
 	return total_len;
@@ -912,8 +920,11 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 done:
 	rcu_read_lock();
 	vlan = rcu_dereference(q->vlan);
-	if (vlan)
+	if (vlan) {
+		preempt_disable();
 		macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
+		preempt_enable();
+	}
 	rcu_read_unlock();
 
 	return ret ? ret : copied;
@@ -1058,8 +1069,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
 	/* tap_features are the same as features on tun/tap and
 	 * reflect user expectations.
 	 */
-	vlan->tap_features = vlan->dev->features &
-			     (feature_mask | ~TUN_OFFLOADS);
+	vlan->tap_features = feature_mask;
 	vlan->set_features = features;
 	netdev_update_features(vlan->dev);
 
@@ -1155,10 +1165,6 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
 			    TUN_F_TSO_ECN | TUN_F_UFO))
 			return -EINVAL;
 
-		/* TODO: only accept frames with the features that
-			 got enabled for forwarded frames */
-		if (!(q->flags & IFF_VNET_HDR))
-			return -EINVAL;
 		rtnl_lock();
 		ret = set_offload(q, arg);
 		rtnl_unlock();
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 61d3f4ebf52e..7f25e49ae37f 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -40,7 +40,7 @@ struct sun4i_mdio_data {
 static int sun4i_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
 	struct sun4i_mdio_data *data = bus->priv;
-	unsigned long start_jiffies;
+	unsigned long timeout_jiffies;
 	int value;
 
 	/* issue the phy address and reg */
@@ -49,10 +49,9 @@ static int sun4i_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 	writel(0x1, data->membase + EMAC_MAC_MCMD_REG);
 
 	/* Wait read complete */
-	start_jiffies = jiffies;
+	timeout_jiffies = jiffies + MDIO_TIMEOUT;
 	while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) {
-		if (time_after(start_jiffies,
-			       start_jiffies + MDIO_TIMEOUT))
+		if (time_is_before_jiffies(timeout_jiffies))
 			return -ETIMEDOUT;
 		msleep(1);
 	}
@@ -69,7 +68,7 @@ static int sun4i_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 			    u16 value)
 {
 	struct sun4i_mdio_data *data = bus->priv;
-	unsigned long start_jiffies;
+	unsigned long timeout_jiffies;
 
 	/* issue the phy address and reg */
 	writel((mii_id << 8) | regnum, data->membase + EMAC_MAC_MADR_REG);
@@ -77,10 +76,9 @@ static int sun4i_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 	writel(0x1, data->membase + EMAC_MAC_MCMD_REG);
 
 	/* Wait read complete */
-	start_jiffies = jiffies;
+	timeout_jiffies = jiffies + MDIO_TIMEOUT;
 	while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) {
-		if (time_after(start_jiffies,
-			       start_jiffies + MDIO_TIMEOUT))
+		if (time_is_before_jiffies(timeout_jiffies))
 			return -ETIMEDOUT;
 		msleep(1);
 	}
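
Aside: the mdio-sun4i fix replaces a test that could never fire, since time_after(start, start + MDIO_TIMEOUT) compares a timestamp against itself plus a constant, with the standard jiffies-deadline idiom: compute the deadline once, then poll it with time_is_before_jiffies(). Sketch of the idiom; hw_busy() is a placeholder for the EMAC_MAC_MIND_REG poll, and <linux/jiffies.h> plus <linux/delay.h> are assumed:

static int wait_not_busy(void)
{
	unsigned long timeout = jiffies + MDIO_TIMEOUT;

	while (hw_busy()) {
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;	/* deadline has passed */
		msleep(1);
	}
	return 0;
}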
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 8e7af8354342..138de837977f 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -23,7 +23,7 @@
 #define RTL821x_INER_INIT	0x6400
 #define RTL821x_INSR		0x13
 
-#define RTL8211E_INER_LINK_STAT	0x10
+#define RTL8211E_INER_LINK_STATUS	0x400
 
 MODULE_DESCRIPTION("Realtek PHY driver");
 MODULE_AUTHOR("Johnson Leung");
@@ -57,7 +57,7 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
 
 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
 		err = phy_write(phydev, RTL821x_INER,
-				RTL8211E_INER_LINK_STAT);
+				RTL8211E_INER_LINK_STATUS);
 	else
 		err = phy_write(phydev, RTL821x_INER, 0);
 
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index db690a372260..71af122edf2d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1074,8 +1074,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	u32 rxhash;
 
 	if (!(tun->flags & TUN_NO_PI)) {
-		if ((len -= sizeof(pi)) > total_len)
+		if (len < sizeof(pi))
 			return -EINVAL;
+		len -= sizeof(pi);
 
 		if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
 			return -EFAULT;
@@ -1083,8 +1084,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	}
 
 	if (tun->flags & TUN_VNET_HDR) {
-		if ((len -= tun->vnet_hdr_sz) > total_len)
+		if (len < tun->vnet_hdr_sz)
 			return -EINVAL;
+		len -= tun->vnet_hdr_sz;
 
 		if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
 			return -EFAULT;
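
Aside: the tun fix closes an unsigned-underflow hole. len is a size_t, so the old check (len -= sizeof(pi)) > total_len wraps to a huge value when len < sizeof(pi), and the bound can be bypassed. Checking before subtracting is provably safe. A standalone illustration:

#include <stddef.h>

/* Returns 0 on success, -1 if the buffer cannot hold the header.
 * Check first, subtract second: the reverse order underflows.
 */
static int consume_header(size_t *len, size_t hdr_size)
{
	if (*len < hdr_size)
		return -1;
	*len -= hdr_size;
	return 0;
}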
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 1e3c302d94fe..2bc87e3a8141 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1029,10 +1029,10 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
 	dev->mii.supports_gmii = 1;
 
 	dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-			      NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+			      NETIF_F_RXCSUM;
 
 	dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-				 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+				 NETIF_F_RXCSUM;
 
 	/* Enable checksum offload */
 	*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
@@ -1173,7 +1173,6 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
 	if (((skb->len + 8) % frame_size) == 0)
 		tx_hdr2 |= 0x80008000;	/* Enable padding */
 
-	skb_linearize(skb);
 	headroom = skb_headroom(skb);
 	tailroom = skb_tailroom(skb);
 
@@ -1317,10 +1316,10 @@ static int ax88179_reset(struct usbnet *dev)
 			  1, 1, tmp);
 
 	dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-			      NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+			      NETIF_F_RXCSUM;
 
 	dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-				 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+				 NETIF_F_RXCSUM;
 
 	/* Enable checksum offload */
 	*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index cba1d46e672e..86292e6aaf49 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2816,13 +2816,16 @@ exit:
 static int hso_get_config_data(struct usb_interface *interface)
 {
 	struct usb_device *usbdev = interface_to_usbdev(interface);
-	u8 config_data[17];
+	u8 *config_data = kmalloc(17, GFP_KERNEL);
 	u32 if_num = interface->altsetting->desc.bInterfaceNumber;
 	s32 result;
 
+	if (!config_data)
+		return -ENOMEM;
 	if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
 			    0x86, 0xC0, 0, 0, config_data, 17,
 			    USB_CTRL_SET_TIMEOUT) != 0x11) {
+		kfree(config_data);
 		return -EIO;
 	}
 
@@ -2873,6 +2876,7 @@ static int hso_get_config_data(struct usb_interface *interface)
 	if (config_data[16] & 0x1)
 		result |= HSO_INFO_CRC_BUG;
 
+	kfree(config_data);
 	return result;
 }
 
@@ -2886,6 +2890,11 @@ static int hso_probe(struct usb_interface *interface,
 	struct hso_shared_int *shared_int;
 	struct hso_device *tmp_dev = NULL;
 
+	if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
+		dev_err(&interface->dev, "Not our interface\n");
+		return -ENODEV;
+	}
+
 	if_num = interface->altsetting->desc.bInterfaceNumber;
 
 	/* Get the interface/port specification from either driver_info or from
@@ -2895,10 +2904,6 @@ static int hso_probe(struct usb_interface *interface,
 	else
 		port_spec = hso_get_config_data(interface);
 
-	if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
-		dev_err(&interface->dev, "Not our interface\n");
-		return -ENODEV;
-	}
 	/* Check if we need to switch to alt interfaces prior to port
 	 * configuration */
 	if (interface->num_altsetting > 1)
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ee13f9eb740c..11c51f275366 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -344,17 +344,41 @@ static const int multicast_filter_limit = 32;
 static
 int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
 {
-	return usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
+	int ret;
+	void *tmp;
+
+	tmp = kmalloc(size, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
 			       RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
-			       value, index, data, size, 500);
+			       value, index, tmp, size, 500);
+
+	memcpy(data, tmp, size);
+	kfree(tmp);
+
+	return ret;
 }
 
 static
 int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
 {
-	return usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
+	int ret;
+	void *tmp;
+
+	tmp = kmalloc(size, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	memcpy(tmp, data, size);
+
+	ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
 			       RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
-			       value, index, data, size, 500);
+			       value, index, tmp, size, 500);
+
+	kfree(tmp);
+	return ret;
 }
 
 static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
@@ -490,37 +514,31 @@ int usb_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data)
 
 static u32 ocp_read_dword(struct r8152 *tp, u16 type, u16 index)
 {
-	u32 data;
+	__le32 data;
 
-	if (type == MCU_TYPE_PLA)
-		pla_ocp_read(tp, index, sizeof(data), &data);
-	else
-		usb_ocp_read(tp, index, sizeof(data), &data);
+	generic_ocp_read(tp, index, sizeof(data), &data, type);
 
 	return __le32_to_cpu(data);
 }
 
 static void ocp_write_dword(struct r8152 *tp, u16 type, u16 index, u32 data)
 {
-	if (type == MCU_TYPE_PLA)
-		pla_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(data), &data);
-	else
-		usb_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(data), &data);
+	__le32 tmp = __cpu_to_le32(data);
+
+	generic_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(tmp), &tmp, type);
 }
 
 static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index)
 {
 	u32 data;
+	__le32 tmp;
 	u8 shift = index & 2;
 
 	index &= ~3;
 
-	if (type == MCU_TYPE_PLA)
-		pla_ocp_read(tp, index, sizeof(data), &data);
-	else
-		usb_ocp_read(tp, index, sizeof(data), &data);
+	generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
 
-	data = __le32_to_cpu(data);
+	data = __le32_to_cpu(tmp);
 	data >>= (shift * 8);
 	data &= 0xffff;
 
@@ -529,7 +547,8 @@ static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index)
 
 static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data)
 {
-	u32 tmp, mask = 0xffff;
+	u32 mask = 0xffff;
+	__le32 tmp;
 	u16 byen = BYTE_EN_WORD;
 	u8 shift = index & 2;
 
@@ -542,34 +561,25 @@ static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data)
 		index &= ~3;
 	}
 
-	if (type == MCU_TYPE_PLA)
-		pla_ocp_read(tp, index, sizeof(tmp), &tmp);
-	else
-		usb_ocp_read(tp, index, sizeof(tmp), &tmp);
+	generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
 
-	tmp = __le32_to_cpu(tmp) & ~mask;
-	tmp |= data;
-	tmp = __cpu_to_le32(tmp);
+	data |= __le32_to_cpu(tmp) & ~mask;
+	tmp = __cpu_to_le32(data);
 
-	if (type == MCU_TYPE_PLA)
-		pla_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
-	else
-		usb_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
+	generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
 }
 
 static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index)
 {
 	u32 data;
+	__le32 tmp;
 	u8 shift = index & 3;
 
 	index &= ~3;
 
-	if (type == MCU_TYPE_PLA)
-		pla_ocp_read(tp, index, sizeof(data), &data);
-	else
-		usb_ocp_read(tp, index, sizeof(data), &data);
+	generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
 
-	data = __le32_to_cpu(data);
+	data = __le32_to_cpu(tmp);
 	data >>= (shift * 8);
 	data &= 0xff;
 
@@ -578,7 +588,8 @@ static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index)
 
 static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
 {
-	u32 tmp, mask = 0xff;
+	u32 mask = 0xff;
+	__le32 tmp;
 	u16 byen = BYTE_EN_BYTE;
 	u8 shift = index & 3;
 
@@ -591,19 +602,12 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
 		index &= ~3;
 	}
 
-	if (type == MCU_TYPE_PLA)
-		pla_ocp_read(tp, index, sizeof(tmp), &tmp);
-	else
-		usb_ocp_read(tp, index, sizeof(tmp), &tmp);
+	generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
 
-	tmp = __le32_to_cpu(tmp) & ~mask;
-	tmp |= data;
-	tmp = __cpu_to_le32(tmp);
+	data |= __le32_to_cpu(tmp) & ~mask;
+	tmp = __cpu_to_le32(data);
 
-	if (type == MCU_TYPE_PLA)
-		pla_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
-	else
-		usb_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
+	generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
 }
 
 static void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value)
@@ -685,21 +689,14 @@ static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
 static inline void set_ethernet_addr(struct r8152 *tp)
 {
 	struct net_device *dev = tp->netdev;
-	u8 *node_id;
-
-	node_id = kmalloc(sizeof(u8) * 8, GFP_KERNEL);
-	if (!node_id) {
-		netif_err(tp, probe, dev, "out of memory");
-		return;
-	}
+	u8 node_id[8] = {0};
 
-	if (pla_ocp_read(tp, PLA_IDR, sizeof(u8) * 8, node_id) < 0)
+	if (pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id) < 0)
 		netif_notice(tp, probe, dev, "inet addr fail\n");
 	else {
 		memcpy(dev->dev_addr, node_id, dev->addr_len);
 		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 	}
-	kfree(node_id);
 }
 
 static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
@@ -882,15 +879,10 @@ static void rtl8152_set_rx_mode(struct net_device *netdev)
 static void _rtl8152_set_rx_mode(struct net_device *netdev)
 {
 	struct r8152 *tp = netdev_priv(netdev);
-	u32 tmp, *mc_filter;	/* Multicast hash filter */
+	u32 mc_filter[2];	/* Multicast hash filter */
+	__le32 tmp[2];
 	u32 ocp_data;
 
-	mc_filter = kmalloc(sizeof(u32) * 2, GFP_KERNEL);
-	if (!mc_filter) {
-		netif_err(tp, link, netdev, "out of memory");
-		return;
-	}
-
 	clear_bit(RTL8152_SET_RX_MODE, &tp->flags);
 	netif_stop_queue(netdev);
 	ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
@@ -918,14 +910,12 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
918 } 910 }
919 } 911 }
920 912
921 tmp = mc_filter[0]; 913 tmp[0] = __cpu_to_le32(swab32(mc_filter[1]));
922 mc_filter[0] = __cpu_to_le32(swab32(mc_filter[1])); 914 tmp[1] = __cpu_to_le32(swab32(mc_filter[0]));
923 mc_filter[1] = __cpu_to_le32(swab32(tmp));
924 915
925 pla_ocp_write(tp, PLA_MAR, BYTE_EN_DWORD, sizeof(u32) * 2, mc_filter); 916 pla_ocp_write(tp, PLA_MAR, BYTE_EN_DWORD, sizeof(tmp), tmp);
926 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); 917 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
927 netif_wake_queue(netdev); 918 netif_wake_queue(netdev);
928 kfree(mc_filter);
929} 919}
930 920
931static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, 921static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
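
The ocp_write_byte() hunk above deserves a note: the old code kept the little-endian wire value and the host-order intermediate in one u32, which breaks on big-endian hosts (and is what sparse flags); the rewrite confines the wire image to a dedicated __le32 and merges in host order. A standalone sketch of the byte-lane arithmetic, with illustrative names only (plain C, compiles as-is):

    #include <stdint.h>
    #include <stdio.h>

    /* Merge one byte into its lane of a 32-bit register image; the lane
     * comes from the low two bits of the register index, as in the driver. */
    static uint32_t merge_byte(uint32_t reg_host, uint16_t index, uint8_t byte)
    {
            unsigned int shift = (index & 3) * 8;
            uint32_t mask = 0xffu << shift;

            return (reg_host & ~mask) | ((uint32_t)byte << shift);
    }

    int main(void)
    {
            /* Writing 0xab into byte lane 2 of 0x11223344 yields 0x11ab3344. */
            printf("%08x\n", (unsigned)merge_byte(0x11223344, 2, 0xab));
            return 0;
    }
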
diff --git a/drivers/net/usb/r815x.c b/drivers/net/usb/r815x.c
index 852392269718..2df2f4fb42a7 100644
--- a/drivers/net/usb/r815x.c
+++ b/drivers/net/usb/r815x.c
@@ -24,34 +24,43 @@
24 24
25static int pla_read_word(struct usb_device *udev, u16 index) 25static int pla_read_word(struct usb_device *udev, u16 index)
26{ 26{
27 int data, ret; 27 int ret;
28 u8 shift = index & 2; 28 u8 shift = index & 2;
29 __le32 ocp_data; 29 __le32 *tmp;
30
31 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
32 if (!tmp)
33 return -ENOMEM;
30 34
31 index &= ~3; 35 index &= ~3;
32 36
33 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 37 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
34 RTL815x_REQ_GET_REGS, RTL815x_REQT_READ, 38 RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
35 index, MCU_TYPE_PLA, &ocp_data, sizeof(ocp_data), 39 index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
36 500);
37 if (ret < 0) 40 if (ret < 0)
38 return ret; 41 goto out2;
39 42
40 data = __le32_to_cpu(ocp_data); 43 ret = __le32_to_cpu(*tmp);
41 data >>= (shift * 8); 44 ret >>= (shift * 8);
42 data &= 0xffff; 45 ret &= 0xffff;
43 46
44 return data; 47out2:
48 kfree(tmp);
49 return ret;
45} 50}
46 51
47static int pla_write_word(struct usb_device *udev, u16 index, u32 data) 52static int pla_write_word(struct usb_device *udev, u16 index, u32 data)
48{ 53{
49 __le32 ocp_data; 54 __le32 *tmp;
50 u32 mask = 0xffff; 55 u32 mask = 0xffff;
51 u16 byen = BYTE_EN_WORD; 56 u16 byen = BYTE_EN_WORD;
52 u8 shift = index & 2; 57 u8 shift = index & 2;
53 int ret; 58 int ret;
54 59
60 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
61 if (!tmp)
62 return -ENOMEM;
63
55 data &= mask; 64 data &= mask;
56 65
57 if (shift) { 66 if (shift) {
@@ -63,19 +72,20 @@ static int pla_write_word(struct usb_device *udev, u16 index, u32 data)
63 72
64 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 73 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
65 RTL815x_REQ_GET_REGS, RTL815x_REQT_READ, 74 RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
66 index, MCU_TYPE_PLA, &ocp_data, sizeof(ocp_data), 75 index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
67 500);
68 if (ret < 0) 76 if (ret < 0)
69 return ret; 77 goto out3;
70 78
71 data |= __le32_to_cpu(ocp_data) & ~mask; 79 data |= __le32_to_cpu(*tmp) & ~mask;
72 ocp_data = __cpu_to_le32(data); 80 *tmp = __cpu_to_le32(data);
73 81
74 ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 82 ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
75 RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE, 83 RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE,
76 index, MCU_TYPE_PLA | byen, &ocp_data, 84 index, MCU_TYPE_PLA | byen, tmp, sizeof(*tmp),
77 sizeof(ocp_data), 500); 85 500);
78 86
87out3:
88 kfree(tmp);
79 return ret; 89 return ret;
80} 90}
81 91
@@ -116,11 +126,18 @@ out1:
116static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg) 126static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg)
117{ 127{
118 struct usbnet *dev = netdev_priv(netdev); 128 struct usbnet *dev = netdev_priv(netdev);
129 int ret;
119 130
120 if (phy_id != R815x_PHY_ID) 131 if (phy_id != R815x_PHY_ID)
121 return -EINVAL; 132 return -EINVAL;
122 133
123 return ocp_reg_read(dev, BASE_MII + reg * 2); 134 if (usb_autopm_get_interface(dev->intf) < 0)
135 return -ENODEV;
136
137 ret = ocp_reg_read(dev, BASE_MII + reg * 2);
138
139 usb_autopm_put_interface(dev->intf);
140 return ret;
124} 141}
125 142
126static 143static
@@ -131,7 +148,12 @@ void r815x_mdio_write(struct net_device *netdev, int phy_id, int reg, int val)
131 if (phy_id != R815x_PHY_ID) 148 if (phy_id != R815x_PHY_ID)
132 return; 149 return;
133 150
151 if (usb_autopm_get_interface(dev->intf) < 0)
152 return;
153
134 ocp_reg_write(dev, BASE_MII + reg * 2, val); 154 ocp_reg_write(dev, BASE_MII + reg * 2, val);
155
156 usb_autopm_put_interface(dev->intf);
135} 157}
136 158
137static int r8153_bind(struct usbnet *dev, struct usb_interface *intf) 159static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
@@ -150,7 +172,7 @@ static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
150 dev->mii.phy_id = R815x_PHY_ID; 172 dev->mii.phy_id = R815x_PHY_ID;
151 dev->mii.supports_gmii = 1; 173 dev->mii.supports_gmii = 1;
152 174
153 return 0; 175 return status;
154} 176}
155 177
156static int r8152_bind(struct usbnet *dev, struct usb_interface *intf) 178static int r8152_bind(struct usbnet *dev, struct usb_interface *intf)
@@ -169,7 +191,7 @@ static int r8152_bind(struct usbnet *dev, struct usb_interface *intf)
169 dev->mii.phy_id = R815x_PHY_ID; 191 dev->mii.phy_id = R815x_PHY_ID;
170 dev->mii.supports_gmii = 0; 192 dev->mii.supports_gmii = 0;
171 193
172 return 0; 194 return status;
173} 195}
174 196
175static const struct driver_info r8152_info = { 197static const struct driver_info r8152_info = {
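
A pattern worth calling out in the r815x.c hunks: buffers passed to usb_control_msg() may be mapped for DMA, so they must live on the heap; the on-stack __le32 the old code used can corrupt the stack or fault on architectures with non-coherent DMA. A kernel-style sketch of the adopted pattern (the helper name and the out-parameter are illustrative; the driver instead returns the decoded value through its int return):

    static int read_reg_le32(struct usb_device *udev, u16 index, u32 *val)
    {
            __le32 *buf;
            int ret;

            buf = kmalloc(sizeof(*buf), GFP_KERNEL);  /* DMA-safe, unlike the stack */
            if (!buf)
                    return -ENOMEM;

            ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                  RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
                                  index, MCU_TYPE_PLA, buf, sizeof(*buf), 500);
            if (ret >= 0)
                    *val = __le32_to_cpu(*buf);

            kfree(buf);
            return ret < 0 ? ret : 0;
    }
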
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 75409748c774..66ebbacf066f 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -45,7 +45,6 @@
45#define EEPROM_MAC_OFFSET (0x01) 45#define EEPROM_MAC_OFFSET (0x01)
46#define DEFAULT_TX_CSUM_ENABLE (true) 46#define DEFAULT_TX_CSUM_ENABLE (true)
47#define DEFAULT_RX_CSUM_ENABLE (true) 47#define DEFAULT_RX_CSUM_ENABLE (true)
48#define DEFAULT_TSO_ENABLE (true)
49#define SMSC75XX_INTERNAL_PHY_ID (1) 48#define SMSC75XX_INTERNAL_PHY_ID (1)
50#define SMSC75XX_TX_OVERHEAD (8) 49#define SMSC75XX_TX_OVERHEAD (8)
51#define MAX_RX_FIFO_SIZE (20 * 1024) 50#define MAX_RX_FIFO_SIZE (20 * 1024)
@@ -1410,17 +1409,14 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
1410 1409
1411 INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write); 1410 INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write);
1412 1411
1413 if (DEFAULT_TX_CSUM_ENABLE) { 1412 if (DEFAULT_TX_CSUM_ENABLE)
1414 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1413 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1415 if (DEFAULT_TSO_ENABLE) 1414
1416 dev->net->features |= NETIF_F_SG |
1417 NETIF_F_TSO | NETIF_F_TSO6;
1418 }
1419 if (DEFAULT_RX_CSUM_ENABLE) 1415 if (DEFAULT_RX_CSUM_ENABLE)
1420 dev->net->features |= NETIF_F_RXCSUM; 1416 dev->net->features |= NETIF_F_RXCSUM;
1421 1417
1422 dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1418 dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1423 NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM; 1419 NETIF_F_RXCSUM;
1424 1420
1425 ret = smsc75xx_wait_ready(dev, 0); 1421 ret = smsc75xx_wait_ready(dev, 0);
1426 if (ret < 0) { 1422 if (ret < 0) {
@@ -2200,8 +2196,6 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
2200{ 2196{
2201 u32 tx_cmd_a, tx_cmd_b; 2197 u32 tx_cmd_a, tx_cmd_b;
2202 2198
2203 skb_linearize(skb);
2204
2205 if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) { 2199 if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
2206 struct sk_buff *skb2 = 2200 struct sk_buff *skb2 =
2207 skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags); 2201 skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index da866523cf20..eee1f19ef1e9 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -269,6 +269,7 @@ static void veth_setup(struct net_device *dev)
269 dev->ethtool_ops = &veth_ethtool_ops; 269 dev->ethtool_ops = &veth_ethtool_ops;
270 dev->features |= NETIF_F_LLTX; 270 dev->features |= NETIF_F_LLTX;
271 dev->features |= VETH_FEATURES; 271 dev->features |= VETH_FEATURES;
272 dev->vlan_features = dev->features;
272 dev->destructor = veth_dev_free; 273 dev->destructor = veth_dev_free;
273 274
274 dev->hw_features = VETH_FEATURES; 275 dev->hw_features = VETH_FEATURES;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index a5ba8dd7e6be..767f7af3bd40 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -136,7 +136,8 @@ struct vxlan_dev {
136 u32 flags; /* VXLAN_F_* below */ 136 u32 flags; /* VXLAN_F_* below */
137 137
138 struct work_struct sock_work; 138 struct work_struct sock_work;
139 struct work_struct igmp_work; 139 struct work_struct igmp_join;
140 struct work_struct igmp_leave;
140 141
141 unsigned long age_interval; 142 unsigned long age_interval;
142 struct timer_list age_timer; 143 struct timer_list age_timer;
@@ -736,7 +737,6 @@ static bool vxlan_snoop(struct net_device *dev,
736 return false; 737 return false;
737} 738}
738 739
739
740/* See if multicast group is already in use by other ID */ 740/* See if multicast group is already in use by other ID */
741static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip) 741static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip)
742{ 742{
@@ -770,12 +770,13 @@ static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs)
770 queue_work(vxlan_wq, &vs->del_work); 770 queue_work(vxlan_wq, &vs->del_work);
771} 771}
772 772
773/* Callback to update multicast group membership. 773/* Callback to update multicast group membership when first VNI on
 774 * Scheduled when vxlan goes up/down. 774 * multicast address is brought up
775 * Done as workqueue because ip_mc_join_group acquires RTNL.
775 */ 776 */
776static void vxlan_igmp_work(struct work_struct *work) 777static void vxlan_igmp_join(struct work_struct *work)
777{ 778{
778 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_work); 779 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
779 struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id); 780 struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
780 struct vxlan_sock *vs = vxlan->vn_sock; 781 struct vxlan_sock *vs = vxlan->vn_sock;
781 struct sock *sk = vs->sock->sk; 782 struct sock *sk = vs->sock->sk;
@@ -785,10 +786,27 @@ static void vxlan_igmp_work(struct work_struct *work)
785 }; 786 };
786 787
787 lock_sock(sk); 788 lock_sock(sk);
788 if (vxlan_group_used(vn, vxlan->default_dst.remote_ip)) 789 ip_mc_join_group(sk, &mreq);
789 ip_mc_join_group(sk, &mreq); 790 release_sock(sk);
790 else 791
791 ip_mc_leave_group(sk, &mreq); 792 vxlan_sock_release(vn, vs);
793 dev_put(vxlan->dev);
794}
795
796/* Inverse of vxlan_igmp_join when last VNI is brought down */
797static void vxlan_igmp_leave(struct work_struct *work)
798{
799 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
800 struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
801 struct vxlan_sock *vs = vxlan->vn_sock;
802 struct sock *sk = vs->sock->sk;
803 struct ip_mreqn mreq = {
804 .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip,
805 .imr_ifindex = vxlan->default_dst.remote_ifindex,
806 };
807
808 lock_sock(sk);
809 ip_mc_leave_group(sk, &mreq);
792 release_sock(sk); 810 release_sock(sk);
793 811
794 vxlan_sock_release(vn, vs); 812 vxlan_sock_release(vn, vs);
@@ -1359,6 +1377,7 @@ static void vxlan_uninit(struct net_device *dev)
1359/* Start ageing timer and join group when device is brought up */ 1377/* Start ageing timer and join group when device is brought up */
1360static int vxlan_open(struct net_device *dev) 1378static int vxlan_open(struct net_device *dev)
1361{ 1379{
1380 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
1362 struct vxlan_dev *vxlan = netdev_priv(dev); 1381 struct vxlan_dev *vxlan = netdev_priv(dev);
1363 struct vxlan_sock *vs = vxlan->vn_sock; 1382 struct vxlan_sock *vs = vxlan->vn_sock;
1364 1383
@@ -1366,10 +1385,11 @@ static int vxlan_open(struct net_device *dev)
1366 if (!vs) 1385 if (!vs)
1367 return -ENOTCONN; 1386 return -ENOTCONN;
1368 1387
1369 if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) { 1388 if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
1389 vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
1370 vxlan_sock_hold(vs); 1390 vxlan_sock_hold(vs);
1371 dev_hold(dev); 1391 dev_hold(dev);
1372 queue_work(vxlan_wq, &vxlan->igmp_work); 1392 queue_work(vxlan_wq, &vxlan->igmp_join);
1373 } 1393 }
1374 1394
1375 if (vxlan->age_interval) 1395 if (vxlan->age_interval)
@@ -1400,13 +1420,15 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
1400/* Cleanup timer and forwarding table on shutdown */ 1420/* Cleanup timer and forwarding table on shutdown */
1401static int vxlan_stop(struct net_device *dev) 1421static int vxlan_stop(struct net_device *dev)
1402{ 1422{
1423 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
1403 struct vxlan_dev *vxlan = netdev_priv(dev); 1424 struct vxlan_dev *vxlan = netdev_priv(dev);
1404 struct vxlan_sock *vs = vxlan->vn_sock; 1425 struct vxlan_sock *vs = vxlan->vn_sock;
1405 1426
1406 if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) { 1427 if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
 1428 !vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
1407 vxlan_sock_hold(vs); 1429 vxlan_sock_hold(vs);
1408 dev_hold(dev); 1430 dev_hold(dev);
1409 queue_work(vxlan_wq, &vxlan->igmp_work); 1431 queue_work(vxlan_wq, &vxlan->igmp_leave);
1410 } 1432 }
1411 1433
1412 del_timer_sync(&vxlan->age_timer); 1434 del_timer_sync(&vxlan->age_timer);
@@ -1471,7 +1493,8 @@ static void vxlan_setup(struct net_device *dev)
1471 1493
1472 INIT_LIST_HEAD(&vxlan->next); 1494 INIT_LIST_HEAD(&vxlan->next);
1473 spin_lock_init(&vxlan->hash_lock); 1495 spin_lock_init(&vxlan->hash_lock);
1474 INIT_WORK(&vxlan->igmp_work, vxlan_igmp_work); 1496 INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join);
1497 INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave);
1475 INIT_WORK(&vxlan->sock_work, vxlan_sock_work); 1498 INIT_WORK(&vxlan->sock_work, vxlan_sock_work);
1476 1499
1477 init_timer_deferrable(&vxlan->age_timer); 1500 init_timer_deferrable(&vxlan->age_timer);
@@ -1770,8 +1793,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
1770 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 1793 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
1771 struct vxlan_dev *vxlan = netdev_priv(dev); 1794 struct vxlan_dev *vxlan = netdev_priv(dev);
1772 1795
1773 flush_workqueue(vxlan_wq);
1774
1775 spin_lock(&vn->sock_lock); 1796 spin_lock(&vn->sock_lock);
1776 hlist_del_rcu(&vxlan->hlist); 1797 hlist_del_rcu(&vxlan->hlist);
1777 spin_unlock(&vn->sock_lock); 1798 spin_unlock(&vn->sock_lock);
@@ -1878,10 +1899,12 @@ static __net_exit void vxlan_exit_net(struct net *net)
1878{ 1899{
1879 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 1900 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1880 struct vxlan_dev *vxlan; 1901 struct vxlan_dev *vxlan;
1902 LIST_HEAD(list);
1881 1903
1882 rtnl_lock(); 1904 rtnl_lock();
1883 list_for_each_entry(vxlan, &vn->vxlan_list, next) 1905 list_for_each_entry(vxlan, &vn->vxlan_list, next)
1884 dev_close(vxlan->dev); 1906 unregister_netdevice_queue(vxlan->dev, &list);
1907 unregister_netdevice_many(&list);
1885 rtnl_unlock(); 1908 rtnl_unlock();
1886} 1909}
1887 1910
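
Taken together, the vxlan hunks split the dual-purpose igmp_work into dedicated join and leave handlers, move the vxlan_group_used() decision into the open/stop paths (which run under RTNL, where the VNI list is stable), and drop the flush_workqueue() from dellink in favor of the references each queued work item holds. A condensed sketch of that hold-before-queue contract, with names from the driver and the bodies abbreviated:

    /* queueing site, e.g. vxlan_open()/vxlan_stop(), under RTNL: */
    vxlan_sock_hold(vs);                     /* the work owns a socket ref */
    dev_hold(dev);                           /* ...and a netdev ref        */
    queue_work(vxlan_wq, &vxlan->igmp_join);

    /* handler tail, e.g. vxlan_igmp_join()/vxlan_igmp_leave(): */
    lock_sock(sk);
    ip_mc_join_group(sk, &mreq);             /* process context: takes RTNL */
    release_sock(sk);
    vxlan_sock_release(vn, vs);              /* drop the refs taken at queue time */
    dev_put(vxlan->dev);
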
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index cde58fe96254..82e8088ca9b4 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -1,6 +1,6 @@
1config ATH10K 1config ATH10K
2 tristate "Atheros 802.11ac wireless cards support" 2 tristate "Atheros 802.11ac wireless cards support"
3 depends on MAC80211 3 depends on MAC80211 && HAS_DMA
4 select ATH_COMMON 4 select ATH_COMMON
5 ---help--- 5 ---help---
6 This module adds support for wireless adapters based on 6 This module adds support for wireless adapters based on
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 81b686c6a376..40825d43322e 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -325,7 +325,7 @@ ath5k_prepare_multicast(struct ieee80211_hw *hw,
325 struct netdev_hw_addr *ha; 325 struct netdev_hw_addr *ha;
326 326
327 mfilt[0] = 0; 327 mfilt[0] = 0;
328 mfilt[1] = 1; 328 mfilt[1] = 0;
329 329
330 netdev_hw_addr_list_for_each(ha, mc_list) { 330 netdev_hw_addr_list_for_each(ha, mc_list) {
331 /* calculate XOR of eight 6-bit values */ 331 /* calculate XOR of eight 6-bit values */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index d1acfe98918a..1576d58291d4 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -610,7 +610,15 @@ static void ar5008_hw_override_ini(struct ath_hw *ah,
610 REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); 610 REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
611 611
612 if (AR_SREV_9280_20_OR_LATER(ah)) { 612 if (AR_SREV_9280_20_OR_LATER(ah)) {
613 val = REG_READ(ah, AR_PCU_MISC_MODE2); 613 /*
614 * For AR9280 and above, there is a new feature that allows
 615 * multicast search based on both the MAC address and Key ID.
616 * By default, this feature is enabled. But since the driver
617 * is not using this feature, we switch it off; otherwise
618 * multicast search based on MAC addr only will fail.
619 */
620 val = REG_READ(ah, AR_PCU_MISC_MODE2) &
621 (~AR_ADHOC_MCAST_KEYID_ENABLE);
614 622
615 if (!AR_SREV_9271(ah)) 623 if (!AR_SREV_9271(ah))
616 val &= ~AR_PCU_MISC_MODE2_HWWAR1; 624 val &= ~AR_PCU_MISC_MODE2_HWWAR1;
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 9e582e14da74..5205a3625e84 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -1082,7 +1082,7 @@ static void ath9k_hif_usb_firmware_fail(struct hif_device_usb *hif_dev)
1082 struct device *dev = &hif_dev->udev->dev; 1082 struct device *dev = &hif_dev->udev->dev;
1083 struct device *parent = dev->parent; 1083 struct device *parent = dev->parent;
1084 1084
1085 complete(&hif_dev->fw_done); 1085 complete_all(&hif_dev->fw_done);
1086 1086
1087 if (parent) 1087 if (parent)
1088 device_lock(parent); 1088 device_lock(parent);
@@ -1131,7 +1131,7 @@ static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
1131 1131
1132 release_firmware(fw); 1132 release_firmware(fw);
1133 hif_dev->flags |= HIF_USB_READY; 1133 hif_dev->flags |= HIF_USB_READY;
1134 complete(&hif_dev->fw_done); 1134 complete_all(&hif_dev->fw_done);
1135 1135
1136 return; 1136 return;
1137 1137
@@ -1295,7 +1295,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
1295 1295
1296 usb_set_intfdata(interface, NULL); 1296 usb_set_intfdata(interface, NULL);
1297 1297
1298 if (!unplugged && (hif_dev->flags & HIF_USB_START)) 1298 /* If firmware was loaded we should drop it
 1299 * and go back to the first stage bootloader. */
1300 if (!unplugged && (hif_dev->flags & HIF_USB_READY))
1299 ath9k_hif_usb_reboot(udev); 1301 ath9k_hif_usb_reboot(udev);
1300 1302
1301 kfree(hif_dev); 1303 kfree(hif_dev);
@@ -1316,7 +1318,10 @@ static int ath9k_hif_usb_suspend(struct usb_interface *interface,
1316 if (!(hif_dev->flags & HIF_USB_START)) 1318 if (!(hif_dev->flags & HIF_USB_START))
1317 ath9k_htc_suspend(hif_dev->htc_handle); 1319 ath9k_htc_suspend(hif_dev->htc_handle);
1318 1320
1319 ath9k_hif_usb_dealloc_urbs(hif_dev); 1321 wait_for_completion(&hif_dev->fw_done);
1322
1323 if (hif_dev->flags & HIF_USB_READY)
1324 ath9k_hif_usb_dealloc_urbs(hif_dev);
1320 1325
1321 return 0; 1326 return 0;
1322} 1327}
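
Two related fixes sit in the hif_usb.c hunks: the firmware paths now signal fw_done with complete_all() instead of complete(), and suspend blocks on that completion before freeing URBs. complete() releases one waiter per call, and the firmware callback runs exactly once, so a second waiter (suspend racing with disconnect) could sleep forever. A minimal kernel-style sketch of the one-shot event semantics (not driver code):

    #include <linux/completion.h>

    static DECLARE_COMPLETION(fw_done);

    static void producer_runs_once(void)
    {
            complete_all(&fw_done);  /* plain complete() would release one waiter */
    }

    static void any_number_of_waiters(void)
    {
            wait_for_completion(&fw_done);  /* returns at once after complete_all() */
    }
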
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 71a183ffc77f..c3676bf1d6c4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -861,6 +861,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
861 if (error != 0) 861 if (error != 0)
862 goto err_rx; 862 goto err_rx;
863 863
864 ath9k_hw_disable(priv->ah);
864#ifdef CONFIG_MAC80211_LEDS 865#ifdef CONFIG_MAC80211_LEDS
865 /* must be initialized before ieee80211_register_hw */ 866 /* must be initialized before ieee80211_register_hw */
866 priv->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw, 867 priv->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw,
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index c59ae43b9b35..927992732620 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -146,6 +146,28 @@ static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
146 ARRAY_SIZE(bf->rates)); 146 ARRAY_SIZE(bf->rates));
147} 147}
148 148
149static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
150 struct sk_buff *skb)
151{
152 int q;
153
154 q = skb_get_queue_mapping(skb);
155 if (txq == sc->tx.uapsdq)
156 txq = sc->tx.txq_map[q];
157
158 if (txq != sc->tx.txq_map[q])
159 return;
160
161 if (WARN_ON(--txq->pending_frames < 0))
162 txq->pending_frames = 0;
163
164 if (txq->stopped &&
165 txq->pending_frames < sc->tx.txq_max_pending[q]) {
166 ieee80211_wake_queue(sc->hw, q);
167 txq->stopped = false;
168 }
169}
170
149static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 171static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
150{ 172{
151 struct ath_txq *txq = tid->ac->txq; 173 struct ath_txq *txq = tid->ac->txq;
@@ -167,6 +189,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
167 if (!bf) { 189 if (!bf) {
168 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 190 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
169 if (!bf) { 191 if (!bf) {
192 ath_txq_skb_done(sc, txq, skb);
170 ieee80211_free_txskb(sc->hw, skb); 193 ieee80211_free_txskb(sc->hw, skb);
171 continue; 194 continue;
172 } 195 }
@@ -811,6 +834,7 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
811 834
812 if (!bf) { 835 if (!bf) {
813 __skb_unlink(skb, &tid->buf_q); 836 __skb_unlink(skb, &tid->buf_q);
837 ath_txq_skb_done(sc, txq, skb);
814 ieee80211_free_txskb(sc->hw, skb); 838 ieee80211_free_txskb(sc->hw, skb);
815 continue; 839 continue;
816 } 840 }
@@ -1824,6 +1848,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq,
1824 1848
1825 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 1849 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1826 if (!bf) { 1850 if (!bf) {
1851 ath_txq_skb_done(sc, txq, skb);
1827 ieee80211_free_txskb(sc->hw, skb); 1852 ieee80211_free_txskb(sc->hw, skb);
1828 return; 1853 return;
1829 } 1854 }
@@ -2090,6 +2115,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
2090 2115
2091 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 2116 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
2092 if (!bf) { 2117 if (!bf) {
2118 ath_txq_skb_done(sc, txq, skb);
2093 if (txctl->paprd) 2119 if (txctl->paprd)
2094 dev_kfree_skb_any(skb); 2120 dev_kfree_skb_any(skb);
2095 else 2121 else
@@ -2189,7 +2215,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2189 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 2215 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2190 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2216 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2191 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; 2217 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
2192 int q, padpos, padsize; 2218 int padpos, padsize;
2193 unsigned long flags; 2219 unsigned long flags;
2194 2220
2195 ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb); 2221 ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
@@ -2225,21 +2251,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2225 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 2251 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
2226 2252
2227 __skb_queue_tail(&txq->complete_q, skb); 2253 __skb_queue_tail(&txq->complete_q, skb);
2228 2254 ath_txq_skb_done(sc, txq, skb);
2229 q = skb_get_queue_mapping(skb);
2230 if (txq == sc->tx.uapsdq)
2231 txq = sc->tx.txq_map[q];
2232
2233 if (txq == sc->tx.txq_map[q]) {
2234 if (WARN_ON(--txq->pending_frames < 0))
2235 txq->pending_frames = 0;
2236
2237 if (txq->stopped &&
2238 txq->pending_frames < sc->tx.txq_max_pending[q]) {
2239 ieee80211_wake_queue(sc->hw, q);
2240 txq->stopped = false;
2241 }
2242 }
2243} 2255}
2244 2256
2245static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 2257static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index e8308ec30970..ab636767fbde 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -145,7 +145,7 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
145 le16_to_cpu(hdr.type), hdr.flags); 145 le16_to_cpu(hdr.type), hdr.flags);
146 if (len <= MAX_MBOXITEM_SIZE) { 146 if (len <= MAX_MBOXITEM_SIZE) {
147 int n = 0; 147 int n = 0;
148 unsigned char printbuf[16 * 3 + 2]; 148 char printbuf[16 * 3 + 2];
149 unsigned char databuf[MAX_MBOXITEM_SIZE]; 149 unsigned char databuf[MAX_MBOXITEM_SIZE];
150 void __iomem *src = wmi_buffer(wil, d.addr) + 150 void __iomem *src = wmi_buffer(wil, d.addr) +
151 sizeof(struct wil6210_mbox_hdr); 151 sizeof(struct wil6210_mbox_hdr);
@@ -416,7 +416,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
416 seq_printf(s, " SKB = %p\n", skb); 416 seq_printf(s, " SKB = %p\n", skb);
417 417
418 if (skb) { 418 if (skb) {
419 unsigned char printbuf[16 * 3 + 2]; 419 char printbuf[16 * 3 + 2];
420 int i = 0; 420 int i = 0;
421 int len = le16_to_cpu(d->dma.length); 421 int len = le16_to_cpu(d->dma.length);
422 void *p = skb->data; 422 void *p = skb->data;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 8e8975562ec3..80099016d21f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -242,7 +242,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
242{ 242{
243 unsigned long flags; 243 unsigned long flags;
244 244
245 if (!ifp) 245 if (!ifp || !ifp->ndev)
246 return; 246 return;
247 247
248 brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n", 248 brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index f0d9f7f6c83d..29b1f24c2d0f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -1744,13 +1744,14 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
1744 ulong flags; 1744 ulong flags;
1745 int fifo = BRCMF_FWS_FIFO_BCMC; 1745 int fifo = BRCMF_FWS_FIFO_BCMC;
1746 bool multicast = is_multicast_ether_addr(eh->h_dest); 1746 bool multicast = is_multicast_ether_addr(eh->h_dest);
1747 bool pae = eh->h_proto == htons(ETH_P_PAE);
1747 1748
1748 /* determine the priority */ 1749 /* determine the priority */
1749 if (!skb->priority) 1750 if (!skb->priority)
1750 skb->priority = cfg80211_classify8021d(skb); 1751 skb->priority = cfg80211_classify8021d(skb);
1751 1752
1752 drvr->tx_multicast += !!multicast; 1753 drvr->tx_multicast += !!multicast;
1753 if (ntohs(eh->h_proto) == ETH_P_PAE) 1754 if (pae)
1754 atomic_inc(&ifp->pend_8021x_cnt); 1755 atomic_inc(&ifp->pend_8021x_cnt);
1755 1756
1756 if (!brcmf_fws_fc_active(fws)) { 1757 if (!brcmf_fws_fc_active(fws)) {
@@ -1781,6 +1782,11 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
1781 brcmf_fws_schedule_deq(fws); 1782 brcmf_fws_schedule_deq(fws);
1782 } else { 1783 } else {
1783 brcmf_err("drop skb: no hanger slot\n"); 1784 brcmf_err("drop skb: no hanger slot\n");
1785 if (pae) {
1786 atomic_dec(&ifp->pend_8021x_cnt);
1787 if (waitqueue_active(&ifp->pend_8021x_wait))
1788 wake_up(&ifp->pend_8021x_wait);
1789 }
1784 brcmu_pkt_buf_free_skb(skb); 1790 brcmu_pkt_buf_free_skb(skb);
1785 } 1791 }
1786 brcmf_fws_unlock(drvr, flags); 1792 brcmf_fws_unlock(drvr, flags);
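
The fwsignal fix is a balanced-counter repair: brcmfmac counts pending EAPOL frames and, on teardown, waits for the count to drain, so a frame dropped for want of a hanger slot must give back its increment and wake the waiter, or the wait only ends at its timeout. The contract, condensed (names from the driver):

    if (pae)
            atomic_inc(&ifp->pend_8021x_cnt);        /* submit path */

    /* ...and on any path that fails to hand the frame to firmware: */
    if (pae) {
            atomic_dec(&ifp->pend_8021x_cnt);
            if (waitqueue_active(&ifp->pend_8021x_wait))
                    wake_up(&ifp->pend_8021x_wait);
    }
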
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 277b37ae7126..7fa71f73cfe8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -1093,8 +1093,11 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
1093 brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n "); 1093 brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n ");
1094 err = brcmf_fil_cmd_data_set(vif->ifp, 1094 err = brcmf_fil_cmd_data_set(vif->ifp,
1095 BRCMF_C_DISASSOC, NULL, 0); 1095 BRCMF_C_DISASSOC, NULL, 0);
1096 if (err) 1096 if (err) {
1097 brcmf_err("WLC_DISASSOC failed (%d)\n", err); 1097 brcmf_err("WLC_DISASSOC failed (%d)\n", err);
1098 cfg80211_disconnected(vif->wdev.netdev, 0,
1099 NULL, 0, GFP_KERNEL);
1100 }
1098 clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state); 1101 clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
1099 } 1102 }
1100 clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state); 1103 clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 7365674366f4..010b252be584 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -1406,11 +1406,8 @@ static void cw1200_do_unjoin(struct cw1200_common *priv)
1406 if (!priv->join_status) 1406 if (!priv->join_status)
1407 goto done; 1407 goto done;
1408 1408
1409 if (priv->join_status > CW1200_JOIN_STATUS_IBSS) { 1409 if (priv->join_status == CW1200_JOIN_STATUS_AP)
1410 wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n", 1410 goto done;
1411 priv->join_status);
1412 BUG_ON(1);
1413 }
1414 1411
1415 cancel_work_sync(&priv->update_filtering_work); 1412 cancel_work_sync(&priv->update_filtering_work);
1416 cancel_work_sync(&priv->set_beacon_wakeup_period_work); 1413 cancel_work_sync(&priv->set_beacon_wakeup_period_work);
diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c
index 5862c373d714..e824d4d4a18d 100644
--- a/drivers/net/wireless/cw1200/txrx.c
+++ b/drivers/net/wireless/cw1200/txrx.c
@@ -1165,7 +1165,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
1165 if (cw1200_handle_action_rx(priv, skb)) 1165 if (cw1200_handle_action_rx(priv, skb))
1166 return; 1166 return;
1167 } else if (ieee80211_is_beacon(frame->frame_control) && 1167 } else if (ieee80211_is_beacon(frame->frame_control) &&
1168 !arg->status && 1168 !arg->status && priv->vif &&
1169 !memcmp(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid, 1169 !memcmp(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid,
1170 ETH_ALEN)) { 1170 ETH_ALEN)) {
1171 const u8 *tim_ie; 1171 const u8 *tim_ie;
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index ac074731335a..e5090309824e 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -523,9 +523,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
523 523
524 data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1); 524 data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
525 525
526 memcpy(extra, &addr, sizeof(struct sockaddr) * data->length); 526 memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
527 data->flags = 1; /* has quality information */ 527 data->flags = 1; /* has quality information */
528 memcpy(extra + sizeof(struct sockaddr) * data->length, &qual, 528 memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
529 sizeof(struct iw_quality) * data->length); 529 sizeof(struct iw_quality) * data->length);
530 530
531 kfree(addr); 531 kfree(addr);
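
The hostap fix is a classic pointer-level bug: addr and qual are heap pointers, so memcpy(..., &addr, len) copied the pointer variable itself, and read past it, since len exceeds sizeof(addr), instead of the array it points to. A standalone illustration of the bug class (plain C, runnable; names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            char *buf = malloc(16);
            char dst[16];

            if (!buf)
                    return 1;
            memcpy(buf, "0123456789abcdef", 16);
            memcpy(dst, &buf, sizeof(buf));  /* wrong: copies the pointer's bytes */
            memcpy(dst, buf, 16);            /* right: copies the pointed-to data */
            printf("%.16s\n", dst);
            free(buf);
            return 0;
    }
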
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index b9b2bb51e605..f2ed62e37340 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -4460,12 +4460,12 @@ il4965_irq_tasklet(struct il_priv *il)
4460 * is killed. Hence update the killswitch state here. The 4460 * is killed. Hence update the killswitch state here. The
4461 * rfkill handler will care about restarting if needed. 4461 * rfkill handler will care about restarting if needed.
4462 */ 4462 */
4463 if (!test_bit(S_ALIVE, &il->status)) { 4463 if (hw_rf_kill) {
4464 if (hw_rf_kill) 4464 set_bit(S_RFKILL, &il->status);
4465 set_bit(S_RFKILL, &il->status); 4465 } else {
4466 else 4466 clear_bit(S_RFKILL, &il->status);
4467 clear_bit(S_RFKILL, &il->status);
4468 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); 4467 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
4468 il_force_reset(il, true);
4469 } 4469 }
4470 4470
4471 handled |= CSR_INT_BIT_RF_KILL; 4471 handled |= CSR_INT_BIT_RF_KILL;
@@ -5334,6 +5334,9 @@ il4965_alive_start(struct il_priv *il)
5334 5334
5335 il->active_rate = RATES_MASK; 5335 il->active_rate = RATES_MASK;
5336 5336
5337 il_power_update_mode(il, true);
5338 D_INFO("Updated power mode\n");
5339
5337 if (il_is_associated(il)) { 5340 if (il_is_associated(il)) {
5338 struct il_rxon_cmd *active_rxon = 5341 struct il_rxon_cmd *active_rxon =
5339 (struct il_rxon_cmd *)&il->active; 5342 (struct il_rxon_cmd *)&il->active;
@@ -5364,9 +5367,6 @@ il4965_alive_start(struct il_priv *il)
5364 D_INFO("ALIVE processing complete.\n"); 5367 D_INFO("ALIVE processing complete.\n");
5365 wake_up(&il->wait_command_queue); 5368 wake_up(&il->wait_command_queue);
5366 5369
5367 il_power_update_mode(il, true);
5368 D_INFO("Updated power mode\n");
5369
5370 return; 5370 return;
5371 5371
5372restart: 5372restart:
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 3195aad440dd..b03e22ef5462 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4660,6 +4660,7 @@ il_force_reset(struct il_priv *il, bool external)
4660 4660
4661 return 0; 4661 return 0;
4662} 4662}
4663EXPORT_SYMBOL(il_force_reset);
4663 4664
4664int 4665int
4665il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 4666il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 822f1a00efbb..319387263e12 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1068,7 +1068,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
1068 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1068 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1069 return; 1069 return;
1070 1070
1071 if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) 1071 if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
1072 return;
1073
1074 if (ctx->vif)
1072 ieee80211_chswitch_done(ctx->vif, is_success); 1075 ieee80211_chswitch_done(ctx->vif, is_success);
1073} 1076}
1074 1077
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 3952ddf2ddb2..1531a4fc0960 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -758,7 +758,7 @@ int iwl_alive_start(struct iwl_priv *priv)
758 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); 758 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
759 if (ret) 759 if (ret)
760 return ret; 760 return ret;
761 } else { 761 } else if (priv->lib->bt_params) {
762 /* 762 /*
 763 * default is 2-wire BT coexistence support 763 * default is 2-wire BT coexistence support
764 */ 764 */
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 7e5e5c2f9f87..83da884cf303 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -134,7 +134,7 @@ struct wowlan_key_data {
134 struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc; 134 struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
135 struct iwl_wowlan_tkip_params_cmd *tkip; 135 struct iwl_wowlan_tkip_params_cmd *tkip;
136 bool error, use_rsc_tsc, use_tkip; 136 bool error, use_rsc_tsc, use_tkip;
137 int gtk_key_idx; 137 int wep_key_idx;
138}; 138};
139 139
140static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, 140static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
@@ -188,8 +188,8 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
188 wkc.wep_key.key_offset = 0; 188 wkc.wep_key.key_offset = 0;
189 } else { 189 } else {
190 /* others start at 1 */ 190 /* others start at 1 */
191 data->gtk_key_idx++; 191 data->wep_key_idx++;
192 wkc.wep_key.key_offset = data->gtk_key_idx; 192 wkc.wep_key.key_offset = data->wep_key_idx;
193 } 193 }
194 194
195 ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC, 195 ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC,
@@ -316,8 +316,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
316 mvm->ptk_ivlen = key->iv_len; 316 mvm->ptk_ivlen = key->iv_len;
317 mvm->ptk_icvlen = key->icv_len; 317 mvm->ptk_icvlen = key->icv_len;
318 } else { 318 } else {
319 data->gtk_key_idx++; 319 /*
320 key->hw_key_idx = data->gtk_key_idx; 320 * firmware only supports TSC/RSC for a single key,
321 * so if there are multiple keep overwriting them
322 * with new ones -- this relies on mac80211 doing
323 * list_add_tail().
324 */
325 key->hw_key_idx = 1;
321 mvm->gtk_ivlen = key->iv_len; 326 mvm->gtk_ivlen = key->iv_len;
322 mvm->gtk_icvlen = key->icv_len; 327 mvm->gtk_icvlen = key->icv_len;
323 } 328 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index e56ed2a84888..c24a744910ac 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -988,7 +988,11 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
988 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 988 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
989 char buf[100]; 989 char buf[100];
990 990
991 if (!dbgfs_dir) 991 /*
 992 * Check if the debugfs directory already exists before creating it.
 993 * This may happen, for example, when resetting hw or during suspend-resume.
994 */
995 if (!dbgfs_dir || mvmvif->dbgfs_dir)
992 return; 996 return;
993 997
994 mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir); 998 mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index b60d14151721..365095a0c3b3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -69,7 +69,6 @@
69/* Scan Commands, Responses, Notifications */ 69/* Scan Commands, Responses, Notifications */
70 70
71/* Masks for iwl_scan_channel.type flags */ 71/* Masks for iwl_scan_channel.type flags */
72#define SCAN_CHANNEL_TYPE_PASSIVE 0
73#define SCAN_CHANNEL_TYPE_ACTIVE BIT(0) 72#define SCAN_CHANNEL_TYPE_ACTIVE BIT(0)
74#define SCAN_CHANNEL_NARROW_BAND BIT(22) 73#define SCAN_CHANNEL_NARROW_BAND BIT(22)
75 74
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index e08683b20531..f19baf0dea6b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -257,7 +257,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
257 if (ret) 257 if (ret)
258 return ret; 258 return ret;
259 259
260 return ieee80211_register_hw(mvm->hw); 260 ret = ieee80211_register_hw(mvm->hw);
261 if (ret)
262 iwl_mvm_leds_exit(mvm);
263
264 return ret;
261} 265}
262 266
263static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, 267static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
@@ -385,6 +389,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
385 ieee80211_wake_queues(mvm->hw); 389 ieee80211_wake_queues(mvm->hw);
386 390
387 mvm->vif_count = 0; 391 mvm->vif_count = 0;
392 mvm->rx_ba_sessions = 0;
388} 393}
389 394
390static int iwl_mvm_mac_start(struct ieee80211_hw *hw) 395static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
@@ -507,6 +512,27 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
507 goto out_unlock; 512 goto out_unlock;
508 513
509 /* 514 /*
515 * TODO: remove this temporary code.
516 * Currently MVM FW supports power management only on single MAC.
517 * If new interface added, disable PM on existing interface.
 518 * P2P device is a special case, since it is handled by FW similarly to
 519 * scan. If P2P device is added, PM remains enabled on existing
520 * interface.
521 * Note: the method below does not count the new interface being added
522 * at this moment.
523 */
524 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
525 mvm->vif_count++;
526 if (mvm->vif_count > 1) {
527 IWL_DEBUG_MAC80211(mvm,
528 "Disable power on existing interfaces\n");
529 ieee80211_iterate_active_interfaces_atomic(
530 mvm->hw,
531 IEEE80211_IFACE_ITER_NORMAL,
532 iwl_mvm_pm_disable_iterator, mvm);
533 }
534
535 /*
510 * The AP binding flow can be done only after the beacon 536 * The AP binding flow can be done only after the beacon
511 * template is configured (which happens only in the mac80211 537 * template is configured (which happens only in the mac80211
512 * start_ap() flow), and adding the broadcast station can happen 538 * start_ap() flow), and adding the broadcast station can happen
@@ -529,27 +555,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
529 goto out_unlock; 555 goto out_unlock;
530 } 556 }
531 557
532 /*
533 * TODO: remove this temporary code.
534 * Currently MVM FW supports power management only on single MAC.
535 * If new interface added, disable PM on existing interface.
 536 * P2P device is a special case, since it is handled by FW similarly to
 537 * scan. If P2P device is added, PM remains enabled on existing
538 * interface.
539 * Note: the method below does not count the new interface being added
540 * at this moment.
541 */
542 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
543 mvm->vif_count++;
544 if (mvm->vif_count > 1) {
545 IWL_DEBUG_MAC80211(mvm,
546 "Disable power on existing interfaces\n");
547 ieee80211_iterate_active_interfaces_atomic(
548 mvm->hw,
549 IEEE80211_IFACE_ITER_NORMAL,
550 iwl_mvm_pm_disable_iterator, mvm);
551 }
552
553 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 558 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
554 if (ret) 559 if (ret)
555 goto out_release; 560 goto out_release;
@@ -1006,6 +1011,21 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
1006 mutex_lock(&mvm->mutex); 1011 mutex_lock(&mvm->mutex);
1007 if (old_state == IEEE80211_STA_NOTEXIST && 1012 if (old_state == IEEE80211_STA_NOTEXIST &&
1008 new_state == IEEE80211_STA_NONE) { 1013 new_state == IEEE80211_STA_NONE) {
1014 /*
1015 * Firmware bug - it'll crash if the beacon interval is less
1016 * than 16. We can't avoid connecting at all, so refuse the
1017 * station state change, this will cause mac80211 to abandon
1018 * attempts to connect to this AP, and eventually wpa_s will
1019 * blacklist the AP...
1020 */
1021 if (vif->type == NL80211_IFTYPE_STATION &&
1022 vif->bss_conf.beacon_int < 16) {
1023 IWL_ERR(mvm,
1024 "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
1025 sta->addr, vif->bss_conf.beacon_int);
1026 ret = -EINVAL;
1027 goto out_unlock;
1028 }
1009 ret = iwl_mvm_add_sta(mvm, vif, sta); 1029 ret = iwl_mvm_add_sta(mvm, vif, sta);
1010 } else if (old_state == IEEE80211_STA_NONE && 1030 } else if (old_state == IEEE80211_STA_NONE &&
1011 new_state == IEEE80211_STA_AUTH) { 1031 new_state == IEEE80211_STA_AUTH) {
@@ -1038,6 +1058,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
1038 } else { 1058 } else {
1039 ret = -EIO; 1059 ret = -EIO;
1040 } 1060 }
1061 out_unlock:
1041 mutex_unlock(&mvm->mutex); 1062 mutex_unlock(&mvm->mutex);
1042 1063
1043 return ret; 1064 return ret;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index d40d7db185d6..420e82d379d9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -419,6 +419,7 @@ struct iwl_mvm {
419 struct work_struct sta_drained_wk; 419 struct work_struct sta_drained_wk;
420 unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; 420 unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
421 atomic_t pending_frames[IWL_MVM_STATION_COUNT]; 421 atomic_t pending_frames[IWL_MVM_STATION_COUNT];
422 u8 rx_ba_sessions;
422 423
423 /* configured by mac80211 */ 424 /* configured by mac80211 */
424 u32 rts_threshold; 425 u32 rts_threshold;
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 2157b0f8ced5..acdff6b67e04 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -137,8 +137,8 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
137{ 137{
138 int fw_idx, req_idx; 138 int fw_idx, req_idx;
139 139
140 fw_idx = 0; 140 for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx > 0;
141 for (req_idx = req->n_ssids - 1; req_idx > 0; req_idx--) { 141 req_idx--, fw_idx++) {
142 cmd->direct_scan[fw_idx].id = WLAN_EID_SSID; 142 cmd->direct_scan[fw_idx].id = WLAN_EID_SSID;
143 cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len; 143 cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len;
144 memcpy(cmd->direct_scan[fw_idx].ssid, 144 memcpy(cmd->direct_scan[fw_idx].ssid,
@@ -153,7 +153,9 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
153 * just to notify that this scan is active and not passive. 153 * just to notify that this scan is active and not passive.
154 * In order to notify the FW of the number of SSIDs we wish to scan (including 154 * In order to notify the FW of the number of SSIDs we wish to scan (including
155 * the zero-length one), we need to set the corresponding bits in chan->type, 155 * the zero-length one), we need to set the corresponding bits in chan->type,
156 * one for each SSID, and set the active bit (first). 156 * one for each SSID, and set the active bit (first). The first SSID is already
157 * included in the probe template, so we need to set only req->n_ssids - 1 bits
158 * in addition to the first bit.
157 */ 159 */
158static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids) 160static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
159{ 161{
@@ -176,19 +178,12 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
176 struct iwl_scan_channel *chan = (struct iwl_scan_channel *) 178 struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
177 (cmd->data + le16_to_cpu(cmd->tx_cmd.len)); 179 (cmd->data + le16_to_cpu(cmd->tx_cmd.len));
178 int i; 180 int i;
179 __le32 chan_type_value;
180
181 if (req->n_ssids > 0)
182 chan_type_value = cpu_to_le32(BIT(req->n_ssids + 1) - 1);
183 else
184 chan_type_value = SCAN_CHANNEL_TYPE_PASSIVE;
185 181
186 for (i = 0; i < cmd->channel_count; i++) { 182 for (i = 0; i < cmd->channel_count; i++) {
187 chan->channel = cpu_to_le16(req->channels[i]->hw_value); 183 chan->channel = cpu_to_le16(req->channels[i]->hw_value);
184 chan->type = cpu_to_le32(BIT(req->n_ssids) - 1);
188 if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN) 185 if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
189 chan->type = SCAN_CHANNEL_TYPE_PASSIVE; 186 chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
190 else
191 chan->type = chan_type_value;
192 chan->active_dwell = cpu_to_le16(active_dwell); 187 chan->active_dwell = cpu_to_le16(active_dwell);
193 chan->passive_dwell = cpu_to_le16(passive_dwell); 188 chan->passive_dwell = cpu_to_le16(passive_dwell);
194 chan->iteration_count = cpu_to_le16(1); 189 chan->iteration_count = cpu_to_le16(1);
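
The scan.c hunks replace a precomputed channel-type word with a per-channel bitmask, matching the comment extended in the same file: one bit per SSID, with bit 0 doubling as the active-scan flag, so passive channels just clear that one bit. A standalone worked example of the encoding (plain C; the SCAN_CHANNEL_TYPE_ACTIVE value comes from the fw-api-scan.h hunk earlier in this patch):

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n)                   (1u << (n))
    #define SCAN_CHANNEL_TYPE_ACTIVE BIT(0)

    int main(void)
    {
            int n_ssids = 3;
            uint32_t type = BIT(n_ssids) - 1;   /* three SSIDs -> 0b111 */

            printf("active channel:  %#x\n", (unsigned)type);
            type &= ~SCAN_CHANNEL_TYPE_ACTIVE;  /* passive: clear bit 0 only */
            printf("passive channel: %#x\n", (unsigned)type);
            return 0;
    }
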
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 62fe5209093b..563f559b902d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -608,6 +608,8 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *bsta)
608 return ret; 608 return ret;
609} 609}
610 610
611#define IWL_MAX_RX_BA_SESSIONS 16
612
611int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 613int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
612 int tid, u16 ssn, bool start) 614 int tid, u16 ssn, bool start)
613{ 615{
@@ -618,11 +620,20 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
618 620
619 lockdep_assert_held(&mvm->mutex); 621 lockdep_assert_held(&mvm->mutex);
620 622
623 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
 624 IWL_WARN(mvm, "Not enough RX BA sessions\n");
625 return -ENOSPC;
626 }
627
621 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); 628 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
622 cmd.sta_id = mvm_sta->sta_id; 629 cmd.sta_id = mvm_sta->sta_id;
623 cmd.add_modify = STA_MODE_MODIFY; 630 cmd.add_modify = STA_MODE_MODIFY;
624 cmd.add_immediate_ba_tid = (u8) tid; 631 if (start) {
625 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); 632 cmd.add_immediate_ba_tid = (u8) tid;
633 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
634 } else {
635 cmd.remove_immediate_ba_tid = (u8) tid;
636 }
626 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID : 637 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
627 STA_MODIFY_REMOVE_BA_TID; 638 STA_MODIFY_REMOVE_BA_TID;
628 639
@@ -648,6 +659,14 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
648 break; 659 break;
649 } 660 }
650 661
662 if (!ret) {
663 if (start)
664 mvm->rx_ba_sessions++;
665 else if (mvm->rx_ba_sessions > 0)
666 /* check that restart flow didn't zero the counter */
667 mvm->rx_ba_sessions--;
668 }
669
651 return ret; 670 return ret;
652} 671}
653 672
@@ -896,6 +915,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
896 struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv; 915 struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
897 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 916 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
898 u16 txq_id; 917 u16 txq_id;
918 enum iwl_mvm_agg_state old_state;
899 919
900 /* 920 /*
901 * First set the agg state to OFF to avoid calling 921 * First set the agg state to OFF to avoid calling
@@ -905,13 +925,17 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
905 txq_id = tid_data->txq_id; 925 txq_id = tid_data->txq_id;
906 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", 926 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
907 mvmsta->sta_id, tid, txq_id, tid_data->state); 927 mvmsta->sta_id, tid, txq_id, tid_data->state);
928 old_state = tid_data->state;
908 tid_data->state = IWL_AGG_OFF; 929 tid_data->state = IWL_AGG_OFF;
909 spin_unlock_bh(&mvmsta->lock); 930 spin_unlock_bh(&mvmsta->lock);
910 931
911 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true)) 932 if (old_state >= IWL_AGG_ON) {
912 IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); 933 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
934 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
935
936 iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
937 }
913 938
914 iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
915 mvm->queue_to_mac80211[tid_data->txq_id] = 939 mvm->queue_to_mac80211[tid_data->txq_id] =
916 IWL_INVALID_MAC80211_QUEUE; 940 IWL_INVALID_MAC80211_QUEUE;
917 941
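
The sta.c changes bound firmware resources in two places: RX BA sessions are now counted and capped, and the TX AGG flush path only flushes and disables the queue if aggregation actually reached the ON state. The resulting RX-BA accounting, condensed into one kernel-style sketch (field names from the driver; send_add_sta_cmd() is a hypothetical stand-in for the ADD_STA exchange):

    #define IWL_MAX_RX_BA_SESSIONS 16

    static int rx_agg_update(struct iwl_mvm *mvm, bool start)
    {
            int ret;

            if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS)
                    return -ENOSPC;

            ret = send_add_sta_cmd(mvm, start);   /* hypothetical stand-in */
            if (!ret) {
                    if (start)
                            mvm->rx_ba_sessions++;
                    else if (mvm->rx_ba_sessions > 0)
                            mvm->rx_ba_sessions--;  /* restart may have zeroed it */
            }
            return ret;
    }
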
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index ad9bbca99213..7fd6fbfbc1b3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -138,6 +138,20 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
138 schedule_work(&mvm->roc_done_wk); 138 schedule_work(&mvm->roc_done_wk);
139} 139}
140 140
141static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
142 struct ieee80211_vif *vif,
143 const char *errmsg)
144{
145 if (vif->type != NL80211_IFTYPE_STATION)
146 return false;
147 if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
148 return false;
149 if (errmsg)
150 IWL_ERR(mvm, "%s\n", errmsg);
151 ieee80211_connection_loss(vif);
152 return true;
153}
154
141/* 155/*
142 * Handles a FW notification for an event that is known to the driver. 156 * Handles a FW notification for an event that is known to the driver.
143 * 157 *
@@ -163,8 +177,13 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
 163 * P2P Device discoverability, while there are other higher priority 177 * P2P Device discoverability, while there are other higher priority
164 * events in the system). 178 * events in the system).
165 */ 179 */
166 WARN_ONCE(!le32_to_cpu(notif->status), 180 if (WARN_ONCE(!le32_to_cpu(notif->status),
167 "Failed to schedule time event\n"); 181 "Failed to schedule time event\n")) {
182 if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
183 iwl_mvm_te_clear_data(mvm, te_data);
184 return;
185 }
186 }
168 187
169 if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) { 188 if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) {
170 IWL_DEBUG_TE(mvm, 189 IWL_DEBUG_TE(mvm,
@@ -180,14 +199,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
180 * By now, we should have finished association 199 * By now, we should have finished association
181 * and know the dtim period. 200 * and know the dtim period.
182 */ 201 */
183 if (te_data->vif->type == NL80211_IFTYPE_STATION && 202 iwl_mvm_te_check_disconnect(mvm, te_data->vif,
184 (!te_data->vif->bss_conf.assoc || 203 "No assocation and the time event is over already...");
185 !te_data->vif->bss_conf.dtim_period)) {
186 IWL_ERR(mvm,
187 "No assocation and the time event is over already...\n");
188 ieee80211_connection_loss(te_data->vif);
189 }
190
191 iwl_mvm_te_clear_data(mvm, te_data); 204 iwl_mvm_te_clear_data(mvm, te_data);
192 } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) { 205 } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) {
193 te_data->running = true; 206 te_data->running = true;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 81f3ea5b09a4..ff13458efc27 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -130,6 +130,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
130 {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */ 130 {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
131 {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */ 131 {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
132 {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */ 132 {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
133 {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */
133 134
134 {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */ 135 {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
135 {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */ 136 {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 826c15602c46..390e2f058aff 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -670,6 +670,11 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
670 return err; 670 return err;
671 } 671 }
672 672
673 /* Reset the entire device */
674 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
675
676 usleep_range(10, 15);
677
673 iwl_pcie_apm_init(trans); 678 iwl_pcie_apm_init(trans);
674 679
675 /* From now on, the op_mode will be kept updated about RF kill state */ 680 /* From now on, the op_mode will be kept updated about RF kill state */
@@ -1497,16 +1502,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	spin_lock_init(&trans_pcie->reg_lock);
 	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
-	/* W/A - seems to solve weird behavior. We need to remove this if we
-	 * don't want to stay in L1 all the time. This wastes a lot of power */
-	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
-			       PCIE_LINK_STATE_CLKPM);
-
 	if (pci_enable_device(pdev)) {
 		err = -ENODEV;
 		goto out_no_pci;
 	}
 
+	/* W/A - seems to solve weird behavior. We need to remove this if we
+	 * don't want to stay in L1 all the time. This wastes a lot of power */
+	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+			       PCIE_LINK_STATE_CLKPM);
+
 	pci_set_master(pdev);
 
 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
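The reordering above appears to matter because pci_disable_link_state() can have no effect while the device is still disabled, so the ASPM workaround only sticks once pci_enable_device() has run. A rough sketch of the resulting probe order, assuming the standard kernel PCI APIs used in the hunk and with error handling trimmed:

#include <linux/pci.h>

/* Illustrative probe ordering after the patch: enable the PCI device
 * first, only then adjust its ASPM link states. */
static int example_probe(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);	/* power up, init config access */
	if (err)
		return err;

	/* Workaround from the diff: keep the link out of L0s/L1/CLKPM. */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				     PCIE_LINK_STATE_L1 |
				     PCIE_LINK_STATE_CLKPM);

	pci_set_master(pdev);		/* allow bus-mastering DMA */
	return 0;
}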
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index ef5fa890a286..89459db4c53b 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1716,9 +1716,9 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 	int ret;
 
-	if (priv->bss_mode != NL80211_IFTYPE_STATION) {
+	if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) {
 		wiphy_err(wiphy,
-			  "%s: reject infra assoc request in non-STA mode\n",
+			  "%s: reject infra assoc request in non-STA role\n",
 			  dev->name);
 		return -EINVAL;
 	}
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 988552dece75..5178c4630d89 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -415,7 +415,8 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
 	u32 k = 0;
 	struct mwifiex_adapter *adapter = priv->adapter;
 
-	if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+	if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+	    priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
 		switch (adapter->config_bands) {
 		case BAND_B:
 			dev_dbg(adapter->dev, "info: infra band=%d "
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index caaf4bd56b30..2cf8b964e966 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -693,7 +693,7 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
 	if (!ret) {
 		dev_notice(adapter->dev,
 			   "WLAN FW already running! Skip FW dnld\n");
-		goto done;
+		return 0;
 	}
 
 	poll_num = MAX_FIRMWARE_POLL_TRIES;
@@ -719,14 +719,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
 poll_fw:
 	/* Check if the firmware is downloaded successfully or not */
 	ret = adapter->if_ops.check_fw_status(adapter, poll_num);
-	if (ret) {
+	if (ret)
 		dev_err(adapter->dev, "FW failed to be active in time\n");
-		return -1;
-	}
-done:
-	/* re-enable host interrupt for mwifiex after fw dnld is successful */
-	if (adapter->if_ops.enable_int)
-		adapter->if_ops.enable_int(adapter);
 
 	return ret;
 }
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 1c8a771e8e81..12e778159ec5 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1291,8 +1291,10 @@ int mwifiex_associate(struct mwifiex_private *priv,
 {
 	u8 current_bssid[ETH_ALEN];
 
-	/* Return error if the adapter or table entry is not marked as infra */
-	if ((priv->bss_mode != NL80211_IFTYPE_STATION) ||
+	/* Return error if the adapter is not STA role or table entry
+	 * is not marked as infra.
+	 */
+	if ((GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) ||
 	    (bss_desc->bss_mode != NL80211_IFTYPE_STATION))
 		return -1;
 
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index e15ab72fb03d..1753431de361 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -427,6 +427,10 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
427 "Cal data request_firmware() failed\n"); 427 "Cal data request_firmware() failed\n");
428 } 428 }
429 429
430 /* enable host interrupt after fw dnld is successful */
431 if (adapter->if_ops.enable_int)
432 adapter->if_ops.enable_int(adapter);
433
430 adapter->init_wait_q_woken = false; 434 adapter->init_wait_q_woken = false;
431 ret = mwifiex_init_fw(adapter); 435 ret = mwifiex_init_fw(adapter);
432 if (ret == -1) { 436 if (ret == -1) {
@@ -478,6 +482,8 @@ err_add_intf:
 	mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
 	rtnl_unlock();
 err_init_fw:
+	if (adapter->if_ops.disable_int)
+		adapter->if_ops.disable_int(adapter);
 	pr_debug("info: %s: unregister device\n", __func__);
 	adapter->if_ops.unregister_dev(adapter);
 done:
@@ -855,7 +861,7 @@ mwifiex_add_card(void *card, struct semaphore *sem,
 	INIT_WORK(&adapter->main_work, mwifiex_main_work_queue);
 
 	/* Register the device. Fill up the private data structure with relevant
-	   information from the card and request for the required IRQ. */
+	   information from the card. */
 	if (adapter->if_ops.register_dev(adapter)) {
 		pr_err("%s: failed to register mwifiex device\n", __func__);
 		goto err_registerdev;
@@ -919,6 +925,11 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
 	if (!adapter)
 		goto exit_remove;
 
+	/* We can no longer handle interrupts once we start doing the teardown
+	 * below. */
+	if (adapter->if_ops.disable_int)
+		adapter->if_ops.disable_int(adapter);
+
 	adapter->surprise_removed = true;
 
 	/* Stop data */
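The main.c hunks above give the enable_int hook a disable_int counterpart and pair them across the device lifecycle: interrupts come on only after firmware download, and go off as the first step of teardown. Both are invoked through NULL-checked optional function pointers, so interfaces that need neither can leave them unset. A standalone sketch of that optional-ops pattern (struct and names invented for illustration):

#include <stdio.h>

/* Hypothetical interface-ops table: hooks are optional, so every call
 * site checks for NULL before invoking, as mwifiex does with
 * if_ops.enable_int/disable_int. */
struct if_ops {
	int  (*enable_int)(void *ctx);
	void (*disable_int)(void *ctx);
};

static int sdio_enable_int(void *ctx)   { puts("irq on");  return 0; }
static void sdio_disable_int(void *ctx) { puts("irq off"); }

static const struct if_ops sdio_ops = {
	.enable_int  = sdio_enable_int,
	.disable_int = sdio_disable_int,
};

static void setup(const struct if_ops *ops, void *ctx)
{
	if (ops->enable_int)		/* only after firmware is ready */
		ops->enable_int(ctx);
}

static void teardown(const struct if_ops *ops, void *ctx)
{
	if (ops->disable_int)		/* first step of teardown */
		ops->disable_int(ctx);
}

int main(void)
{
	setup(&sdio_ops, NULL);
	teardown(&sdio_ops, NULL);
	return 0;
}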
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 3da73d36acdf..253e0bd38e25 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -601,6 +601,7 @@ struct mwifiex_if_ops {
 	int (*register_dev) (struct mwifiex_adapter *);
 	void (*unregister_dev) (struct mwifiex_adapter *);
 	int (*enable_int) (struct mwifiex_adapter *);
+	void (*disable_int) (struct mwifiex_adapter *);
 	int (*process_int_status) (struct mwifiex_adapter *);
 	int (*host_to_card) (struct mwifiex_adapter *, u8, struct sk_buff *,
 			     struct mwifiex_tx_param *);
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 5ee5ed02eccd..09185c963248 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -51,6 +51,7 @@ static struct mwifiex_if_ops sdio_ops;
 static struct semaphore add_remove_card_sem;
 
 static int mwifiex_sdio_resume(struct device *dev);
+static void mwifiex_sdio_interrupt(struct sdio_func *func);
 
 /*
  * SDIO probe.
@@ -296,6 +297,15 @@ static struct sdio_driver mwifiex_sdio = {
 	}
 };
 
+/* Write data into SDIO card register. Caller claims SDIO device. */
+static int
+mwifiex_write_reg_locked(struct sdio_func *func, u32 reg, u8 data)
+{
+	int ret = -1;
+	sdio_writeb(func, data, reg, &ret);
+	return ret;
+}
+
 /*
  * This function writes data into SDIO card register.
  */
@@ -303,10 +313,10 @@ static int
 mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data)
 {
 	struct sdio_mmc_card *card = adapter->card;
-	int ret = -1;
+	int ret;
 
 	sdio_claim_host(card->func);
-	sdio_writeb(card->func, data, reg, &ret);
+	ret = mwifiex_write_reg_locked(card->func, reg, data);
 	sdio_release_host(card->func);
 
 	return ret;
@@ -685,23 +695,15 @@ mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
  * The host interrupt mask is read, the disable bit is reset and
  * written back to the card host interrupt mask register.
  */
-static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
+static void mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
 {
-	u8 host_int_mask, host_int_disable = HOST_INT_DISABLE;
-
-	/* Read back the host_int_mask register */
-	if (mwifiex_read_reg(adapter, HOST_INT_MASK_REG, &host_int_mask))
-		return -1;
-
-	/* Update with the mask and write back to the register */
-	host_int_mask &= ~host_int_disable;
-
-	if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, host_int_mask)) {
-		dev_err(adapter->dev, "disable host interrupt failed\n");
-		return -1;
-	}
+	struct sdio_mmc_card *card = adapter->card;
+	struct sdio_func *func = card->func;
 
-	return 0;
+	sdio_claim_host(func);
+	mwifiex_write_reg_locked(func, HOST_INT_MASK_REG, 0);
+	sdio_release_irq(func);
+	sdio_release_host(func);
 }
 
 /*
@@ -713,14 +715,29 @@ static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
 static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter)
 {
 	struct sdio_mmc_card *card = adapter->card;
+	struct sdio_func *func = card->func;
+	int ret;
+
+	sdio_claim_host(func);
+
+	/* Request the SDIO IRQ */
+	ret = sdio_claim_irq(func, mwifiex_sdio_interrupt);
+	if (ret) {
+		dev_err(adapter->dev, "claim irq failed: ret=%d\n", ret);
+		goto out;
+	}
 
 	/* Simply write the mask to the register */
-	if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG,
-			      card->reg->host_int_enable)) {
+	ret = mwifiex_write_reg_locked(func, HOST_INT_MASK_REG,
+				       card->reg->host_int_enable);
+	if (ret) {
 		dev_err(adapter->dev, "enable host interrupt failed\n");
-		return -1;
+		sdio_release_irq(func);
 	}
-	return 0;
+
+out:
+	sdio_release_host(func);
+	return ret;
 }
 
 /*
@@ -997,9 +1014,6 @@ mwifiex_sdio_interrupt(struct sdio_func *func)
 	}
 	adapter = card->adapter;
 
-	if (adapter->surprise_removed)
-		return;
-
 	if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP)
 		adapter->ps_state = PS_STATE_AWAKE;
 
@@ -1625,8 +1639,8 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
 	/* Allocate buffer and copy payload */
 	blk_size = MWIFIEX_SDIO_BLOCK_SIZE;
 	buf_block_len = (pkt_len + blk_size - 1) / blk_size;
-	*(u16 *) &payload[0] = (u16) pkt_len;
-	*(u16 *) &payload[2] = type;
+	*(__le16 *)&payload[0] = cpu_to_le16((u16)pkt_len);
+	*(__le16 *)&payload[2] = cpu_to_le16(type);
 
 	/*
 	 * This is SDIO specific header
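The two converted stores above fix a wire-format bug: the SDIO header fields are little-endian on the bus, so writing host-order u16 values breaks on big-endian CPUs. A self-contained illustration of the conversion, with cpu_to_le16() re-implemented locally so the example compiles outside the kernel (in-tree it comes from the byteorder headers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative cpu_to_le16(): byte-swap only on big-endian hosts. */
static uint16_t cpu_to_le16(uint16_t v)
{
	const union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };

	return probe.b[0] ? v : (uint16_t)((v >> 8) | (v << 8));
}

int main(void)
{
	uint8_t payload[4];
	uint16_t pkt_len = 0x0123, type = 0x0002;
	uint16_t le_len = cpu_to_le16(pkt_len);
	uint16_t le_type = cpu_to_le16(type);

	/* Like the patched mwifiex code: the header is defined as
	 * little-endian on the wire, so convert before storing. memcpy
	 * also avoids the alignment hazard of *(u16 *)&payload[0]. */
	memcpy(&payload[0], &le_len, 2);
	memcpy(&payload[2], &le_type, 2);

	printf("%02x %02x %02x %02x\n",
	       payload[0], payload[1], payload[2], payload[3]);
	/* prints "23 01 02 00" regardless of host byte order */
	return 0;
}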
@@ -1728,9 +1742,7 @@ mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
 	struct sdio_mmc_card *card = adapter->card;
 
 	if (adapter->card) {
-		/* Release the SDIO IRQ */
 		sdio_claim_host(card->func);
-		sdio_release_irq(card->func);
 		sdio_disable_func(card->func);
 		sdio_release_host(card->func);
 		sdio_set_drvdata(card->func, NULL);
@@ -1744,7 +1756,7 @@ mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
  */
 static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
 {
-	int ret = 0;
+	int ret;
 	struct sdio_mmc_card *card = adapter->card;
 	struct sdio_func *func = card->func;
 
@@ -1753,22 +1765,14 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
 
 	sdio_claim_host(func);
 
-	/* Request the SDIO IRQ */
-	ret = sdio_claim_irq(func, mwifiex_sdio_interrupt);
-	if (ret) {
-		pr_err("claim irq failed: ret=%d\n", ret);
-		goto disable_func;
-	}
-
 	/* Set block size */
 	ret = sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE);
+	sdio_release_host(func);
 	if (ret) {
 		pr_err("cannot set SDIO block size\n");
-		ret = -1;
-		goto release_irq;
+		return ret;
 	}
 
-	sdio_release_host(func);
 	sdio_set_drvdata(func, card);
 
 	adapter->dev = &func->dev;
@@ -1776,15 +1780,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
 	strcpy(adapter->fw_name, card->firmware);
 
 	return 0;
-
-release_irq:
-	sdio_release_irq(func);
-disable_func:
-	sdio_disable_func(func);
-	sdio_release_host(func);
-	adapter->card = NULL;
-
-	return -1;
 }
 
 /*
@@ -1813,9 +1808,6 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
 	 */
 	mwifiex_read_reg(adapter, HOST_INTSTATUS_REG, &sdio_ireg);
 
-	/* Disable host interrupt mask register for SDIO */
-	mwifiex_sdio_disable_host_int(adapter);
-
 	/* Get SDIO ioport */
 	mwifiex_init_sdio_ioport(adapter);
 
@@ -1957,6 +1949,7 @@ static struct mwifiex_if_ops sdio_ops = {
 	.register_dev = mwifiex_register_dev,
 	.unregister_dev = mwifiex_unregister_dev,
 	.enable_int = mwifiex_sdio_enable_host_int,
+	.disable_int = mwifiex_sdio_disable_host_int,
 	.process_int_status = mwifiex_process_int_status,
 	.host_to_card = mwifiex_sdio_host_to_card,
 	.wakeup = mwifiex_pm_wakeup_card,
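The new mwifiex_write_reg_locked() above factors the raw write out of the claim/release pair, so that paths which already hold the host, and must hold it across several operations (the new enable/disable interrupt sequences), do not claim it recursively. A standalone sketch of the pattern, with a pthread mutex standing in for sdio_claim_host()/sdio_release_host():

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char regs[256];

/* "_locked" variant: caller must already hold host_lock (in the driver,
 * the caller has done sdio_claim_host()). */
static int write_reg_locked(unsigned reg, unsigned char data)
{
	regs[reg] = data;
	return 0;
}

/* Convenience wrapper for one-off writes: takes and drops the lock. */
static int write_reg(unsigned reg, unsigned char data)
{
	int ret;

	pthread_mutex_lock(&host_lock);	  /* sdio_claim_host()   */
	ret = write_reg_locked(reg, data);
	pthread_mutex_unlock(&host_lock); /* sdio_release_host() */
	return ret;
}

/* Multi-step sequence: claim once, do several locked operations. */
static void disable_host_int(void)
{
	pthread_mutex_lock(&host_lock);
	write_reg_locked(0x02, 0);	/* mask all interrupts */
	/* ... the driver also releases the SDIO IRQ here ... */
	pthread_mutex_unlock(&host_lock);
}

int main(void)
{
	write_reg(0x02, 0x03);
	disable_host_int();
	printf("mask=%u\n", regs[0x02]);
	return 0;
}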
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 6d51dfdd8251..532ae0ac4dfb 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -92,9 +92,6 @@
 /* Host Control Registers : Download host interrupt mask */
 #define DN_LD_HOST_INT_MASK		(0x2U)
 
-/* Disable Host interrupt mask */
-#define HOST_INT_DISABLE		0xff
-
 /* Host Control Registers : Host interrupt status */
 #define HOST_INTSTATUS_REG		0x03
 /* Host Control Registers : Upload host interrupt status */
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 206c3e038072..8af97abf7108 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -257,10 +257,10 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
 		goto done;
 	}
 
-	if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+	if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+	    priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
 		u8 config_bands;
 
-		/* Infra mode */
 		ret = mwifiex_deauthenticate(priv, NULL);
 		if (ret)
 			goto done;
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 9b915d3a44be..3e60a31582f8 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -1,6 +1,6 @@
 menuconfig RT2X00
 	tristate "Ralink driver support"
-	depends on MAC80211
+	depends on MAC80211 && HAS_DMA
 	---help---
 	  This will enable the support for the Ralink drivers,
 	  developed in the rt2x00 project <http://rt2x00.serialmonkey.com>.
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 6c0a91ff963c..aa95c6cf3545 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -936,13 +936,8 @@ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
 	spin_unlock_irqrestore(&queue->index_lock, irqflags);
 }
 
-void rt2x00queue_pause_queue(struct data_queue *queue)
+void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
 {
-	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
-	    !test_bit(QUEUE_STARTED, &queue->flags) ||
-	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
-		return;
-
 	switch (queue->qid) {
 	case QID_AC_VO:
 	case QID_AC_VI:
@@ -958,6 +953,15 @@ void rt2x00queue_pause_queue(struct data_queue *queue)
 		break;
 	}
 }
+void rt2x00queue_pause_queue(struct data_queue *queue)
+{
+	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+	    !test_bit(QUEUE_STARTED, &queue->flags) ||
+	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
+		return;
+
+	rt2x00queue_pause_queue_nocheck(queue);
+}
 EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
 
 void rt2x00queue_unpause_queue(struct data_queue *queue)
@@ -1019,7 +1023,7 @@ void rt2x00queue_stop_queue(struct data_queue *queue)
 		return;
 	}
 
-	rt2x00queue_pause_queue(queue);
+	rt2x00queue_pause_queue_nocheck(queue);
 
 	queue->rt2x00dev->ops->lib->stop_queue(queue);
 
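The rt2x00 change above is the classic "_nocheck" split: rt2x00queue_stop_queue() must pause a queue even when it is already flagged QUEUE_PAUSED, so the state guards move into a public wrapper while the core action becomes unconditionally callable. A standalone sketch of the shape of that refactor:

#include <stdbool.h>
#include <stdio.h>

struct queue {
	bool present;
	bool started;
	bool paused;
};

/* Core: performs the pause unconditionally. */
static void pause_queue_nocheck(struct queue *q)
{
	puts("queue paused");
}

/* Public entry point: keeps the original guards. */
static void pause_queue(struct queue *q)
{
	if (!q->present || !q->started || q->paused)
		return;
	q->paused = true;
	pause_queue_nocheck(q);
}

/* Stop path: must always reach the core, even if the queue was
 * already marked paused, so it bypasses the checks. */
static void stop_queue(struct queue *q)
{
	q->paused = true;
	pause_queue_nocheck(q);
	q->started = false;
}

int main(void)
{
	struct queue q = { .present = true, .started = true };

	pause_queue(&q);	/* pauses */
	pause_queue(&q);	/* second call is a no-op */
	stop_queue(&q);		/* still reaches the hardware side */
	return 0;
}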
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 7253de3d8c66..c2ffce7a907c 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -1,27 +1,20 @@
-config RTLWIFI
-	tristate "Realtek wireless card support"
-	depends on MAC80211
-	select FW_LOADER
-	---help---
-	  This is common code for RTL8192CE/RTL8192CU/RTL8192SE/RTL8723AE
-	  drivers. This module does nothing by itself - the various front-end
-	  drivers need to be enabled to support any desired devices.
-
-	  If you choose to build as a module, it'll be called rtlwifi.
-
-config RTLWIFI_DEBUG
-	bool "Debugging output for rtlwifi driver family"
-	depends on RTLWIFI
-	default y
-	---help---
-	  To use the module option that sets the dynamic-debugging level for,
-	  the front-end driver, this parameter must be "Y". For memory-limited
-	  systems, choose "N". If in doubt, choose "Y".
+menuconfig RTL_CARDS
+	tristate "Realtek rtlwifi family of devices"
+	depends on MAC80211 && (PCI || USB)
+	default y
+	---help---
+	  This option will enable support for the Realtek mac80211-based
+	  wireless drivers. Drivers rtl8192ce, rtl8192cu, rtl8192se, rtl8192de,
+	  rtl8723eu, and rtl8188eu share some common code.
+
+if RTL_CARDS
 
 config RTL8192CE
 	tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
-	depends on RTLWIFI && PCI
+	depends on PCI
 	select RTL8192C_COMMON
+	select RTLWIFI
+	select RTLWIFI_PCI
 	---help---
 	  This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe
 	  wireless network adapters.
@@ -30,7 +23,9 @@ config RTL8192CE
 
 config RTL8192SE
 	tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter"
-	depends on RTLWIFI && PCI
+	depends on PCI
+	select RTLWIFI
+	select RTLWIFI_PCI
 	---help---
 	  This is the driver for Realtek RTL8192SE/RTL8191SE 802.11n PCIe
 	  wireless network adapters.
@@ -39,7 +34,9 @@ config RTL8192SE
 
 config RTL8192DE
 	tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter"
-	depends on RTLWIFI && PCI
+	depends on PCI
+	select RTLWIFI
+	select RTLWIFI_PCI
 	---help---
 	  This is the driver for Realtek RTL8192DE/RTL8188DE 802.11n PCIe
 	  wireless network adapters.
@@ -48,7 +45,9 @@ config RTL8192DE
 
 config RTL8723AE
 	tristate "Realtek RTL8723AE PCIe Wireless Network Adapter"
-	depends on RTLWIFI && PCI
+	depends on PCI
+	select RTLWIFI
+	select RTLWIFI_PCI
 	---help---
 	  This is the driver for Realtek RTL8723AE 802.11n PCIe
 	  wireless network adapters.
@@ -57,7 +56,9 @@ config RTL8723AE
 
 config RTL8188EE
 	tristate "Realtek RTL8188EE Wireless Network Adapter"
-	depends on RTLWIFI && PCI
+	depends on PCI
+	select RTLWIFI
+	select RTLWIFI_PCI
 	---help---
 	  This is the driver for Realtek RTL8188EE 802.11n PCIe
 	  wireless network adapters.
@@ -66,7 +67,9 @@ config RTL8188EE
 
 config RTL8192CU
 	tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
-	depends on RTLWIFI && USB
+	depends on USB
+	select RTLWIFI
+	select RTLWIFI_USB
 	select RTL8192C_COMMON
 	---help---
 	  This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB
@@ -74,7 +77,28 @@ config RTL8192CU
 
 	  If you choose to build it as a module, it will be called rtl8192cu
 
+config RTLWIFI
+	tristate
+	select FW_LOADER
+
+config RTLWIFI_PCI
+	tristate
+
+config RTLWIFI_USB
+	tristate
+
+config RTLWIFI_DEBUG
+	bool "Debugging output for rtlwifi driver family"
+	depends on RTLWIFI
+	default y
+	---help---
+	  To use the module option that sets the dynamic-debugging level for,
+	  the front-end driver, this parameter must be "Y". For memory-limited
+	  systems, choose "N". If in doubt, choose "Y".
+
 config RTL8192C_COMMON
 	tristate
 	depends on RTL8192CE || RTL8192CU
-	default m
+	default y
+
+endif
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
index ff02b874f8d8..d56f023a4b90 100644
--- a/drivers/net/wireless/rtlwifi/Makefile
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -12,13 +12,11 @@ rtlwifi-objs := \
 
 rtl8192c_common-objs += \
 
-ifneq ($(CONFIG_PCI),)
-rtlwifi-objs += pci.o
-endif
+obj-$(CONFIG_RTLWIFI_PCI) += rtl_pci.o
+rtl_pci-objs := pci.o
 
-ifneq ($(CONFIG_USB),)
-rtlwifi-objs += usb.o
-endif
+obj-$(CONFIG_RTLWIFI_USB) += rtl_usb.o
+rtl_usb-objs := usb.o
 
 obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/
 obj-$(CONFIG_RTL8192CE) += rtl8192ce/
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 9d558ac77b0c..7651f5acc14b 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -172,6 +172,7 @@ u8 rtl_tid_to_ac(u8 tid)
 {
 	return tid_to_ac[tid];
 }
+EXPORT_SYMBOL_GPL(rtl_tid_to_ac);
 
 static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
 				  struct ieee80211_sta_ht_cap *ht_cap)
@@ -406,6 +407,7 @@ void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
 	cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
 	cancel_delayed_work(&rtlpriv->works.fwevt_wq);
 }
+EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work);
 
 void rtl_init_rfkill(struct ieee80211_hw *hw)
 {
@@ -439,6 +441,7 @@ void rtl_deinit_rfkill(struct ieee80211_hw *hw)
 {
 	wiphy_rfkill_stop_polling(hw->wiphy);
 }
+EXPORT_SYMBOL_GPL(rtl_deinit_rfkill);
 
 int rtl_init_core(struct ieee80211_hw *hw)
 {
@@ -489,10 +492,12 @@ int rtl_init_core(struct ieee80211_hw *hw)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(rtl_init_core);
 
 void rtl_deinit_core(struct ieee80211_hw *hw)
 {
 }
+EXPORT_SYMBOL_GPL(rtl_deinit_core);
 
 void rtl_init_rx_config(struct ieee80211_hw *hw)
 {
@@ -501,6 +506,7 @@ void rtl_init_rx_config(struct ieee80211_hw *hw)
 
 	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf));
 }
+EXPORT_SYMBOL_GPL(rtl_init_rx_config);
 
 /*********************************************************
  *
@@ -879,6 +885,7 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(rtl_tx_mgmt_proc);
 
 void rtl_get_tcb_desc(struct ieee80211_hw *hw,
 		      struct ieee80211_tx_info *info,
@@ -1052,6 +1059,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(rtl_action_proc);
 
 /*should call before software enc*/
 u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
@@ -1125,6 +1133,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
 
 	return false;
 }
+EXPORT_SYMBOL_GPL(rtl_is_special_data);
 
 /*********************************************************
  *
@@ -1300,6 +1309,7 @@ void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
 
 	rtlpriv->link_info.bcn_rx_inperiod++;
 }
+EXPORT_SYMBOL_GPL(rtl_beacon_statistic);
 
 void rtl_watchdog_wq_callback(void *data)
 {
@@ -1793,6 +1803,7 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
 
 	mac->vendor = vendor;
 }
+EXPORT_SYMBOL_GPL(rtl_recognize_peer);
 
 /*********************************************************
  *
@@ -1849,6 +1860,7 @@ struct attribute_group rtl_attribute_group = {
 	.name = "rtlsysfs",
 	.attrs = rtl_sysfs_entries,
 };
+EXPORT_SYMBOL_GPL(rtl_attribute_group);
 
 MODULE_AUTHOR("lizhaoming	<chaoming_li@realsil.com.cn>");
 MODULE_AUTHOR("Realtek WlanFAE	<wlanfae@realtek.com>");
@@ -1856,7 +1868,8 @@ MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
 
-struct rtl_global_var global_var = {};
+struct rtl_global_var rtl_global_var = {};
+EXPORT_SYMBOL_GPL(rtl_global_var);
 
 static int __init rtl_core_module_init(void)
 {
@@ -1864,8 +1877,8 @@ static int __init rtl_core_module_init(void)
 		pr_err("Unable to register rtl_rc, use default RC !!\n");
 
 	/* init some global vars */
-	INIT_LIST_HEAD(&global_var.glb_priv_list);
-	spin_lock_init(&global_var.glb_list_lock);
+	INIT_LIST_HEAD(&rtl_global_var.glb_priv_list);
+	spin_lock_init(&rtl_global_var.glb_list_lock);
 
 	return 0;
 }
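The blanket EXPORT_SYMBOL_GPL() additions above follow directly from the Kconfig/Makefile split earlier in this series: pci.o and usb.o become standalone rtl_pci/rtl_usb modules, and every rtlwifi function they call across the new module boundary must be exported or the modules fail to resolve symbols at load time. A minimal kernel-style sketch of the mechanism (the function name is illustrative, not from the diff):

#include <linux/module.h>

/* Any helper a separate module calls needs an export like this. */
int example_shared_helper(int x)
{
	return x + 1;
}
EXPORT_SYMBOL_GPL(example_shared_helper);	/* visible to GPL modules */

MODULE_LICENSE("GPL");

The consuming module simply declares the function, normally via a shared header such as base.h here, and the symbol is bound when the module loads. The renaming of global_var to rtl_global_var in the same hunk serves the related purpose of keeping the now-global symbol out of the generic namespace.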
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 8576bc34b032..0e5fe0902daf 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -147,7 +147,7 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
 u8 rtl_tid_to_ac(u8 tid);
 extern struct attribute_group rtl_attribute_group;
 void rtl_easy_concurrent_retrytimer_callback(unsigned long data);
-extern struct rtl_global_var global_var;
+extern struct rtl_global_var rtl_global_var;
 int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
 			 bool isht, u8 desc_rate, bool first_ampdu);
 bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index ee84844be008..733b7ce7f0e2 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1330,3 +1330,4 @@ const struct ieee80211_ops rtl_ops = {
 	.rfkill_poll = rtl_op_rfkill_poll,
 	.flush = rtl_op_flush,
 };
+EXPORT_SYMBOL_GPL(rtl_ops);
diff --git a/drivers/net/wireless/rtlwifi/debug.c b/drivers/net/wireless/rtlwifi/debug.c
index 7d52d3d7769f..76e2086e137e 100644
--- a/drivers/net/wireless/rtlwifi/debug.c
+++ b/drivers/net/wireless/rtlwifi/debug.c
@@ -51,3 +51,4 @@ void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
 
 	/*Init Debug flag enable condition */
 }
+EXPORT_SYMBOL_GPL(rtl_dbgp_flag_init);
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 9e3894178e77..838a1ed3f194 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -229,6 +229,7 @@ void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf)
 
 	*pbuf = (u8) (value32 & 0xff);
 }
+EXPORT_SYMBOL_GPL(read_efuse_byte);
 
 void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
 {
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index c97e9d327331..703f839af6ca 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -35,6 +35,13 @@
 #include "efuse.h"
 #include <linux/export.h>
 #include <linux/kmemleak.h>
+#include <linux/module.h>
+
+MODULE_AUTHOR("lizhaoming	<chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE	<wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger	<Larry.FInger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCI basic driver for rtlwifi");
 
 static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
 	PCI_VENDOR_ID_INTEL,
@@ -1008,19 +1015,6 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
 	return;
 }
 
-static void rtl_lps_change_work_callback(struct work_struct *work)
-{
-	struct rtl_works *rtlworks =
-	    container_of(work, struct rtl_works, lps_change_work);
-	struct ieee80211_hw *hw = rtlworks->hw;
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-	if (rtlpriv->enter_ps)
-		rtl_lps_enter(hw);
-	else
-		rtl_lps_leave(hw);
-}
-
 static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
 {
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1899,7 +1893,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
 	rtlpriv->rtlhal.interface = INTF_PCI;
 	rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
 	rtlpriv->intf_ops = &rtl_pci_ops;
-	rtlpriv->glb_var = &global_var;
+	rtlpriv->glb_var = &rtl_global_var;
 
 	/*
 	 *init dbgp flags before all
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 884bceae38a9..298b615964e8 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -269,6 +269,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
 
 	spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags);
 }
+EXPORT_SYMBOL_GPL(rtl_ips_nic_on);
 
 /*for FW LPS*/
 
@@ -518,6 +519,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
518 "u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed); 519 "u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed);
519 } 520 }
520} 521}
522EXPORT_SYMBOL_GPL(rtl_swlps_beacon);
521 523
522void rtl_swlps_rf_awake(struct ieee80211_hw *hw) 524void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
523{ 525{
@@ -611,6 +613,19 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
 			MSECS(sleep_intv * mac->vif->bss_conf.beacon_int - 40));
 }
 
+void rtl_lps_change_work_callback(struct work_struct *work)
+{
+	struct rtl_works *rtlworks =
+	    container_of(work, struct rtl_works, lps_change_work);
+	struct ieee80211_hw *hw = rtlworks->hw;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (rtlpriv->enter_ps)
+		rtl_lps_enter(hw);
+	else
+		rtl_lps_leave(hw);
+}
+EXPORT_SYMBOL_GPL(rtl_lps_change_work_callback);
 
 void rtl_swlps_wq_callback(void *data)
 {
@@ -922,3 +937,4 @@ void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
 	else
 		rtl_p2p_noa_ie(hw, data, len - FCS_LEN);
 }
+EXPORT_SYMBOL_GPL(rtl_p2p_info);
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h
index 4d682b753f50..88bd76ea88f7 100644
--- a/drivers/net/wireless/rtlwifi/ps.h
+++ b/drivers/net/wireless/rtlwifi/ps.h
@@ -49,5 +49,6 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw);
 void rtl_swlps_rf_sleep(struct ieee80211_hw *hw);
 void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
 void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len);
+void rtl_lps_change_work_callback(struct work_struct *work);
 
 #endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index a3532e077871..e56778cac9bf 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -32,6 +32,13 @@
 #include "ps.h"
 #include "rtl8192c/fw_common.h"
 #include <linux/export.h>
+#include <linux/module.h>
+
+MODULE_AUTHOR("lizhaoming	<chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE	<wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger	<Larry.FInger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("USB basic driver for rtlwifi");
 
 #define REALTEK_USB_VENQT_READ			0xC0
 #define REALTEK_USB_VENQT_WRITE			0x40
@@ -1070,6 +1077,8 @@ int rtl_usb_probe(struct usb_interface *intf,
 	spin_lock_init(&rtlpriv->locks.usb_lock);
 	INIT_WORK(&rtlpriv->works.fill_h2c_cmd,
 		  rtl_fill_h2c_cmd_work_callback);
+	INIT_WORK(&rtlpriv->works.lps_change_work,
+		  rtl_lps_change_work_callback);
 
 	rtlpriv->usb_data_index = 0;
 	init_completion(&rtlpriv->firmware_loading_complete);
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 4941f201d6c8..b8ba1f925e75 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
 		goto exit;
 
 	err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
-	    USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
+	    USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
 	if (err < 0)
 		goto exit;
 
+	memcpy(&ret, buf, sizeof(ret));
+
 	if (ret & 0x80) {
 		err = -EIO;
 		goto exit;
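The zd1201 fix above exists because the data pointer passed to usb_control_msg() is handed to the host controller for DMA, so it must not point at an on-stack variable like &ret; the patch reads into the driver's heap buffer and copies the result out afterwards. A hedged kernel-style sketch of the same shape (example_read_status and its request values are illustrative, mirroring the diff rather than any real device protocol):

#include <linux/slab.h>
#include <linux/usb.h>

/* DMA-safe USB control read: the buffer is kmalloc'ed, never on-stack. */
static int example_read_status(struct usb_device *dev, u8 *status)
{
	u8 *buf;
	int err;

	buf = kmalloc(1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
			      USB_DIR_IN | 0x40, 0, 0, buf, 1, 1000);
	if (err >= 0)
		*status = *buf;		/* copy out, as the zd1201 fix does */

	kfree(buf);
	return err < 0 ? err : 0;
}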
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 6bb7cf2de556..b10ba00cc3e6 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -392,6 +392,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
 	mem = (unsigned long)
 		dt_alloc(size + 4, __alignof__(struct device_node));
 
+	memset((void *)mem, 0, size);
+
 	((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
 
 	pr_debug("  unflattening %lx...\n", mem);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index a3c1c5aae6a9..1264923ade0f 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -345,6 +345,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 	if (r && irq) {
 		const char *name = NULL;
 
+		memset(r, 0, sizeof(*r));
 		/*
 		 * Get optional "interrupts-names" property to add a name
 		 * to the resource.
@@ -482,8 +483,9 @@ void __init of_irq_init(const struct of_device_id *matches)
 		}
 
 		/* Get the next pending parent that might have children */
-		desc = list_first_entry(&intc_parent_list, typeof(*desc), list);
-		if (list_empty(&intc_parent_list) || !desc) {
+		desc = list_first_entry_or_null(&intc_parent_list,
+						typeof(*desc), list);
+		if (!desc) {
 			pr_err("of_irq_init: children remain, but no parents\n");
 			break;
 		}
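The of_irq_init() fix above addresses a subtle list API hazard: list_first_entry() on an empty list returns a pointer computed from the list head itself, never NULL, so the old empty-check-after-fetch order was unsafe. list_first_entry_or_null() folds the emptiness test into the accessor. A standalone sketch with the macros re-derived locally so it compiles outside the kernel:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Like the kernel macro: NULL for an empty list instead of a pointer
 * conjured out of the list head. */
#define list_first_entry_or_null(head, type, member) \
	((head)->next != (head) ? \
		container_of((head)->next, type, member) : NULL)

struct desc {
	int irq;
	struct list_head list;
};

int main(void)
{
	struct list_head parents = { &parents, &parents };	/* empty */

	struct desc *d = list_first_entry_or_null(&parents,
						  struct desc, list);
	if (!d)
		puts("children remain, but no parents");	/* safe path */
	return 0;
}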
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index e79e006eb9ab..9ee04b4b68bf 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -811,18 +811,28 @@ int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev)
 	return pcidev->irq;
 }
 
-static struct iosapic_info *first_isi = NULL;
+static struct iosapic_info *iosapic_list;
 
 #ifdef CONFIG_64BIT
-int iosapic_serial_irq(int num)
+int iosapic_serial_irq(struct parisc_device *dev)
 {
-	struct iosapic_info *isi = first_isi;
-	struct irt_entry *irte = NULL;  /* only used if PAT PDC */
+	struct iosapic_info *isi;
+	struct irt_entry *irte;
 	struct vector_info *vi;
-	int isi_line;	/* line used by device */
+	int cnt;
+	int intin;
+
+	intin = (dev->mod_info >> 24) & 15;
 
 	/* lookup IRT entry for isi/slot/pin set */
-	irte = &irt_cell[num];
+	for (cnt = 0; cnt < irt_num_entry; cnt++) {
+		irte = &irt_cell[cnt];
+		if (COMPARE_IRTE_ADDR(irte, dev->mod0) &&
+		    irte->dest_iosapic_intin == intin)
+			break;
+	}
+	if (cnt >= irt_num_entry)
+		return 0; /* no irq found, force polling */
 
 	DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n",
 		irte,
@@ -834,11 +844,17 @@ int iosapic_serial_irq(int num)
 		irte->src_seg_id,
 		irte->dest_iosapic_intin,
 		(u32) irte->dest_iosapic_addr);
-	isi_line = irte->dest_iosapic_intin;
+
+	/* search for iosapic */
+	for (isi = iosapic_list; isi; isi = isi->isi_next)
+		if (isi->isi_hpa == dev->mod0)
+			break;
+	if (!isi)
+		return 0; /* no iosapic found, force polling */
 
 	/* get vector info for this input line */
-	vi = isi->isi_vector + isi_line;
-	DBG_IRT("iosapic_serial_irq:  line %d  vi 0x%p\n", isi_line, vi);
+	vi = isi->isi_vector + intin;
+	DBG_IRT("iosapic_serial_irq:  line %d  vi 0x%p\n", iosapic_intin, vi);
 
 	/* If this IRQ line has already been setup, skip it */
 	if (vi->irte)
@@ -941,8 +957,8 @@ void *iosapic_register(unsigned long hpa)
 		vip->irqline = (unsigned char) cnt;
 		vip->iosapic = isi;
 	}
-	if (!first_isi)
-		first_isi = isi;
+	isi->isi_next = iosapic_list;
+	iosapic_list = isi;
 	return isi;
 }
 
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 13a633b1612e..7bf3926aecc0 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -86,10 +86,6 @@ struct mvebu_sw_pci_bridge {
 	u16 secondary_status;
 	u16 membase;
 	u16 memlimit;
-	u16 prefmembase;
-	u16 prefmemlimit;
-	u32 prefbaseupper;
-	u32 preflimitupper;
 	u16 iobaseupper;
 	u16 iolimitupper;
 	u8 cappointer;
@@ -419,15 +415,7 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
 		break;
 
 	case PCI_PREF_MEMORY_BASE:
-		*value = (bridge->prefmemlimit << 16 | bridge->prefmembase);
-		break;
-
-	case PCI_PREF_BASE_UPPER32:
-		*value = bridge->prefbaseupper;
-		break;
-
-	case PCI_PREF_LIMIT_UPPER32:
-		*value = bridge->preflimitupper;
+		*value = 0;
 		break;
 
 	case PCI_IO_BASE_UPPER16:
@@ -501,19 +489,6 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
 		mvebu_pcie_handle_membase_change(port);
 		break;
 
-	case PCI_PREF_MEMORY_BASE:
-		bridge->prefmembase = value & 0xffff;
-		bridge->prefmemlimit = value >> 16;
-		break;
-
-	case PCI_PREF_BASE_UPPER32:
-		bridge->prefbaseupper = value;
-		break;
-
-	case PCI_PREF_LIMIT_UPPER32:
-		bridge->preflimitupper = value;
-		break;
-
 	case PCI_IO_BASE_UPPER16:
 		bridge->iobaseupper = value & 0xffff;
 		bridge->iolimitupper = value >> 16;
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index bb7ebb22db01..d85009de713d 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -3,16 +3,13 @@
 #
 
 menuconfig HOTPLUG_PCI
-	tristate "Support for PCI Hotplug"
+	bool "Support for PCI Hotplug"
 	depends on PCI && SYSFS
 	---help---
 	  Say Y here if you have a motherboard with a PCI Hotplug controller.
 	  This allows you to add and remove PCI cards while the machine is
 	  powered up and running.
 
-	  To compile this driver as a module, choose M here: the
-	  module will be called pci_hotplug.
-
 	  When in doubt, say N.
 
 if HOTPLUG_PCI
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index aac7a40e4a4a..0e0d0f7f63fd 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -92,7 +92,14 @@ int pciehp_unconfigure_device(struct slot *p_slot)
 	if (ret)
 		presence = 0;
 
-	list_for_each_entry_safe(dev, temp, &parent->devices, bus_list) {
+	/*
+	 * Stopping an SR-IOV PF device removes all the associated VFs,
+	 * which will update the bus->devices list and confuse the
+	 * iterator.  Therefore, iterate in reverse so we remove the VFs
+	 * first, then the PF.  We do the same in pci_stop_bus_device().
+	 */
+	list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
+					 bus_list) {
 		pci_dev_get(dev);
 		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
 			pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl);
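The comment added in the hunk above captures a general rule for walking a list that the loop body can mutate: the "_safe" variant only protects the immediately next element, so a removal that cascades onto entries further ahead (a PF taking its VFs with it) still breaks a forward walk. Because VFs sit after their PF on bus->devices, walking in reverse visits them first. A short kernel-style sketch of the pattern, assuming the standard list.h and PCI core helpers:

/* Reverse-safe walk: 'temp' caches the previous element, and because
 * VFs follow their PF on bus->devices, everything a PF removal
 * cascades onto has already been visited by the time we reach it. */
struct pci_dev *dev, *temp;

list_for_each_entry_safe_reverse(dev, temp, &parent->devices, bus_list) {
	pci_dev_get(dev);
	pci_stop_and_remove_bus_device(dev);	/* may also remove dev's VFs */
	pci_dev_put(dev);
}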
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index b29e20b7862f..bb7af78e4eed 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -388,7 +388,6 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
 	/* Remove the EADS bridge device itself */
 	BUG_ON(!bus->self);
 	pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self));
-	eeh_remove_bus_device(bus->self, true);
 	pci_stop_and_remove_bus_device(bus->self);
 
 	return 0;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index dbdc5f7e2b29..01e264fb50e0 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -317,13 +317,20 @@ void acpi_pci_remove_bus(struct pci_bus *bus)
 /* ACPI bus type */
 static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
 {
-	struct pci_dev * pci_dev;
-	u64 addr;
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	bool is_bridge;
+	u64 addr;
 
-	pci_dev = to_pci_dev(dev);
+	/*
+	 * pci_is_bridge() is not suitable here, because pci_dev->subordinate
+	 * is set only after acpi_pci_find_device() has been called for the
+	 * given device.
+	 */
+	is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE
+			|| pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
 	/* Please ref to ACPI spec for the syntax of _ADR */
 	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
-	*handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr);
+	*handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge);
 	if (!*handle)
 		return -ENODEV;
 	return 0;
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 569f82fc9e22..3b94cfcfa03b 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -14,15 +14,12 @@ config PCIEPORTBUS
 # Include service Kconfig here
 #
 config HOTPLUG_PCI_PCIE
-	tristate "PCI Express Hotplug driver"
+	bool "PCI Express Hotplug driver"
 	depends on HOTPLUG_PCI && PCIEPORTBUS
 	help
 	  Say Y here if you have a motherboard that supports PCI Express Native
 	  Hotplug
 
-	  To compile this driver as a module, choose M here: the
-	  module will be called pciehp.
-
 	  When in doubt, say N.
 
 source "drivers/pci/pcie/aer/Kconfig"
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index d254e2379533..64a7de22d9af 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -300,6 +300,47 @@ static void assign_requested_resources_sorted(struct list_head *head,
 	}
 }
 
+static unsigned long pci_fail_res_type_mask(struct list_head *fail_head)
+{
+	struct pci_dev_resource *fail_res;
+	unsigned long mask = 0;
+
+	/* check failed type */
+	list_for_each_entry(fail_res, fail_head, list)
+		mask |= fail_res->flags;
+
+	/*
+	 * one pref failed resource will set IORESOURCE_MEM,
+	 * as we can allocate pref in non-pref range.
+	 * Will release all assigned non-pref sibling resources
+	 * according to that bit.
+	 */
+	return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH);
+}
+
+static bool pci_need_to_release(unsigned long mask, struct resource *res)
+{
+	if (res->flags & IORESOURCE_IO)
+		return !!(mask & IORESOURCE_IO);
+
+	/* check pref at first */
+	if (res->flags & IORESOURCE_PREFETCH) {
+		if (mask & IORESOURCE_PREFETCH)
+			return true;
+		/* count pref if its parent is non-pref */
+		else if ((mask & IORESOURCE_MEM) &&
+			 !(res->parent->flags & IORESOURCE_PREFETCH))
+			return true;
+		else
+			return false;
+	}
+
+	if (res->flags & IORESOURCE_MEM)
+		return !!(mask & IORESOURCE_MEM);
+
+	return false;	/* should not get here */
+}
+
 static void __assign_resources_sorted(struct list_head *head,
 				 struct list_head *realloc_head,
 				 struct list_head *fail_head)
@@ -312,11 +353,24 @@ static void __assign_resources_sorted(struct list_head *head,
 	 * if could do that, could get out early.
 	 * if could not do that, we still try to assign requested at first,
 	 * then try to reassign add_size for some resources.
+	 *
+	 * Separate three resource type checking if we need to release
+	 * assigned resource after requested + add_size try.
+	 *	1. if there is io port assign fail, will release assigned
+	 *	   io port.
+	 *	2. if there is pref mmio assign fail, release assigned
+	 *	   pref mmio.
+	 *	   if assigned pref mmio's parent is non-pref mmio and there
+	 *	   is non-pref mmio assign fail, will release that assigned
+	 *	   pref mmio.
+	 *	3. if there is non-pref mmio assign fail or pref mmio
+	 *	   assigned fail, will release assigned non-pref mmio.
 	 */
 	LIST_HEAD(save_head);
 	LIST_HEAD(local_fail_head);
 	struct pci_dev_resource *save_res;
-	struct pci_dev_resource *dev_res;
+	struct pci_dev_resource *dev_res, *tmp_res;
+	unsigned long fail_type;
 
 	/* Check if optional add_size is there */
 	if (!realloc_head || list_empty(realloc_head))
@@ -348,6 +402,19 @@ static void __assign_resources_sorted(struct list_head *head,
348 return; 402 return;
349 } 403 }
350 404
405 /* check failed type */
406 fail_type = pci_fail_res_type_mask(&local_fail_head);
 407	 /* remove assigned resources that don't need to be released from the head list etc */
408 list_for_each_entry_safe(dev_res, tmp_res, head, list)
409 if (dev_res->res->parent &&
410 !pci_need_to_release(fail_type, dev_res->res)) {
411 /* remove it from realloc_head list */
412 remove_from_list(realloc_head, dev_res->res);
413 remove_from_list(&save_head, dev_res->res);
414 list_del(&dev_res->list);
415 kfree(dev_res);
416 }
417
351 free_list(&local_fail_head); 418 free_list(&local_fail_head);
352 /* Release assigned resource */ 419 /* Release assigned resource */
353 list_for_each_entry(dev_res, head, list) 420 list_for_each_entry(dev_res, head, list)
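
An editorial aside on the two helpers added above: the release policy is pure bit logic and can be exercised in isolation. Below is a minimal user-space C sketch of the pci_fail_res_type_mask()/pci_need_to_release() decision; the IORESOURCE_* values are illustrative stand-ins, not the kernel's definitions.

	#include <stdbool.h>
	#include <stdio.h>

	/* illustrative stand-in flag values, not the kernel's definitions */
	#define IORESOURCE_IO       0x1UL
	#define IORESOURCE_MEM      0x2UL
	#define IORESOURCE_PREFETCH 0x4UL

	/* mask: OR of the flags of every resource that failed assignment */
	static bool need_to_release(unsigned long mask, unsigned long res_flags,
				    unsigned long parent_flags)
	{
		if (res_flags & IORESOURCE_IO)
			return mask & IORESOURCE_IO;

		if (res_flags & IORESOURCE_PREFETCH) {
			if (mask & IORESOURCE_PREFETCH)
				return true;
			/* pref assigned inside a non-pref window counts as non-pref */
			return (mask & IORESOURCE_MEM) &&
			       !(parent_flags & IORESOURCE_PREFETCH);
		}

		if (res_flags & IORESOURCE_MEM)
			return mask & IORESOURCE_MEM;

		return false;
	}

	int main(void)
	{
		unsigned long mask = IORESOURCE_MEM;	/* only non-pref mmio failed */

		/* pref BAR parked in a non-pref window: released (prints 1) */
		printf("%d\n", need_to_release(mask,
		       IORESOURCE_MEM | IORESOURCE_PREFETCH, IORESOURCE_MEM));
		/* pref BAR in a real pref window: kept (prints 0) */
		printf("%d\n", need_to_release(mask,
		       IORESOURCE_MEM | IORESOURCE_PREFETCH,
		       IORESOURCE_MEM | IORESOURCE_PREFETCH));
		return 0;
	}
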
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 5b272bfd261d..2a00239661b3 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1193,6 +1193,7 @@ void pinctrl_unregister_map(struct pinctrl_map const *map)
1193 list_for_each_entry(maps_node, &pinctrl_maps, node) { 1193 list_for_each_entry(maps_node, &pinctrl_maps, node) {
1194 if (maps_node->maps == map) { 1194 if (maps_node->maps == map) {
1195 list_del(&maps_node->node); 1195 list_del(&maps_node->node);
1196 kfree(maps_node);
1196 mutex_unlock(&pinctrl_maps_mutex); 1197 mutex_unlock(&pinctrl_maps_mutex);
1197 return; 1198 return;
1198 } 1199 }
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 6866548fab31..7323cca440b5 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1483,6 +1483,7 @@ static int pcs_add_gpio_func(struct device_node *node, struct pcs_device *pcs)
1483 return ret; 1483 return ret;
1484} 1484}
1485 1485
1486#ifdef CONFIG_PM
1486static int pinctrl_single_suspend(struct platform_device *pdev, 1487static int pinctrl_single_suspend(struct platform_device *pdev,
1487 pm_message_t state) 1488 pm_message_t state)
1488{ 1489{
@@ -1505,6 +1506,7 @@ static int pinctrl_single_resume(struct platform_device *pdev)
1505 1506
1506 return pinctrl_force_default(pcs->pctl); 1507 return pinctrl_force_default(pcs->pctl);
1507} 1508}
1509#endif
1508 1510
1509static int pcs_probe(struct platform_device *pdev) 1511static int pcs_probe(struct platform_device *pdev)
1510{ 1512{
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
index c47fd1e5450b..94716c779800 100644
--- a/drivers/pinctrl/pinctrl-sunxi.c
+++ b/drivers/pinctrl/pinctrl-sunxi.c
@@ -278,6 +278,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
278{ 278{
279 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); 279 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
280 struct sunxi_pinctrl_group *g = &pctl->groups[group]; 280 struct sunxi_pinctrl_group *g = &pctl->groups[group];
281 unsigned long flags;
281 u32 val, mask; 282 u32 val, mask;
282 u16 strength; 283 u16 strength;
283 u8 dlevel; 284 u8 dlevel;
@@ -295,22 +296,35 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
295 * 3: 40mA 296 * 3: 40mA
296 */ 297 */
297 dlevel = strength / 10 - 1; 298 dlevel = strength / 10 - 1;
299
300 spin_lock_irqsave(&pctl->lock, flags);
301
298 val = readl(pctl->membase + sunxi_dlevel_reg(g->pin)); 302 val = readl(pctl->membase + sunxi_dlevel_reg(g->pin));
299 mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin); 303 mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin);
300 writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin), 304 writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin),
301 pctl->membase + sunxi_dlevel_reg(g->pin)); 305 pctl->membase + sunxi_dlevel_reg(g->pin));
306
307 spin_unlock_irqrestore(&pctl->lock, flags);
302 break; 308 break;
303 case PIN_CONFIG_BIAS_PULL_UP: 309 case PIN_CONFIG_BIAS_PULL_UP:
310 spin_lock_irqsave(&pctl->lock, flags);
311
304 val = readl(pctl->membase + sunxi_pull_reg(g->pin)); 312 val = readl(pctl->membase + sunxi_pull_reg(g->pin));
305 mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); 313 mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
306 writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin), 314 writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin),
307 pctl->membase + sunxi_pull_reg(g->pin)); 315 pctl->membase + sunxi_pull_reg(g->pin));
316
317 spin_unlock_irqrestore(&pctl->lock, flags);
308 break; 318 break;
309 case PIN_CONFIG_BIAS_PULL_DOWN: 319 case PIN_CONFIG_BIAS_PULL_DOWN:
320 spin_lock_irqsave(&pctl->lock, flags);
321
310 val = readl(pctl->membase + sunxi_pull_reg(g->pin)); 322 val = readl(pctl->membase + sunxi_pull_reg(g->pin));
311 mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); 323 mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
312 writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin), 324 writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin),
313 pctl->membase + sunxi_pull_reg(g->pin)); 325 pctl->membase + sunxi_pull_reg(g->pin));
326
327 spin_unlock_irqrestore(&pctl->lock, flags);
314 break; 328 break;
315 default: 329 default:
316 break; 330 break;
@@ -360,11 +374,17 @@ static void sunxi_pmx_set(struct pinctrl_dev *pctldev,
360 u8 config) 374 u8 config)
361{ 375{
362 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); 376 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
377 unsigned long flags;
378 u32 val, mask;
379
380 spin_lock_irqsave(&pctl->lock, flags);
363 381
364 u32 val = readl(pctl->membase + sunxi_mux_reg(pin)); 382 val = readl(pctl->membase + sunxi_mux_reg(pin));
365 u32 mask = MUX_PINS_MASK << sunxi_mux_offset(pin); 383 mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
366 writel((val & ~mask) | config << sunxi_mux_offset(pin), 384 writel((val & ~mask) | config << sunxi_mux_offset(pin),
367 pctl->membase + sunxi_mux_reg(pin)); 385 pctl->membase + sunxi_mux_reg(pin));
386
387 spin_unlock_irqrestore(&pctl->lock, flags);
368} 388}
369 389
370static int sunxi_pmx_enable(struct pinctrl_dev *pctldev, 390static int sunxi_pmx_enable(struct pinctrl_dev *pctldev,
@@ -464,8 +484,21 @@ static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
464 struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); 484 struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
465 u32 reg = sunxi_data_reg(offset); 485 u32 reg = sunxi_data_reg(offset);
466 u8 index = sunxi_data_offset(offset); 486 u8 index = sunxi_data_offset(offset);
487 unsigned long flags;
488 u32 regval;
489
490 spin_lock_irqsave(&pctl->lock, flags);
491
492 regval = readl(pctl->membase + reg);
467 493
468 writel((value & DATA_PINS_MASK) << index, pctl->membase + reg); 494 if (value)
495 regval |= BIT(index);
496 else
497 regval &= ~(BIT(index));
498
499 writel(regval, pctl->membase + reg);
500
501 spin_unlock_irqrestore(&pctl->lock, flags);
469} 502}
470 503
471static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc, 504static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
@@ -526,6 +559,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
526 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); 559 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
527 u32 reg = sunxi_irq_cfg_reg(d->hwirq); 560 u32 reg = sunxi_irq_cfg_reg(d->hwirq);
528 u8 index = sunxi_irq_cfg_offset(d->hwirq); 561 u8 index = sunxi_irq_cfg_offset(d->hwirq);
562 unsigned long flags;
563 u32 regval;
529 u8 mode; 564 u8 mode;
530 565
531 switch (type) { 566 switch (type) {
@@ -548,7 +583,13 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
548 return -EINVAL; 583 return -EINVAL;
549 } 584 }
550 585
551 writel((mode & IRQ_CFG_IRQ_MASK) << index, pctl->membase + reg); 586 spin_lock_irqsave(&pctl->lock, flags);
587
588 regval = readl(pctl->membase + reg);
589 regval &= ~IRQ_CFG_IRQ_MASK;
590 writel(regval | (mode << index), pctl->membase + reg);
591
592 spin_unlock_irqrestore(&pctl->lock, flags);
552 593
553 return 0; 594 return 0;
554} 595}
@@ -560,14 +601,19 @@ static void sunxi_pinctrl_irq_mask_ack(struct irq_data *d)
560 u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq); 601 u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq);
561 u32 status_reg = sunxi_irq_status_reg(d->hwirq); 602 u32 status_reg = sunxi_irq_status_reg(d->hwirq);
562 u8 status_idx = sunxi_irq_status_offset(d->hwirq); 603 u8 status_idx = sunxi_irq_status_offset(d->hwirq);
604 unsigned long flags;
563 u32 val; 605 u32 val;
564 606
607 spin_lock_irqsave(&pctl->lock, flags);
608
565 /* Mask the IRQ */ 609 /* Mask the IRQ */
566 val = readl(pctl->membase + ctrl_reg); 610 val = readl(pctl->membase + ctrl_reg);
567 writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg); 611 writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg);
568 612
569 /* Clear the IRQ */ 613 /* Clear the IRQ */
570 writel(1 << status_idx, pctl->membase + status_reg); 614 writel(1 << status_idx, pctl->membase + status_reg);
615
616 spin_unlock_irqrestore(&pctl->lock, flags);
571} 617}
572 618
573static void sunxi_pinctrl_irq_mask(struct irq_data *d) 619static void sunxi_pinctrl_irq_mask(struct irq_data *d)
@@ -575,11 +621,16 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
575 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); 621 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
576 u32 reg = sunxi_irq_ctrl_reg(d->hwirq); 622 u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
577 u8 idx = sunxi_irq_ctrl_offset(d->hwirq); 623 u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
624 unsigned long flags;
578 u32 val; 625 u32 val;
579 626
627 spin_lock_irqsave(&pctl->lock, flags);
628
580 /* Mask the IRQ */ 629 /* Mask the IRQ */
581 val = readl(pctl->membase + reg); 630 val = readl(pctl->membase + reg);
582 writel(val & ~(1 << idx), pctl->membase + reg); 631 writel(val & ~(1 << idx), pctl->membase + reg);
632
633 spin_unlock_irqrestore(&pctl->lock, flags);
583} 634}
584 635
585static void sunxi_pinctrl_irq_unmask(struct irq_data *d) 636static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
@@ -588,6 +639,7 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
588 struct sunxi_desc_function *func; 639 struct sunxi_desc_function *func;
589 u32 reg = sunxi_irq_ctrl_reg(d->hwirq); 640 u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
590 u8 idx = sunxi_irq_ctrl_offset(d->hwirq); 641 u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
642 unsigned long flags;
591 u32 val; 643 u32 val;
592 644
593 func = sunxi_pinctrl_desc_find_function_by_pin(pctl, 645 func = sunxi_pinctrl_desc_find_function_by_pin(pctl,
@@ -597,9 +649,13 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
597 /* Change muxing to INT mode */ 649 /* Change muxing to INT mode */
598 sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval); 650 sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval);
599 651
652 spin_lock_irqsave(&pctl->lock, flags);
653
600 /* Unmask the IRQ */ 654 /* Unmask the IRQ */
601 val = readl(pctl->membase + reg); 655 val = readl(pctl->membase + reg);
602 writel(val | (1 << idx), pctl->membase + reg); 656 writel(val | (1 << idx), pctl->membase + reg);
657
658 spin_unlock_irqrestore(&pctl->lock, flags);
603} 659}
604 660
605static struct irq_chip sunxi_pinctrl_irq_chip = { 661static struct irq_chip sunxi_pinctrl_irq_chip = {
@@ -752,6 +808,8 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev)
752 return -ENOMEM; 808 return -ENOMEM;
753 platform_set_drvdata(pdev, pctl); 809 platform_set_drvdata(pdev, pctl);
754 810
811 spin_lock_init(&pctl->lock);
812
755 pctl->membase = of_iomap(node, 0); 813 pctl->membase = of_iomap(node, 0);
756 if (!pctl->membase) 814 if (!pctl->membase)
757 return -ENOMEM; 815 return -ENOMEM;
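
The sunxi changes above all wrap a readl()/modify/writel() sequence in pctl->lock. The reason is the classic lost update: two CPUs read the same register value, each writes back its own modification, and one update vanishes. Here is a user-space sketch of the same discipline, with a pthread mutex standing in for the spinlock and a plain variable for the memory-mapped register; names are illustrative.

	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t fake_reg;		/* stands in for *(membase + reg) */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static void set_bits_locked(uint32_t mask, uint32_t bits)
	{
		pthread_mutex_lock(&lock);
		uint32_t val = fake_reg;		/* readl() */
		fake_reg = (val & ~mask) | bits;	/* writel() */
		pthread_mutex_unlock(&lock);
	}

	static void *worker(void *arg)
	{
		uint32_t bit = (uint32_t)(uintptr_t)arg;
		for (int i = 0; i < 100000; i++) {
			set_bits_locked(bit, bit);	/* set our bit */
			set_bits_locked(bit, 0);	/* clear our bit */
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_create(&a, NULL, worker, (void *)0x1);
		pthread_create(&b, NULL, worker, (void *)0x2);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		/* with the lock this always ends 0; without it, it may not */
		printf("final reg = 0x%x\n", fake_reg);
		return 0;
	}
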
diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h
index d68047d8f699..01c494f8a14f 100644
--- a/drivers/pinctrl/pinctrl-sunxi.h
+++ b/drivers/pinctrl/pinctrl-sunxi.h
@@ -14,6 +14,7 @@
14#define __PINCTRL_SUNXI_H 14#define __PINCTRL_SUNXI_H
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/spinlock.h>
17 18
18#define PA_BASE 0 19#define PA_BASE 0
19#define PB_BASE 32 20#define PB_BASE 32
@@ -407,6 +408,7 @@ struct sunxi_pinctrl {
407 unsigned ngroups; 408 unsigned ngroups;
408 int irq; 409 int irq;
409 int irq_array[SUNXI_IRQ_NUMBER]; 410 int irq_array[SUNXI_IRQ_NUMBER];
411 spinlock_t lock;
410 struct pinctrl_dev *pctl_dev; 412 struct pinctrl_dev *pctl_dev;
411}; 413};
412 414
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
index 7956df58d751..31f7d0e04aaa 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -3785,6 +3785,7 @@ static const struct regulator_desc sh73a0_vccq_mc0_desc = {
3785 3785
3786static struct regulator_consumer_supply sh73a0_vccq_mc0_consumers[] = { 3786static struct regulator_consumer_supply sh73a0_vccq_mc0_consumers[] = {
3787 REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), 3787 REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
3788 REGULATOR_SUPPLY("vqmmc", "ee100000.sdhi"),
3788}; 3789};
3789 3790
3790static const struct regulator_init_data sh73a0_vccq_mc0_init_data = { 3791static const struct regulator_init_data sh73a0_vccq_mc0_init_data = {
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas6.c b/drivers/pinctrl/sirf/pinctrl-atlas6.c
index 1fa39a444171..867c9681763c 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas6.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas6.c
@@ -496,7 +496,7 @@ static const unsigned sdmmc5_pins[] = { 24, 25, 26 };
496static const struct sirfsoc_muxmask usp0_muxmask[] = { 496static const struct sirfsoc_muxmask usp0_muxmask[] = {
497 { 497 {
498 .group = 1, 498 .group = 1,
499 .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22), 499 .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23),
500 }, 500 },
501}; 501};
502 502
@@ -507,8 +507,21 @@ static const struct sirfsoc_padmux usp0_padmux = {
507 .funcval = 0, 507 .funcval = 0,
508}; 508};
509 509
510static const unsigned usp0_pins[] = { 51, 52, 53, 54 }; 510static const unsigned usp0_pins[] = { 51, 52, 53, 54, 55 };
511 511
512static const struct sirfsoc_muxmask usp0_uart_nostreamctrl_muxmask[] = {
513 {
514 .group = 1,
515 .mask = BIT(20) | BIT(21),
516 },
517};
518
519static const struct sirfsoc_padmux usp0_uart_nostreamctrl_padmux = {
520 .muxmask_counts = ARRAY_SIZE(usp0_uart_nostreamctrl_muxmask),
521 .muxmask = usp0_uart_nostreamctrl_muxmask,
522};
523
524static const unsigned usp0_uart_nostreamctrl_pins[] = { 52, 53 };
512static const struct sirfsoc_muxmask usp1_muxmask[] = { 525static const struct sirfsoc_muxmask usp1_muxmask[] = {
513 { 526 {
514 .group = 0, 527 .group = 0,
@@ -822,6 +835,8 @@ static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
822 SIRFSOC_PIN_GROUP("uart2grp", uart2_pins), 835 SIRFSOC_PIN_GROUP("uart2grp", uart2_pins),
823 SIRFSOC_PIN_GROUP("uart2_nostreamctrlgrp", uart2_nostreamctrl_pins), 836 SIRFSOC_PIN_GROUP("uart2_nostreamctrlgrp", uart2_nostreamctrl_pins),
824 SIRFSOC_PIN_GROUP("usp0grp", usp0_pins), 837 SIRFSOC_PIN_GROUP("usp0grp", usp0_pins),
838 SIRFSOC_PIN_GROUP("usp0_uart_nostreamctrl_grp",
839 usp0_uart_nostreamctrl_pins),
825 SIRFSOC_PIN_GROUP("usp1grp", usp1_pins), 840 SIRFSOC_PIN_GROUP("usp1grp", usp1_pins),
826 SIRFSOC_PIN_GROUP("i2c0grp", i2c0_pins), 841 SIRFSOC_PIN_GROUP("i2c0grp", i2c0_pins),
827 SIRFSOC_PIN_GROUP("i2c1grp", i2c1_pins), 842 SIRFSOC_PIN_GROUP("i2c1grp", i2c1_pins),
@@ -862,6 +877,8 @@ static const char * const uart0grp[] = { "uart0grp" };
862static const char * const uart1grp[] = { "uart1grp" }; 877static const char * const uart1grp[] = { "uart1grp" };
863static const char * const uart2grp[] = { "uart2grp" }; 878static const char * const uart2grp[] = { "uart2grp" };
864static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" }; 879static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" };
880static const char * const usp0_uart_nostreamctrl_grp[] = {
881 "usp0_uart_nostreamctrl_grp" };
865static const char * const usp0grp[] = { "usp0grp" }; 882static const char * const usp0grp[] = { "usp0grp" };
866static const char * const usp1grp[] = { "usp1grp" }; 883static const char * const usp1grp[] = { "usp1grp" };
867static const char * const i2c0grp[] = { "i2c0grp" }; 884static const char * const i2c0grp[] = { "i2c0grp" };
@@ -904,6 +921,9 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
904 SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux), 921 SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux),
905 SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux), 922 SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux),
906 SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux), 923 SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux),
924 SIRFSOC_PMX_FUNCTION("usp0_uart_nostreamctrl",
925 usp0_uart_nostreamctrl_grp,
926 usp0_uart_nostreamctrl_padmux),
907 SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux), 927 SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux),
908 SIRFSOC_PMX_FUNCTION("i2c0", i2c0grp, i2c0_padmux), 928 SIRFSOC_PMX_FUNCTION("i2c0", i2c0grp, i2c0_padmux),
909 SIRFSOC_PMX_FUNCTION("i2c1", i2c1grp, i2c1_padmux), 929 SIRFSOC_PMX_FUNCTION("i2c1", i2c1grp, i2c1_padmux),
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
index 0f9f8596b300..f9119525f557 100644
--- a/drivers/platform/olpc/olpc-ec.c
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -330,7 +330,7 @@ static int __init olpc_ec_init_module(void)
330 return platform_driver_register(&olpc_ec_plat_driver); 330 return platform_driver_register(&olpc_ec_plat_driver);
331} 331}
332 332
333module_init(olpc_ec_init_module); 333arch_initcall(olpc_ec_init_module);
334 334
335MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); 335MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
336MODULE_LICENSE("GPL"); 336MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 97bb05edcb5a..d6970f47ae72 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -53,7 +53,6 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
53#define HPWMI_ALS_QUERY 0x3 53#define HPWMI_ALS_QUERY 0x3
54#define HPWMI_HARDWARE_QUERY 0x4 54#define HPWMI_HARDWARE_QUERY 0x4
55#define HPWMI_WIRELESS_QUERY 0x5 55#define HPWMI_WIRELESS_QUERY 0x5
56#define HPWMI_BIOS_QUERY 0x9
57#define HPWMI_HOTKEY_QUERY 0xc 56#define HPWMI_HOTKEY_QUERY 0xc
58#define HPWMI_WIRELESS2_QUERY 0x1b 57#define HPWMI_WIRELESS2_QUERY 0x1b
59#define HPWMI_POSTCODEERROR_QUERY 0x2a 58#define HPWMI_POSTCODEERROR_QUERY 0x2a
@@ -293,19 +292,6 @@ static int hp_wmi_tablet_state(void)
293 return (state & 0x4) ? 1 : 0; 292 return (state & 0x4) ? 1 : 0;
294} 293}
295 294
296static int hp_wmi_enable_hotkeys(void)
297{
298 int ret;
299 int query = 0x6e;
300
301 ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query),
302 0);
303
304 if (ret)
305 return -EINVAL;
306 return 0;
307}
308
309static int hp_wmi_set_block(void *data, bool blocked) 295static int hp_wmi_set_block(void *data, bool blocked)
310{ 296{
311 enum hp_wmi_radio r = (enum hp_wmi_radio) data; 297 enum hp_wmi_radio r = (enum hp_wmi_radio) data;
@@ -1009,8 +995,6 @@ static int __init hp_wmi_init(void)
1009 err = hp_wmi_input_setup(); 995 err = hp_wmi_input_setup();
1010 if (err) 996 if (err)
1011 return err; 997 return err;
1012
1013 hp_wmi_enable_hotkeys();
1014 } 998 }
1015 999
1016 if (bios_capable) { 1000 if (bios_capable) {
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 2ac045f27f10..3a1b6bf326a8 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -2440,7 +2440,10 @@ static ssize_t sony_nc_gfx_switch_status_show(struct device *dev,
2440 if (pos < 0) 2440 if (pos < 0)
2441 return pos; 2441 return pos;
2442 2442
2443 return snprintf(buffer, PAGE_SIZE, "%s\n", pos ? "speed" : "stamina"); 2443 return snprintf(buffer, PAGE_SIZE, "%s\n",
2444 pos == SPEED ? "speed" :
2445 pos == STAMINA ? "stamina" :
2446 pos == AUTO ? "auto" : "unknown");
2444} 2447}
2445 2448
2446static int sony_nc_gfx_switch_setup(struct platform_device *pd, 2449static int sony_nc_gfx_switch_setup(struct platform_device *pd,
@@ -4320,7 +4323,8 @@ static int sony_pic_add(struct acpi_device *device)
4320 goto err_free_resources; 4323 goto err_free_resources;
4321 } 4324 }
4322 4325
4323 if (sonypi_compat_init()) 4326 result = sonypi_compat_init();
4327 if (result)
4324 goto err_remove_input; 4328 goto err_remove_input;
4325 4329
4326 /* request io port */ 4330 /* request io port */
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index f4f30af2df68..2e8a20cac588 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1715,11 +1715,13 @@ int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops)
1715 (mport_id == RIO_MPORT_ANY && port->nscan == scan_ops)) 1715 (mport_id == RIO_MPORT_ANY && port->nscan == scan_ops))
1716 port->nscan = NULL; 1716 port->nscan = NULL;
1717 1717
1718 list_for_each_entry(scan, &rio_scans, node) 1718 list_for_each_entry(scan, &rio_scans, node) {
1719 if (scan->mport_id == mport_id) { 1719 if (scan->mport_id == mport_id) {
1720 list_del(&scan->node); 1720 list_del(&scan->node);
1721 kfree(scan); 1721 kfree(scan);
1722 break;
1722 } 1723 }
1724 }
1723 1725
1724 mutex_unlock(&rio_mport_list_lock); 1726 mutex_unlock(&rio_mport_list_lock);
1725 1727
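
The rio.c fix above adds braces and a break so the walk stops at the node it just freed; without the break, the loop's step expression would read freed memory. The same rule in a self-contained C sketch (hand-rolled singly linked list, illustrative names):

	#include <stdlib.h>
	#include <stdio.h>

	struct scan_node {
		int mport_id;
		struct scan_node *next;
	};

	static void unregister_scan(struct scan_node **head, int mport_id)
	{
		struct scan_node *prev = NULL;

		for (struct scan_node *n = *head; n; prev = n, n = n->next) {
			if (n->mport_id == mport_id) {
				if (prev)
					prev->next = n->next;	/* list_del() */
				else
					*head = n->next;
				free(n);			/* kfree() */
				break;	/* without this, n = n->next reads freed memory */
			}
		}
	}

	int main(void)
	{
		struct scan_node *head = NULL;

		for (int id = 0; id < 3; id++) {
			struct scan_node *n = malloc(sizeof(*n));
			n->mport_id = id;
			n->next = head;
			head = n;
		}
		unregister_scan(&head, 1);
		for (struct scan_node *n = head; n; n = n->next)
			printf("%d\n", n->mport_id);	/* prints 2 then 0 */
		return 0;
	}
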
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 767fee2ab340..26019531db15 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -23,6 +23,7 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/delay.h>
26#include <linux/rtc.h> 27#include <linux/rtc.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include <linux/of_device.h> 29#include <linux/of_device.h>
@@ -119,24 +120,39 @@ static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev)
119} 120}
120#endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */ 121#endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */
121 122
122static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data) 123static int stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
123{ 124{
 125	 int timeout = 5000;	/* update takes ~3 ms per i.MX28 Ref Manual; poll up to 5 ms */
124 /* 126 /*
125 * The datasheet doesn't say which way round the 127 * The i.MX28 Applications Processor Reference Manual, Rev. 1, 2010
126 * NEW_REGS/STALE_REGS bitfields go. In fact it's 0x1=P0, 128 * states:
127 * 0x2=P1, .., 0x20=P5, 0x40=ALARM, 0x80=SECONDS 129 * | The order in which registers are updated is
130 * | Persistent 0, 1, 2, 3, 4, 5, Alarm, Seconds.
131 * | (This list is in bitfield order, from LSB to MSB, as they would
132 * | appear in the STALE_REGS and NEW_REGS bitfields of the HW_RTC_STAT
133 * | register. For example, the Seconds register corresponds to
134 * | STALE_REGS or NEW_REGS containing 0x80.)
128 */ 135 */
129 while (readl(rtc_data->io + STMP3XXX_RTC_STAT) & 136 do {
130 (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) 137 if (!(readl(rtc_data->io + STMP3XXX_RTC_STAT) &
131 cpu_relax(); 138 (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)))
139 return 0;
140 udelay(1);
141 } while (--timeout > 0);
142 return (readl(rtc_data->io + STMP3XXX_RTC_STAT) &
143 (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) ? -ETIME : 0;
132} 144}
133 145
134/* Time read/write */ 146/* Time read/write */
135static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) 147static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
136{ 148{
149 int ret;
137 struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); 150 struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
138 151
139 stmp3xxx_wait_time(rtc_data); 152 ret = stmp3xxx_wait_time(rtc_data);
153 if (ret)
154 return ret;
155
140 rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm); 156 rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm);
141 return 0; 157 return 0;
142} 158}
@@ -146,8 +162,7 @@ static int stmp3xxx_rtc_set_mmss(struct device *dev, unsigned long t)
146 struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); 162 struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
147 163
148 writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS); 164 writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS);
149 stmp3xxx_wait_time(rtc_data); 165 return stmp3xxx_wait_time(rtc_data);
150 return 0;
151} 166}
152 167
153/* interrupt(s) handler */ 168/* interrupt(s) handler */
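
The stmp3xxx change converts an unbounded cpu_relax() spin into a bounded poll that fails with -ETIME, including a final re-check after the last delay. The shape is reusable; here is a user-space sketch, with usleep() and the POSIX ETIME constant standing in for udelay() and the kernel's error code.

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	static int poll_until(bool (*ready)(void), int attempts, useconds_t delay_us)
	{
		do {
			if (ready())
				return 0;
			usleep(delay_us);
		} while (--attempts > 0);
		return ready() ? 0 : -ETIME;	/* final re-check, as in the patch */
	}

	static int calls;
	static bool ready_after_three(void)
	{
		return ++calls >= 3;
	}

	int main(void)
	{
		printf("%d\n", poll_until(ready_after_three, 5000, 1));	/* 0 */
		return 0;
	}
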
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 02faf3c4e0d5..c2e80d7ca5e2 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -524,6 +524,8 @@ static int twl_rtc_probe(struct platform_device *pdev)
524 if (ret < 0) 524 if (ret < 0)
525 goto out1; 525 goto out1;
526 526
527 device_init_wakeup(&pdev->dev, 1);
528
527 rtc = rtc_device_register(pdev->name, 529 rtc = rtc_device_register(pdev->name,
528 &pdev->dev, &twl_rtc_ops, THIS_MODULE); 530 &pdev->dev, &twl_rtc_ops, THIS_MODULE);
529 if (IS_ERR(rtc)) { 531 if (IS_ERR(rtc)) {
@@ -542,7 +544,6 @@ static int twl_rtc_probe(struct platform_device *pdev)
542 } 544 }
543 545
544 platform_set_drvdata(pdev, rtc); 546 platform_set_drvdata(pdev, rtc);
545 device_init_wakeup(&pdev->dev, 1);
546 return 0; 547 return 0;
547 548
548out2: 549out2:
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 17150a778984..451bf99582ff 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2392,6 +2392,12 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
2392 rc = cqr->intrc; 2392 rc = cqr->intrc;
2393 else 2393 else
2394 rc = -EIO; 2394 rc = -EIO;
2395
2396 /* kick tasklets */
2397 dasd_schedule_device_bh(device);
2398 if (device->block)
2399 dasd_schedule_block_bh(device->block);
2400
2395 return rc; 2401 return rc;
2396} 2402}
2397 2403
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 1d4c8fe72752..c82fe65c4128 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
102 102
103 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) 103 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
104 zfcp_erp_action_dismiss(&port->erp_action); 104 zfcp_erp_action_dismiss(&port->erp_action);
105 else 105 else {
106 shost_for_each_device(sdev, port->adapter->scsi_host) 106 spin_lock(port->adapter->scsi_host->host_lock);
107 __shost_for_each_device(sdev, port->adapter->scsi_host)
107 if (sdev_to_zfcp(sdev)->port == port) 108 if (sdev_to_zfcp(sdev)->port == port)
108 zfcp_erp_action_dismiss_lun(sdev); 109 zfcp_erp_action_dismiss_lun(sdev);
110 spin_unlock(port->adapter->scsi_host->host_lock);
111 }
109} 112}
110 113
111static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) 114static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
592{ 595{
593 struct scsi_device *sdev; 596 struct scsi_device *sdev;
594 597
595 shost_for_each_device(sdev, port->adapter->scsi_host) 598 spin_lock(port->adapter->scsi_host->host_lock);
599 __shost_for_each_device(sdev, port->adapter->scsi_host)
596 if (sdev_to_zfcp(sdev)->port == port) 600 if (sdev_to_zfcp(sdev)->port == port)
597 _zfcp_erp_lun_reopen(sdev, clear, id, 0); 601 _zfcp_erp_lun_reopen(sdev, clear, id, 0);
602 spin_unlock(port->adapter->scsi_host->host_lock);
598} 603}
599 604
600static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) 605static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -1434,8 +1439,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
1434 atomic_set_mask(common_mask, &port->status); 1439 atomic_set_mask(common_mask, &port->status);
1435 read_unlock_irqrestore(&adapter->port_list_lock, flags); 1440 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1436 1441
1437 shost_for_each_device(sdev, adapter->scsi_host) 1442 spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
1443 __shost_for_each_device(sdev, adapter->scsi_host)
1438 atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); 1444 atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
1445 spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
1439} 1446}
1440 1447
1441/** 1448/**
@@ -1469,11 +1476,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
1469 } 1476 }
1470 read_unlock_irqrestore(&adapter->port_list_lock, flags); 1477 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1471 1478
1472 shost_for_each_device(sdev, adapter->scsi_host) { 1479 spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
1480 __shost_for_each_device(sdev, adapter->scsi_host) {
1473 atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); 1481 atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
1474 if (clear_counter) 1482 if (clear_counter)
1475 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); 1483 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
1476 } 1484 }
1485 spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
1477} 1486}
1478 1487
1479/** 1488/**
@@ -1487,16 +1496,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
1487{ 1496{
1488 struct scsi_device *sdev; 1497 struct scsi_device *sdev;
1489 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1498 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1499 unsigned long flags;
1490 1500
1491 atomic_set_mask(mask, &port->status); 1501 atomic_set_mask(mask, &port->status);
1492 1502
1493 if (!common_mask) 1503 if (!common_mask)
1494 return; 1504 return;
1495 1505
1496 shost_for_each_device(sdev, port->adapter->scsi_host) 1506 spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
1507 __shost_for_each_device(sdev, port->adapter->scsi_host)
1497 if (sdev_to_zfcp(sdev)->port == port) 1508 if (sdev_to_zfcp(sdev)->port == port)
1498 atomic_set_mask(common_mask, 1509 atomic_set_mask(common_mask,
1499 &sdev_to_zfcp(sdev)->status); 1510 &sdev_to_zfcp(sdev)->status);
1511 spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
1500} 1512}
1501 1513
1502/** 1514/**
@@ -1511,6 +1523,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
1511 struct scsi_device *sdev; 1523 struct scsi_device *sdev;
1512 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1524 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1513 u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; 1525 u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
1526 unsigned long flags;
1514 1527
1515 atomic_clear_mask(mask, &port->status); 1528 atomic_clear_mask(mask, &port->status);
1516 1529
@@ -1520,13 +1533,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
1520 if (clear_counter) 1533 if (clear_counter)
1521 atomic_set(&port->erp_counter, 0); 1534 atomic_set(&port->erp_counter, 0);
1522 1535
1523 shost_for_each_device(sdev, port->adapter->scsi_host) 1536 spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
1537 __shost_for_each_device(sdev, port->adapter->scsi_host)
1524 if (sdev_to_zfcp(sdev)->port == port) { 1538 if (sdev_to_zfcp(sdev)->port == port) {
1525 atomic_clear_mask(common_mask, 1539 atomic_clear_mask(common_mask,
1526 &sdev_to_zfcp(sdev)->status); 1540 &sdev_to_zfcp(sdev)->status);
1527 if (clear_counter) 1541 if (clear_counter)
1528 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); 1542 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
1529 } 1543 }
1544 spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
1530} 1545}
1531 1546
1532/** 1547/**
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 665e3cfaaf85..de0598eaacd2 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
224 224
225static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) 225static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
226{ 226{
227 spin_lock_irq(&qdio->req_q_lock);
228 if (atomic_read(&qdio->req_q_free) || 227 if (atomic_read(&qdio->req_q_free) ||
229 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) 228 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
230 return 1; 229 return 1;
231 spin_unlock_irq(&qdio->req_q_lock);
232 return 0; 230 return 0;
233} 231}
234 232
@@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
246{ 244{
247 long ret; 245 long ret;
248 246
249 spin_unlock_irq(&qdio->req_q_lock); 247 ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
250 ret = wait_event_interruptible_timeout(qdio->req_q_wq, 248 zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
251 zfcp_qdio_sbal_check(qdio), 5 * HZ);
252 249
253 if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) 250 if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
254 return -EIO; 251 return -EIO;
@@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
262 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1"); 259 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
263 } 260 }
264 261
265 spin_lock_irq(&qdio->req_q_lock);
266 return -EIO; 262 return -EIO;
267} 263}
268 264
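
wait_event_interruptible_lock_irq_timeout(), used above, releases req_q_lock while sleeping and returns with it re-held, replacing the hand-rolled unlock/wait/lock dance that the patch deletes. The closest user-space analog is a condition variable; a sketch under that assumption, with illustrative names:

	#include <pthread.h>
	#include <errno.h>
	#include <time.h>
	#include <unistd.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	static int free_slots;			/* protected by lock */

	/* returns 0 with lock held and free_slots > 0, or -ETIMEDOUT */
	static int wait_for_slot(int timeout_sec)
	{
		struct timespec deadline;

		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_sec += timeout_sec;

		pthread_mutex_lock(&lock);
		while (free_slots == 0) {
			/* drops the lock while sleeping, re-holds it on return */
			int rc = pthread_cond_timedwait(&cond, &lock, &deadline);
			if (rc == ETIMEDOUT) {
				pthread_mutex_unlock(&lock);
				return -ETIMEDOUT;
			}
		}
		return 0;
	}

	static void put_slot(void)
	{
		pthread_mutex_lock(&lock);
		free_slots++;
		pthread_cond_signal(&cond);
		pthread_mutex_unlock(&lock);
	}

	static void *producer(void *arg)
	{
		(void)arg;
		sleep(1);
		put_slot();
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, producer, NULL);
		if (wait_for_slot(5) == 0) {
			free_slots--;		/* consume under the lock */
			pthread_mutex_unlock(&lock);
		}
		pthread_join(t, NULL);
		return 0;
	}
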
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 3f01bbf0609f..890639274bcf 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -27,6 +27,16 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
27static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ 27static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
28 zfcp_sysfs_##_feat##_##_name##_show, NULL); 28 zfcp_sysfs_##_feat##_##_name##_show, NULL);
29 29
30#define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value) \
31static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
32 struct device_attribute *at,\
33 char *buf) \
34{ \
35 return sprintf(buf, _format, _value); \
36} \
37static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
38 zfcp_sysfs_##_feat##_##_name##_show, NULL);
39
30#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \ 40#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \
31static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ 41static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
32 struct device_attribute *at,\ 42 struct device_attribute *at,\
@@ -75,6 +85,8 @@ ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
75ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", 85ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
76 (zfcp_unit_sdev_status(unit) & 86 (zfcp_unit_sdev_status(unit) &
77 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); 87 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
88ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0);
89ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0);
78 90
79static ssize_t zfcp_sysfs_port_failed_show(struct device *dev, 91static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
80 struct device_attribute *attr, 92 struct device_attribute *attr,
@@ -347,6 +359,8 @@ static struct attribute *zfcp_unit_attrs[] = {
347 &dev_attr_unit_in_recovery.attr, 359 &dev_attr_unit_in_recovery.attr,
348 &dev_attr_unit_status.attr, 360 &dev_attr_unit_status.attr,
349 &dev_attr_unit_access_denied.attr, 361 &dev_attr_unit_access_denied.attr,
362 &dev_attr_unit_access_shared.attr,
363 &dev_attr_unit_access_readonly.attr,
350 NULL 364 NULL
351}; 365};
352static struct attribute_group zfcp_unit_attr_group = { 366static struct attribute_group zfcp_unit_attr_group = {
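
ZFCP_DEFINE_ATTR_CONST above stamps out identical show() functions by token pasting the attribute name into each function's identifier. The technique in miniature, with simplified stand-in types instead of the sysfs plumbing:

	#include <stdio.h>

	/* one macro generates a family of show functions; ## pastes the
	 * attribute name into the function name */
	#define DEFINE_CONST_SHOW(_name, _format, _value)		\
	static int show_##_name(char *buf, size_t len)			\
	{								\
		return snprintf(buf, len, _format, _value);		\
	}

	DEFINE_CONST_SHOW(access_shared,   "%d\n", 0)
	DEFINE_CONST_SHOW(access_readonly, "%d\n", 0)

	int main(void)
	{
		char buf[16];

		show_access_shared(buf, sizeof(buf));	/* expanded from the macro */
		fputs(buf, stdout);			/* writes "0\n" */
		show_access_readonly(buf, sizeof(buf));
		fputs(buf, stdout);
		return 0;
	}
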
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 48b2918e0d65..92ff027746f2 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1353,7 +1353,6 @@ config SCSI_LPFC
1353 tristate "Emulex LightPulse Fibre Channel Support" 1353 tristate "Emulex LightPulse Fibre Channel Support"
1354 depends on PCI && SCSI 1354 depends on PCI && SCSI
1355 select SCSI_FC_ATTRS 1355 select SCSI_FC_ATTRS
1356 select GENERIC_CSUM
1357 select CRC_T10DIF 1356 select CRC_T10DIF
1358 help 1357 help
1359 This lpfc driver supports the Emulex LightPulse 1358 This lpfc driver supports the Emulex LightPulse
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index b6d1f92ed33c..c18c68150e9f 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -38,7 +38,7 @@
38 38
39#define DRV_NAME "fnic" 39#define DRV_NAME "fnic"
40#define DRV_DESCRIPTION "Cisco FCoE HBA Driver" 40#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
41#define DRV_VERSION "1.5.0.22" 41#define DRV_VERSION "1.5.0.23"
42#define PFX DRV_NAME ": " 42#define PFX DRV_NAME ": "
43#define DFX DRV_NAME "%d: " 43#define DFX DRV_NAME "%d: "
44 44
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 5f09d1814d26..42e15ee6e1bb 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -642,19 +642,6 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
642 INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); 642 INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
643 INIT_WORK(&fnic->event_work, fnic_handle_event); 643 INIT_WORK(&fnic->event_work, fnic_handle_event);
644 skb_queue_head_init(&fnic->fip_frame_queue); 644 skb_queue_head_init(&fnic->fip_frame_queue);
645 spin_lock_irqsave(&fnic_list_lock, flags);
646 if (!fnic_fip_queue) {
647 fnic_fip_queue =
648 create_singlethread_workqueue("fnic_fip_q");
649 if (!fnic_fip_queue) {
650 spin_unlock_irqrestore(&fnic_list_lock, flags);
651 printk(KERN_ERR PFX "fnic FIP work queue "
652 "create failed\n");
653 err = -ENOMEM;
654 goto err_out_free_max_pool;
655 }
656 }
657 spin_unlock_irqrestore(&fnic_list_lock, flags);
658 INIT_LIST_HEAD(&fnic->evlist); 645 INIT_LIST_HEAD(&fnic->evlist);
659 INIT_LIST_HEAD(&fnic->vlans); 646 INIT_LIST_HEAD(&fnic->vlans);
660 } else { 647 } else {
@@ -960,6 +947,13 @@ static int __init fnic_init_module(void)
960 spin_lock_init(&fnic_list_lock); 947 spin_lock_init(&fnic_list_lock);
961 INIT_LIST_HEAD(&fnic_list); 948 INIT_LIST_HEAD(&fnic_list);
962 949
950 fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
951 if (!fnic_fip_queue) {
952 printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
953 err = -ENOMEM;
954 goto err_create_fip_workq;
955 }
956
963 fnic_fc_transport = fc_attach_transport(&fnic_fc_functions); 957 fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
964 if (!fnic_fc_transport) { 958 if (!fnic_fc_transport) {
965 printk(KERN_ERR PFX "fc_attach_transport error\n"); 959 printk(KERN_ERR PFX "fc_attach_transport error\n");
@@ -978,6 +972,8 @@ static int __init fnic_init_module(void)
978err_pci_register: 972err_pci_register:
979 fc_release_transport(fnic_fc_transport); 973 fc_release_transport(fnic_fc_transport);
980err_fc_transport: 974err_fc_transport:
975 destroy_workqueue(fnic_fip_queue);
976err_create_fip_workq:
981 destroy_workqueue(fnic_event_queue); 977 destroy_workqueue(fnic_event_queue);
982err_create_fnic_workq: 978err_create_fnic_workq:
983 kmem_cache_destroy(fnic_io_req_cache); 979 kmem_cache_destroy(fnic_io_req_cache);
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 7b082157eb79..99d2930b18c8 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -185,7 +185,7 @@ static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
185 cmd_iu->_r_c = 0; 185 cmd_iu->_r_c = 0;
186 186
187 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd, 187 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd,
188 task->ssp_task.cmd->cmd_len / sizeof(u32)); 188 (task->ssp_task.cmd->cmd_len+3) / sizeof(u32));
189} 189}
190 190
191static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) 191static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
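
The isci fix replaces cmd_len / sizeof(u32) with (cmd_len + 3) / sizeof(u32), the standard round-up division idiom: a 10-byte CDB needs 3 dwords, but truncating division copies only 2 and drops the tail. A worked user-space example:

	#include <stdint.h>
	#include <stdio.h>

	static unsigned int dwords_truncated(unsigned int len)
	{
		return len / sizeof(uint32_t);		/* old code: drops the tail */
	}

	static unsigned int dwords_rounded_up(unsigned int len)
	{
		return (len + 3) / sizeof(uint32_t);	/* fixed code: covers it */
	}

	int main(void)
	{
		/* 10-byte CDB: 2 dwords copy only 8 bytes, 3 dwords cover all 10 */
		printf("%u vs %u\n", dwords_truncated(10), dwords_rounded_up(10));
		/* 16-byte CDB: both give 4, so the fix is a no-op here */
		printf("%u vs %u\n", dwords_truncated(16), dwords_rounded_up(16));
		return 0;
	}
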
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 9bb020ac089c..0d30ca849e8f 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -491,6 +491,7 @@ int isci_task_abort_task(struct sas_task *task)
491 struct isci_tmf tmf; 491 struct isci_tmf tmf;
492 int ret = TMF_RESP_FUNC_FAILED; 492 int ret = TMF_RESP_FUNC_FAILED;
493 unsigned long flags; 493 unsigned long flags;
494 int target_done_already = 0;
494 495
495 /* Get the isci_request reference from the task. Note that 496 /* Get the isci_request reference from the task. Note that
496 * this check does not depend on the pending request list 497 * this check does not depend on the pending request list
@@ -505,9 +506,11 @@ int isci_task_abort_task(struct sas_task *task)
505 /* If task is already done, the request isn't valid */ 506 /* If task is already done, the request isn't valid */
506 if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && 507 if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
507 (task->task_state_flags & SAS_TASK_AT_INITIATOR) && 508 (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
508 old_request) 509 old_request) {
509 idev = isci_get_device(task->dev->lldd_dev); 510 idev = isci_get_device(task->dev->lldd_dev);
510 511 target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
512 &old_request->flags);
513 }
511 spin_unlock(&task->task_state_lock); 514 spin_unlock(&task->task_state_lock);
512 spin_unlock_irqrestore(&ihost->scic_lock, flags); 515 spin_unlock_irqrestore(&ihost->scic_lock, flags);
513 516
@@ -561,7 +564,7 @@ int isci_task_abort_task(struct sas_task *task)
561 564
562 if (task->task_proto == SAS_PROTOCOL_SMP || 565 if (task->task_proto == SAS_PROTOCOL_SMP ||
563 sas_protocol_ata(task->task_proto) || 566 sas_protocol_ata(task->task_proto) ||
564 test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) || 567 target_done_already ||
565 test_bit(IDEV_GONE, &idev->flags)) { 568 test_bit(IDEV_GONE, &idev->flags)) {
566 569
567 spin_unlock_irqrestore(&ihost->scic_lock, flags); 570 spin_unlock_irqrestore(&ihost->scic_lock, flags);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 0177295599e0..1f0ca68409d4 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3547,11 +3547,21 @@ static int megasas_init_fw(struct megasas_instance *instance)
3547 break; 3547 break;
3548 } 3548 }
3549 3549
3550 /* 3550 if (megasas_transition_to_ready(instance, 0)) {
3551 * We expect the FW state to be READY 3551 atomic_set(&instance->fw_reset_no_pci_access, 1);
3552 */ 3552 instance->instancet->adp_reset
3553 if (megasas_transition_to_ready(instance, 0)) 3553 (instance, instance->reg_set);
3554 goto fail_ready_state; 3554 atomic_set(&instance->fw_reset_no_pci_access, 0);
3555 dev_info(&instance->pdev->dev,
3556 "megasas: FW restarted successfully from %s!\n",
3557 __func__);
3558
 3559	 /* wait about 30 seconds before retrying */
3560 ssleep(30);
3561
3562 if (megasas_transition_to_ready(instance, 0))
3563 goto fail_ready_state;
3564 }
3555 3565
3556 /* 3566 /*
3557 * MSI-X host index 0 is common for all adapter. 3567 * MSI-X host index 0 is common for all adapter.
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index f14665a6293d..6b1b4e91e53f 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1857,11 +1857,16 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1857 goto out; 1857 goto out;
1858 } 1858 }
1859 1859
1860 /* error info record present */ 1860 /*
1861 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { 1861 * error info record present; slot->response is 32 bit aligned but may
1862 * not be 64 bit aligned, so check for zero in two 32 bit reads
1863 */
1864 if (unlikely((rx_desc & RXQ_ERR)
1865 && (*((u32 *)slot->response)
1866 || *(((u32 *)slot->response) + 1)))) {
1862 mv_dprintk("port %d slot %d rx_desc %X has error info" 1867 mv_dprintk("port %d slot %d rx_desc %X has error info"
1863 "%016llX.\n", slot->port->sas_port.id, slot_idx, 1868 "%016llX.\n", slot->port->sas_port.id, slot_idx,
1864 rx_desc, (u64)(*(u64 *)slot->response)); 1869 rx_desc, get_unaligned_le64(slot->response));
1865 tstat->stat = mvs_slot_err(mvi, task, slot_idx); 1870 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1866 tstat->resp = SAS_TASK_COMPLETE; 1871 tstat->resp = SAS_TASK_COMPLETE;
1867 goto out; 1872 goto out;
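
The mv_sas comment above states the constraint: slot->response is 32-bit aligned but not necessarily 64-bit aligned, so a direct u64 dereference is undefined and faults on strict-alignment machines. In user space, memcpy() is the portable stand-in for the kernel's get_unaligned_le64(); a sketch:

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	static uint64_t read_u64_unaligned(const void *p)
	{
		uint64_t v;

		memcpy(&v, p, sizeof(v));	/* compiles to a safe access everywhere */
		return v;
	}

	int main(void)
	{
		/* 8-byte-aligned backing, so &backing[1] is 4- but not 8-aligned */
		_Alignas(8) uint32_t backing[4] = { 0, 0x11223344, 0x55667788, 0 };
		const void *resp = &backing[1];
		const uint32_t *w = resp;

		/* the fixed check: zero test via two 32-bit reads, no u64 deref */
		if (w[0] || w[1])
			printf("error info %016llx\n",
			       (unsigned long long)read_u64_unaligned(resp));
		return 0;
	}
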
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 60e2fb7f2dca..d6b19dc80bee 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -39,6 +39,7 @@
39#include <linux/irq.h> 39#include <linux/irq.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/vmalloc.h> 41#include <linux/vmalloc.h>
42#include <asm/unaligned.h>
42#include <scsi/libsas.h> 43#include <scsi/libsas.h>
43#include <scsi/scsi.h> 44#include <scsi/scsi.h>
44#include <scsi/scsi_tcq.h> 45#include <scsi/scsi_tcq.h>
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 42ef481db942..ef0a5481b9dd 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -419,6 +419,8 @@ qla2x00_start_scsi(srb_t *sp)
419 __constant_cpu_to_le16(CF_SIMPLE_TAG); 419 __constant_cpu_to_le16(CF_SIMPLE_TAG);
420 break; 420 break;
421 } 421 }
422 } else {
423 cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
422 } 424 }
423 425
424 /* Load SCSI command packet. */ 426 /* Load SCSI command packet. */
@@ -1307,11 +1309,11 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1307 fcp_cmnd->task_attribute = TSK_ORDERED; 1309 fcp_cmnd->task_attribute = TSK_ORDERED;
1308 break; 1310 break;
1309 default: 1311 default:
1310 fcp_cmnd->task_attribute = 0; 1312 fcp_cmnd->task_attribute = TSK_SIMPLE;
1311 break; 1313 break;
1312 } 1314 }
1313 } else { 1315 } else {
1314 fcp_cmnd->task_attribute = 0; 1316 fcp_cmnd->task_attribute = TSK_SIMPLE;
1315 } 1317 }
1316 1318
1317 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ 1319 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
@@ -1525,7 +1527,12 @@ qla24xx_start_scsi(srb_t *sp)
1525 case ORDERED_QUEUE_TAG: 1527 case ORDERED_QUEUE_TAG:
1526 cmd_pkt->task = TSK_ORDERED; 1528 cmd_pkt->task = TSK_ORDERED;
1527 break; 1529 break;
1530 default:
1531 cmd_pkt->task = TSK_SIMPLE;
1532 break;
1528 } 1533 }
1534 } else {
1535 cmd_pkt->task = TSK_SIMPLE;
1529 } 1536 }
1530 1537
1531 /* Load SCSI command packet. */ 1538 /* Load SCSI command packet. */
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 3b1ea34e1f5a..eaa808e6ba91 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1031,6 +1031,9 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
1031{ 1031{
1032 int i, result; 1032 int i, result;
1033 1033
1034 if (sdev->skip_vpd_pages)
1035 goto fail;
1036
1034 /* Ask for all the pages supported by this device */ 1037 /* Ask for all the pages supported by this device */
1035 result = scsi_vpd_inquiry(sdev, buf, 0, buf_len); 1038 result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
1036 if (result) 1039 if (result)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 80f39b8b0223..86fcf2c313ad 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -838,10 +838,17 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
838 838
839static void sd_unprep_fn(struct request_queue *q, struct request *rq) 839static void sd_unprep_fn(struct request_queue *q, struct request *rq)
840{ 840{
841 struct scsi_cmnd *SCpnt = rq->special;
842
841 if (rq->cmd_flags & REQ_DISCARD) { 843 if (rq->cmd_flags & REQ_DISCARD) {
842 free_page((unsigned long)rq->buffer); 844 free_page((unsigned long)rq->buffer);
843 rq->buffer = NULL; 845 rq->buffer = NULL;
844 } 846 }
847 if (SCpnt->cmnd != rq->cmd) {
848 mempool_free(SCpnt->cmnd, sd_cdb_pool);
849 SCpnt->cmnd = NULL;
850 SCpnt->cmd_len = 0;
851 }
845} 852}
846 853
847/** 854/**
@@ -1720,21 +1727,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1720 if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt)) 1727 if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
1721 sd_dif_complete(SCpnt, good_bytes); 1728 sd_dif_complete(SCpnt, good_bytes);
1722 1729
1723 if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
1724 == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
1725
1726 /* We have to print a failed command here as the
1727 * extended CDB gets freed before scsi_io_completion()
1728 * is called.
1729 */
1730 if (result)
1731 scsi_print_command(SCpnt);
1732
1733 mempool_free(SCpnt->cmnd, sd_cdb_pool);
1734 SCpnt->cmnd = NULL;
1735 SCpnt->cmd_len = 0;
1736 }
1737
1738 return good_bytes; 1730 return good_bytes;
1739} 1731}
1740 1732
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 2168258fb2c3..74b88efde6ad 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -751,7 +751,7 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
751 751
752 vscsi->affinity_hint_set = true; 752 vscsi->affinity_hint_set = true;
753 } else { 753 } else {
754 for (i = 0; i < vscsi->num_queues - VIRTIO_SCSI_VQ_BASE; i++) 754 for (i = 0; i < vscsi->num_queues; i++)
755 virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); 755 virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
756 756
757 vscsi->affinity_hint_set = false; 757 vscsi->affinity_hint_set = false;
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 222d3e37fc28..707966bd5610 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -609,7 +609,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
609 else 609 else
610 buf = (void *)t->tx_buf; 610 buf = (void *)t->tx_buf;
611 t->tx_dma = dma_map_single(&spi->dev, buf, 611 t->tx_dma = dma_map_single(&spi->dev, buf,
612 t->len, DMA_FROM_DEVICE); 612 t->len, DMA_TO_DEVICE);
613 if (!t->tx_dma) { 613 if (!t->tx_dma) {
614 ret = -EFAULT; 614 ret = -EFAULT;
615 goto err_tx_map; 615 goto err_tx_map;
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index 080abf2faf97..a8c344422a77 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -469,7 +469,7 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
469 unsigned long nr_segs, loff_t ppos) 469 unsigned long nr_segs, loff_t ppos)
470{ 470{
471 struct logger_log *log = file_get_log(iocb->ki_filp); 471 struct logger_log *log = file_get_log(iocb->ki_filp);
472 size_t orig = log->w_off; 472 size_t orig;
473 struct logger_entry header; 473 struct logger_entry header;
474 struct timespec now; 474 struct timespec now;
475 ssize_t ret = 0; 475 ssize_t ret = 0;
@@ -490,6 +490,8 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
490 490
491 mutex_lock(&log->mutex); 491 mutex_lock(&log->mutex);
492 492
493 orig = log->w_off;
494
493 /* 495 /*
494 * Fix up any readers, pulling them forward to the first readable 496 * Fix up any readers, pulling them forward to the first readable
495 * entry after (what will be) the new write offset. We do this now 497 * entry after (what will be) the new write offset. We do this now
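
The logger fix moves the read of log->w_off inside the mutex: sampled outside, the snapshot can already be stale when the lock is finally taken, and the later rollback path would rewind to the wrong offset. The discipline in a minimal sketch (illustrative struct, pthread mutex for the kernel mutex):

	#include <pthread.h>
	#include <stddef.h>

	struct logbuf {
		pthread_mutex_t mutex;
		size_t w_off;			/* protected by mutex */
	};

	/* buggy shape: w_off sampled before the lock can already be stale */
	static size_t begin_write_buggy(struct logbuf *log)
	{
		size_t orig = log->w_off;
		pthread_mutex_lock(&log->mutex);
		return orig;
	}

	/* fixed shape: snapshot taken only while holding the lock */
	static size_t begin_write_fixed(struct logbuf *log)
	{
		pthread_mutex_lock(&log->mutex);
		return log->w_off;
	}

	int main(void)
	{
		struct logbuf log = { PTHREAD_MUTEX_INITIALIZER, 0 };
		size_t orig = begin_write_fixed(&log);

		log.w_off = orig + 16;		/* append 16 bytes */
		pthread_mutex_unlock(&log.mutex);
		(void)begin_write_buggy;	/* kept only for contrast */
		return 0;
	}
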
diff --git a/drivers/staging/comedi/TODO b/drivers/staging/comedi/TODO
index b10f739b7e3e..fa8da9aada30 100644
--- a/drivers/staging/comedi/TODO
+++ b/drivers/staging/comedi/TODO
@@ -9,4 +9,4 @@ TODO:
9Please send patches to Greg Kroah-Hartman <greg@kroah.com> and 9Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
10copy: 10copy:
11 Ian Abbott <abbotti@mev.co.uk> 11 Ian Abbott <abbotti@mev.co.uk>
12 Frank Mori Hess <fmhess@users.sourceforge.net> 12 H Hartley Sweeten <hsweeten@visionengravers.com>
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 8647518259f6..f4a197b2d1fd 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -1413,22 +1413,19 @@ static int do_cmd_ioctl(struct comedi_device *dev,
1413 DPRINTK("subdevice busy\n"); 1413 DPRINTK("subdevice busy\n");
1414 return -EBUSY; 1414 return -EBUSY;
1415 } 1415 }
1416 s->busy = file;
1417 1416
1418 /* make sure channel/gain list isn't too long */ 1417 /* make sure channel/gain list isn't too long */
1419 if (cmd.chanlist_len > s->len_chanlist) { 1418 if (cmd.chanlist_len > s->len_chanlist) {
1420 DPRINTK("channel/gain list too long %u > %d\n", 1419 DPRINTK("channel/gain list too long %u > %d\n",
1421 cmd.chanlist_len, s->len_chanlist); 1420 cmd.chanlist_len, s->len_chanlist);
1422 ret = -EINVAL; 1421 return -EINVAL;
1423 goto cleanup;
1424 } 1422 }
1425 1423
1426 /* make sure channel/gain list isn't too short */ 1424 /* make sure channel/gain list isn't too short */
1427 if (cmd.chanlist_len < 1) { 1425 if (cmd.chanlist_len < 1) {
1428 DPRINTK("channel/gain list too short %u < 1\n", 1426 DPRINTK("channel/gain list too short %u < 1\n",
1429 cmd.chanlist_len); 1427 cmd.chanlist_len);
1430 ret = -EINVAL; 1428 return -EINVAL;
1431 goto cleanup;
1432 } 1429 }
1433 1430
1434 async->cmd = cmd; 1431 async->cmd = cmd;
@@ -1438,8 +1435,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
1438 kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL); 1435 kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL);
1439 if (!async->cmd.chanlist) { 1436 if (!async->cmd.chanlist) {
1440 DPRINTK("allocation failed\n"); 1437 DPRINTK("allocation failed\n");
1441 ret = -ENOMEM; 1438 return -ENOMEM;
1442 goto cleanup;
1443 } 1439 }
1444 1440
1445 if (copy_from_user(async->cmd.chanlist, user_chanlist, 1441 if (copy_from_user(async->cmd.chanlist, user_chanlist,
@@ -1491,6 +1487,9 @@ static int do_cmd_ioctl(struct comedi_device *dev,
1491 1487
1492 comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING); 1488 comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING);
1493 1489
1490 /* set s->busy _after_ setting SRF_RUNNING flag to avoid race with
1491 * comedi_read() or comedi_write() */
1492 s->busy = file;
1494 ret = s->do_cmd(dev, s); 1493 ret = s->do_cmd(dev, s);
1495 if (ret == 0) 1494 if (ret == 0)
1496 return 0; 1495 return 0;
@@ -1705,6 +1704,7 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
1705 void *file) 1704 void *file)
1706{ 1705{
1707 struct comedi_subdevice *s; 1706 struct comedi_subdevice *s;
1707 int ret;
1708 1708
1709 if (arg >= dev->n_subdevices) 1709 if (arg >= dev->n_subdevices)
1710 return -EINVAL; 1710 return -EINVAL;
@@ -1721,7 +1721,11 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
1721 if (s->busy != file) 1721 if (s->busy != file)
1722 return -EBUSY; 1722 return -EBUSY;
1723 1723
1724 return do_cancel(dev, s); 1724 ret = do_cancel(dev, s);
1725 if (comedi_get_subdevice_runflags(s) & SRF_USER)
1726 wake_up_interruptible(&s->async->wait_head);
1727
1728 return ret;
1725} 1729}
1726 1730
1727/* 1731/*
@@ -2053,11 +2057,13 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
2053 2057
2054 if (!comedi_is_subdevice_running(s)) { 2058 if (!comedi_is_subdevice_running(s)) {
2055 if (count == 0) { 2059 if (count == 0) {
2060 mutex_lock(&dev->mutex);
2056 if (comedi_is_subdevice_in_error(s)) 2061 if (comedi_is_subdevice_in_error(s))
2057 retval = -EPIPE; 2062 retval = -EPIPE;
2058 else 2063 else
2059 retval = 0; 2064 retval = 0;
2060 do_become_nonbusy(dev, s); 2065 do_become_nonbusy(dev, s);
2066 mutex_unlock(&dev->mutex);
2061 } 2067 }
2062 break; 2068 break;
2063 } 2069 }
@@ -2156,11 +2162,13 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
2156 2162
2157 if (n == 0) { 2163 if (n == 0) {
2158 if (!comedi_is_subdevice_running(s)) { 2164 if (!comedi_is_subdevice_running(s)) {
2165 mutex_lock(&dev->mutex);
2159 do_become_nonbusy(dev, s); 2166 do_become_nonbusy(dev, s);
2160 if (comedi_is_subdevice_in_error(s)) 2167 if (comedi_is_subdevice_in_error(s))
2161 retval = -EPIPE; 2168 retval = -EPIPE;
2162 else 2169 else
2163 retval = 0; 2170 retval = 0;
2171 mutex_unlock(&dev->mutex);
2164 break; 2172 break;
2165 } 2173 }
2166 if (file->f_flags & O_NONBLOCK) { 2174 if (file->f_flags & O_NONBLOCK) {
@@ -2198,9 +2206,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
2198 buf += n; 2206 buf += n;
2199 break; /* makes device work like a pipe */ 2207 break; /* makes device work like a pipe */
2200 } 2208 }
2201 if (comedi_is_subdevice_idle(s) && 2209 if (comedi_is_subdevice_idle(s)) {
2202 async->buf_read_count - async->buf_write_count == 0) { 2210 mutex_lock(&dev->mutex);
2203 do_become_nonbusy(dev, s); 2211 if (async->buf_read_count - async->buf_write_count == 0)
2212 do_become_nonbusy(dev, s);
2213 mutex_unlock(&dev->mutex);
2204 } 2214 }
2205 set_current_state(TASK_RUNNING); 2215 set_current_state(TASK_RUNNING);
2206 remove_wait_queue(&async->wait_head, &wait); 2216 remove_wait_queue(&async->wait_head, &wait);
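
The comedi_fops.c hunks above close two races: s->busy is set only after
SRF_RUNNING is published, and do_become_nonbusy() is now called with
dev->mutex held, so comedi_read()/comedi_write() cannot tear down a
subdevice mid-command. A minimal userspace sketch of that ordering, with a
pthread mutex standing in for the kernel mutex (the names below are
illustrative stand-ins, not the comedi API):

    #include <pthread.h>
    #include <stdio.h>

    #define SRF_RUNNING 0x1u

    /* stand-ins for the subdevice state touched in the hunks above */
    static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int runflags;
    static void *busy;                 /* owning file, NULL when idle */

    static void start_command(void *file)
    {
            pthread_mutex_lock(&dev_mutex);
            runflags |= SRF_RUNNING;   /* publish "running" first ...  */
            busy = file;               /* ... then mark the owner busy */
            pthread_mutex_unlock(&dev_mutex);
    }

    static void become_nonbusy(void)
    {
            pthread_mutex_lock(&dev_mutex);   /* readers take this too */
            runflags &= ~SRF_RUNNING;
            busy = NULL;
            pthread_mutex_unlock(&dev_mutex);
    }

    int main(void)
    {
            int file;

            start_command(&file);
            become_nonbusy();
            printf("busy=%p runflags=%#x\n", busy, runflags);
            return 0;
    }
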
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index e25eba5713c1..b3b5125faa72 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -482,7 +482,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
482 ret = comedi_device_postconfig(dev); 482 ret = comedi_device_postconfig(dev);
483 if (ret < 0) { 483 if (ret < 0) {
484 comedi_device_detach(dev); 484 comedi_device_detach(dev);
485 module_put(dev->driver->module); 485 module_put(driv->module);
486 } 486 }
487 /* On success, the driver module count has been incremented. */ 487 /* On success, the driver module count has been incremented. */
488 return ret; 488 return ret;
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index 5590ebf1da15..817f837b240d 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -827,11 +827,11 @@ static void usb_alphatrack_disconnect(struct usb_interface *intf)
827 mutex_unlock(&dev->mtx); 827 mutex_unlock(&dev->mtx);
828 usb_alphatrack_delete(dev); 828 usb_alphatrack_delete(dev);
829 } else { 829 } else {
830 atomic_set(&dev->writes_pending, 0);
830 dev->intf = NULL; 831 dev->intf = NULL;
831 mutex_unlock(&dev->mtx); 832 mutex_unlock(&dev->mtx);
832 } 833 }
833 834
834 atomic_set(&dev->writes_pending, 0);
835 mutex_unlock(&disconnect_mutex); 835 mutex_unlock(&disconnect_mutex);
836 836
837 dev_info(&intf->dev, "Alphatrack Surface #%d now disconnected\n", 837 dev_info(&intf->dev, "Alphatrack Surface #%d now disconnected\n",
diff --git a/drivers/staging/gdm72xx/gdm_qos.c b/drivers/staging/gdm72xx/gdm_qos.c
index b795353e8348..cc3692439a5c 100644
--- a/drivers/staging/gdm72xx/gdm_qos.c
+++ b/drivers/staging/gdm72xx/gdm_qos.c
@@ -250,8 +250,8 @@ static void send_qos_list(struct nic *nic, struct list_head *head)
250 250
251 list_for_each_entry_safe(entry, n, head, list) { 251 list_for_each_entry_safe(entry, n, head, list) {
252 list_del(&entry->list); 252 list_del(&entry->list);
253 free_qos_entry(entry);
254 gdm_wimax_send_tx(entry->skb, entry->dev); 253 gdm_wimax_send_tx(entry->skb, entry->dev);
254 free_qos_entry(entry);
255 } 255 }
256} 256}
257 257
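
The send_qos_list() hunk is a straight use-after-free fix: free_qos_entry()
was called before entry->skb and entry->dev were read. A self-contained
sketch of the corrected ordering (the struct below is invented for
illustration, not the gdm72xx type):

    #include <stdio.h>
    #include <stdlib.h>

    struct qos_entry {
            struct qos_entry *next;
            int skb;                 /* stands in for entry->skb */
    };

    static void send_tx(int skb)
    {
            printf("tx %d\n", skb);
    }

    static void send_list(struct qos_entry *head)
    {
            struct qos_entry *e, *n;

            for (e = head; e; e = n) {
                    n = e->next;
                    send_tx(e->skb); /* use the entry first ...     */
                    free(e);         /* ... free it only afterwards */
            }
    }

    int main(void)
    {
            struct qos_entry *e = calloc(1, sizeof(*e));

            if (!e)
                    return 1;
            e->skb = 42;
            send_list(e);
            return 0;
    }
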
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index 22339059837f..bd0f2fd01db4 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -33,7 +33,6 @@ config DRM_IMX_TVE
33config DRM_IMX_LDB 33config DRM_IMX_LDB
34 tristate "Support for LVDS displays" 34 tristate "Support for LVDS displays"
35 depends on DRM_IMX 35 depends on DRM_IMX
36 select OF_VIDEOMODE
37 help 36 help
38 Choose this to enable the internal LVDS Display Bridge (LDB) 37 Choose this to enable the internal LVDS Display Bridge (LDB)
39 found on i.MX53 and i.MX6 processors. 38 found on i.MX53 and i.MX6 processors.
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 9854a1daf606..e826086ec308 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -69,28 +69,20 @@ struct imx_drm_connector {
69 struct module *owner; 69 struct module *owner;
70}; 70};
71 71
72static int imx_drm_driver_firstopen(struct drm_device *drm)
73{
74 if (!imx_drm_device_get())
75 return -EINVAL;
76
77 return 0;
78}
79
80static void imx_drm_driver_lastclose(struct drm_device *drm) 72static void imx_drm_driver_lastclose(struct drm_device *drm)
81{ 73{
82 struct imx_drm_device *imxdrm = drm->dev_private; 74 struct imx_drm_device *imxdrm = drm->dev_private;
83 75
84 if (imxdrm->fbhelper) 76 if (imxdrm->fbhelper)
85 drm_fbdev_cma_restore_mode(imxdrm->fbhelper); 77 drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
86
87 imx_drm_device_put();
88} 78}
89 79
90static int imx_drm_driver_unload(struct drm_device *drm) 80static int imx_drm_driver_unload(struct drm_device *drm)
91{ 81{
92 struct imx_drm_device *imxdrm = drm->dev_private; 82 struct imx_drm_device *imxdrm = drm->dev_private;
93 83
84 imx_drm_device_put();
85
94 drm_mode_config_cleanup(imxdrm->drm); 86 drm_mode_config_cleanup(imxdrm->drm);
95 drm_kms_helper_poll_fini(imxdrm->drm); 87 drm_kms_helper_poll_fini(imxdrm->drm);
96 88
@@ -207,7 +199,6 @@ static const struct file_operations imx_drm_driver_fops = {
207 .unlocked_ioctl = drm_ioctl, 199 .unlocked_ioctl = drm_ioctl,
208 .mmap = drm_gem_cma_mmap, 200 .mmap = drm_gem_cma_mmap,
209 .poll = drm_poll, 201 .poll = drm_poll,
210 .fasync = drm_fasync,
211 .read = drm_read, 202 .read = drm_read,
212 .llseek = noop_llseek, 203 .llseek = noop_llseek,
213}; 204};
@@ -226,8 +217,6 @@ struct drm_device *imx_drm_device_get(void)
226 struct imx_drm_connector *con; 217 struct imx_drm_connector *con;
227 struct imx_drm_crtc *crtc; 218 struct imx_drm_crtc *crtc;
228 219
229 mutex_lock(&imxdrm->mutex);
230
231 list_for_each_entry(enc, &imxdrm->encoder_list, list) { 220 list_for_each_entry(enc, &imxdrm->encoder_list, list) {
232 if (!try_module_get(enc->owner)) { 221 if (!try_module_get(enc->owner)) {
233 dev_err(imxdrm->dev, "could not get module %s\n", 222 dev_err(imxdrm->dev, "could not get module %s\n",
@@ -254,8 +243,6 @@ struct drm_device *imx_drm_device_get(void)
254 243
255 imxdrm->references++; 244 imxdrm->references++;
256 245
257 mutex_unlock(&imxdrm->mutex);
258
259 return imxdrm->drm; 246 return imxdrm->drm;
260 247
261unwind_crtc: 248unwind_crtc:
@@ -447,6 +434,9 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
447 */ 434 */
448 imxdrm->drm->vblank_disable_allowed = 1; 435 imxdrm->drm->vblank_disable_allowed = 1;
449 436
437 if (!imx_drm_device_get())
438 ret = -EINVAL;
439
450 ret = 0; 440 ret = 0;
451 441
452err_init: 442err_init:
@@ -783,7 +773,7 @@ int imx_drm_remove_connector(struct imx_drm_connector *imx_drm_connector)
783} 773}
784EXPORT_SYMBOL_GPL(imx_drm_remove_connector); 774EXPORT_SYMBOL_GPL(imx_drm_remove_connector);
785 775
786static struct drm_ioctl_desc imx_drm_ioctls[] = { 776static const struct drm_ioctl_desc imx_drm_ioctls[] = {
787 /* none so far */ 777 /* none so far */
788}; 778};
789 779
@@ -791,13 +781,12 @@ static struct drm_driver imx_drm_driver = {
791 .driver_features = DRIVER_MODESET | DRIVER_GEM, 781 .driver_features = DRIVER_MODESET | DRIVER_GEM,
792 .load = imx_drm_driver_load, 782 .load = imx_drm_driver_load,
793 .unload = imx_drm_driver_unload, 783 .unload = imx_drm_driver_unload,
794 .firstopen = imx_drm_driver_firstopen,
795 .lastclose = imx_drm_driver_lastclose, 784 .lastclose = imx_drm_driver_lastclose,
796 .gem_free_object = drm_gem_cma_free_object, 785 .gem_free_object = drm_gem_cma_free_object,
797 .gem_vm_ops = &drm_gem_cma_vm_ops, 786 .gem_vm_ops = &drm_gem_cma_vm_ops,
798 .dumb_create = drm_gem_cma_dumb_create, 787 .dumb_create = drm_gem_cma_dumb_create,
799 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 788 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
800 .dumb_destroy = drm_gem_cma_dumb_destroy, 789 .dumb_destroy = drm_gem_dumb_destroy,
801 790
802 .get_vblank_counter = drm_vblank_count, 791 .get_vblank_counter = drm_vblank_count,
803 .enable_vblank = imx_drm_enable_vblank, 792 .enable_vblank = imx_drm_enable_vblank,
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index 9176a8171e6f..e39690a03e38 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -129,7 +129,8 @@ static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode)
129 129
130static int ipu_page_flip(struct drm_crtc *crtc, 130static int ipu_page_flip(struct drm_crtc *crtc,
131 struct drm_framebuffer *fb, 131 struct drm_framebuffer *fb,
132 struct drm_pending_vblank_event *event) 132 struct drm_pending_vblank_event *event,
133 uint32_t page_flip_flags)
133{ 134{
134 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); 135 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
135 int ret; 136 int ret;
diff --git a/drivers/staging/tidspbridge/pmgr/dbll.c b/drivers/staging/tidspbridge/pmgr/dbll.c
index c191ae203565..41e88abe47af 100644
--- a/drivers/staging/tidspbridge/pmgr/dbll.c
+++ b/drivers/staging/tidspbridge/pmgr/dbll.c
@@ -1120,8 +1120,11 @@ static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
1120 or DYN_EXTERNAL, then mem granularity information is present 1120 or DYN_EXTERNAL, then mem granularity information is present
1121 within the section name - only process if there are at least three 1121 within the section name - only process if there are at least three
1122 tokens within the section name (just a minor optimization) */ 1122 tokens within the section name (just a minor optimization) */
1123 if (count >= 3) 1123 if (count >= 3) {
1124 strict_strtol(sz_last_token, 10, (long *)&req); 1124 status = kstrtos32(sz_last_token, 10, &req);
1125 if (status)
1126 goto func_cont;
1127 }
1125 1128
1126 if ((req == 0) || (req == 1)) { 1129 if ((req == 0) || (req == 1)) {
1127 if (strcmp(sz_sec_last_token, "DYN_DARAM") == 0) { 1130 if (strcmp(sz_sec_last_token, "DYN_DARAM") == 0) {
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index dcceed29d31a..81972fa47beb 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1811,10 +1811,12 @@ static int zcache_comp_init(void)
1811#else 1811#else
1812 if (*zcache_comp_name != '\0') { 1812 if (*zcache_comp_name != '\0') {
1813 ret = crypto_has_comp(zcache_comp_name, 0, 0); 1813 ret = crypto_has_comp(zcache_comp_name, 0, 0);
1814 if (!ret) 1814 if (!ret) {
1815 pr_info("zcache: %s not supported\n", 1815 pr_info("zcache: %s not supported\n",
1816 zcache_comp_name); 1816 zcache_comp_name);
1817 goto out; 1817 ret = 1;
1818 goto out;
1819 }
1818 } 1820 }
1819 if (!ret) 1821 if (!ret)
1820 strcpy(zcache_comp_name, "lzo"); 1822 strcpy(zcache_comp_name, "lzo");
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 82c7202fd5cc..e77fb6ea40c9 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -527,8 +527,11 @@ static void zram_reset_device(struct zram *zram)
527 size_t index; 527 size_t index;
528 struct zram_meta *meta; 528 struct zram_meta *meta;
529 529
530 if (!zram->init_done) 530 down_write(&zram->init_lock);
531 if (!zram->init_done) {
532 up_write(&zram->init_lock);
531 return; 533 return;
534 }
532 535
533 meta = zram->meta; 536 meta = zram->meta;
534 zram->init_done = 0; 537 zram->init_done = 0;
@@ -549,6 +552,7 @@ static void zram_reset_device(struct zram *zram)
549 552
550 zram->disksize = 0; 553 zram->disksize = 0;
551 set_capacity(zram->disk, 0); 554 set_capacity(zram->disk, 0);
555 up_write(&zram->init_lock);
552} 556}
553 557
554static void zram_init_device(struct zram *zram, struct zram_meta *meta) 558static void zram_init_device(struct zram *zram, struct zram_meta *meta)
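
The zram hunk serialises zram_reset_device() against initialisation by
holding init_lock (a rw_semaphore) across the whole reset, and the early
"not initialised" return must now drop the lock before bailing out. A
userspace analogue with a pthread rwlock standing in for
down_write()/up_write():

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t init_lock = PTHREAD_RWLOCK_INITIALIZER;
    static int init_done;

    static void reset_device(void)
    {
            pthread_rwlock_wrlock(&init_lock);        /* down_write() */
            if (!init_done) {
                    /* every exit path must release the lock */
                    pthread_rwlock_unlock(&init_lock);
                    return;
            }
            init_done = 0;
            /* ... free metadata, zero the disk capacity ... */
            pthread_rwlock_unlock(&init_lock);        /* up_write()   */
    }

    int main(void)
    {
            init_done = 1;
            reset_device();
            printf("init_done=%d\n", init_done);
            return 0;
    }
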
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 5de56f671a9d..f36950e4134f 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -54,6 +54,8 @@ MODULE_PARM_DESC(notify_delay_ms,
54* is some wrong values returned by cpuid for number of thresholds. 54* is some wrong values returned by cpuid for number of thresholds.
55*/ 55*/
56#define MAX_NUMBER_OF_TRIPS 2 56#define MAX_NUMBER_OF_TRIPS 2
57/* Limit number of package temp zones */
58#define MAX_PKG_TEMP_ZONE_IDS 256
57 59
58struct phy_dev_entry { 60struct phy_dev_entry {
59 struct list_head list; 61 struct list_head list;
@@ -394,12 +396,16 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
394 char buffer[30]; 396 char buffer[30];
395 int thres_count; 397 int thres_count;
396 u32 eax, ebx, ecx, edx; 398 u32 eax, ebx, ecx, edx;
399 u8 *temp;
397 400
398 cpuid(6, &eax, &ebx, &ecx, &edx); 401 cpuid(6, &eax, &ebx, &ecx, &edx);
399 thres_count = ebx & 0x07; 402 thres_count = ebx & 0x07;
400 if (!thres_count) 403 if (!thres_count)
401 return -ENODEV; 404 return -ENODEV;
402 405
406 if (topology_physical_package_id(cpu) > MAX_PKG_TEMP_ZONE_IDS)
407 return -ENODEV;
408
403 thres_count = clamp_val(thres_count, 0, MAX_NUMBER_OF_TRIPS); 409 thres_count = clamp_val(thres_count, 0, MAX_NUMBER_OF_TRIPS);
404 410
405 err = get_tj_max(cpu, &tj_max); 411 err = get_tj_max(cpu, &tj_max);
@@ -417,13 +423,14 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
417 spin_lock(&pkg_work_lock); 423 spin_lock(&pkg_work_lock);
418 if (topology_physical_package_id(cpu) > max_phy_id) 424 if (topology_physical_package_id(cpu) > max_phy_id)
419 max_phy_id = topology_physical_package_id(cpu); 425 max_phy_id = topology_physical_package_id(cpu);
420 pkg_work_scheduled = krealloc(pkg_work_scheduled, 426 temp = krealloc(pkg_work_scheduled,
421 (max_phy_id+1) * sizeof(u8), GFP_ATOMIC); 427 (max_phy_id+1) * sizeof(u8), GFP_ATOMIC);
422 if (!pkg_work_scheduled) { 428 if (!temp) {
423 spin_unlock(&pkg_work_lock); 429 spin_unlock(&pkg_work_lock);
424 err = -ENOMEM; 430 err = -ENOMEM;
425 goto err_ret_free; 431 goto err_ret_free;
426 } 432 }
433 pkg_work_scheduled = temp;
427 pkg_work_scheduled[topology_physical_package_id(cpu)] = 0; 434 pkg_work_scheduled[topology_physical_package_id(cpu)] = 0;
428 spin_unlock(&pkg_work_lock); 435 spin_unlock(&pkg_work_lock);
429 436
@@ -511,7 +518,7 @@ static int get_core_online(unsigned int cpu)
511 518
512 /* Check if there is already an instance for this package */ 519 /* Check if there is already an instance for this package */
513 if (!phdev) { 520 if (!phdev) {
514 if (!cpu_has(c, X86_FEATURE_DTHERM) && 521 if (!cpu_has(c, X86_FEATURE_DTHERM) ||
515 !cpu_has(c, X86_FEATURE_PTS)) 522 !cpu_has(c, X86_FEATURE_PTS))
516 return -ENODEV; 523 return -ENODEV;
517 if (pkg_temp_thermal_device_add(cpu)) 524 if (pkg_temp_thermal_device_add(cpu))
@@ -562,7 +569,7 @@ static struct notifier_block pkg_temp_thermal_notifier __refdata = {
562}; 569};
563 570
564static const struct x86_cpu_id __initconst pkg_temp_thermal_ids[] = { 571static const struct x86_cpu_id __initconst pkg_temp_thermal_ids[] = {
565 { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM }, 572 { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_PTS },
566 {} 573 {}
567}; 574};
568MODULE_DEVICE_TABLE(x86cpu, pkg_temp_thermal_ids); 575MODULE_DEVICE_TABLE(x86cpu, pkg_temp_thermal_ids);
@@ -592,7 +599,6 @@ static int __init pkg_temp_thermal_init(void)
592 return 0; 599 return 0;
593 600
594err_ret: 601err_ret:
595 get_online_cpus();
596 for_each_online_cpu(i) 602 for_each_online_cpu(i)
597 put_core_offline(i); 603 put_core_offline(i);
598 put_online_cpus(); 604 put_online_cpus();
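
Two independent fixes sit in the x86_pkg_temp_thermal hunks: the feature
test in get_core_online() needs || (either missing feature disqualifies
the CPU), and krealloc() must go through a temporary, because on failure
it returns NULL while the old allocation stays live, so assigning the
result straight to pkg_work_scheduled leaked the only pointer to it. The
same idiom, shown here with plain realloc():

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            size_t n = 4;
            unsigned char *work = calloc(n, 1);
            unsigned char *tmp;

            if (!work)
                    return 1;
            /* grow through a temporary so failure doesn't leak 'work' */
            tmp = realloc(work, n + 1);
            if (!tmp) {
                    free(work);      /* old buffer is still valid here */
                    return 1;
            }
            work = tmp;
            work[n] = 0;
            printf("grew to %zu bytes\n", n + 1);
            free(work);
            return 0;
    }
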
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 721904f8efa9..946ddd2b3a54 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -193,7 +193,8 @@ static int __init parse_options(struct early_serial8250_device *device,
193 if (options) { 193 if (options) {
194 options++; 194 options++;
195 device->baud = simple_strtoul(options, NULL, 0); 195 device->baud = simple_strtoul(options, NULL, 0);
196 length = min(strcspn(options, " "), sizeof(device->options)); 196 length = min(strcspn(options, " ") + 1,
197 sizeof(device->options));
197 strlcpy(device->options, options, length); 198 strlcpy(device->options, options, length);
198 } else { 199 } else {
199 device->baud = probe_baud(port); 200 device->baud = probe_baud(port);
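
The parse_options() fix accounts for strlcpy() semantics: its size
argument includes the terminating NUL, so copying the option token intact
needs strcspn() + 1, clamped to the destination size; with the old length
the last character of the token was silently dropped. Demonstrated with a
local strlcpy() (glibc does not provide one):

    #include <stdio.h>
    #include <string.h>

    /* minimal strlcpy(), as found in the kernel and the BSDs */
    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
            size_t len = strlen(src);

            if (size) {
                    size_t n = len < size - 1 ? len : size - 1;

                    memcpy(dst, src, n);
                    dst[n] = '\0';
            }
            return len;
    }

    int main(void)
    {
            const char *options = "115200n8 console";
            char buf[16];
            /* size must cover the option token *and* its NUL */
            size_t length = strcspn(options, " ") + 1;

            if (length > sizeof(buf))
                    length = sizeof(buf);
            my_strlcpy(buf, options, length);
            printf("'%s'\n", buf);  /* '115200n8', not '115200n' */
            return 0;
    }
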
diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
index bb91b4713ebd..2e3ea1a70d7b 100644
--- a/drivers/tty/serial/8250/8250_gsc.c
+++ b/drivers/tty/serial/8250/8250_gsc.c
@@ -31,9 +31,8 @@ static int __init serial_init_chip(struct parisc_device *dev)
31 int err; 31 int err;
32 32
33#ifdef CONFIG_64BIT 33#ifdef CONFIG_64BIT
34 extern int iosapic_serial_irq(int cellnum);
35 if (!dev->irq && (dev->id.sversion == 0xad)) 34 if (!dev->irq && (dev->id.sversion == 0xad))
36 dev->irq = iosapic_serial_irq(dev->mod_index-1); 35 dev->irq = iosapic_serial_irq(dev);
37#endif 36#endif
38 37
39 if (!dev->irq) { 38 if (!dev->irq) {
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 5e3d68917ffe..1456673bcca0 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -277,7 +277,7 @@ config SERIAL_TEGRA
277 select SERIAL_CORE 277 select SERIAL_CORE
278 help 278 help
279 Support for the on-chip UARTs on the NVIDIA Tegra series SOCs 279 Support for the on-chip UARTs on the NVIDIA Tegra series SOCs
280 providing /dev/ttyHS0, 1, 2, 3 and 4 (note, some machines may not 280 providing /dev/ttyTHS0, 1, 2, 3 and 4 (note, some machines may not
281 provide all of these ports, depending on how the serial port 281 provide all of these ports, depending on how the serial port
282 are enabled). This driver uses the APB DMA to achieve higher baudrate 282 are enabled). This driver uses the APB DMA to achieve higher baudrate
283 and better performance. 283 and better performance.
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index cbf1d155b7b2..22f280aa4f2c 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -773,6 +773,6 @@ module_init(arc_serial_init);
773module_exit(arc_serial_exit); 773module_exit(arc_serial_exit);
774 774
775MODULE_LICENSE("GPL"); 775MODULE_LICENSE("GPL");
776MODULE_ALIAS("plat-arcfpga/uart"); 776MODULE_ALIAS("platform:" DRIVER_NAME);
777MODULE_AUTHOR("Vineet Gupta"); 777MODULE_AUTHOR("Vineet Gupta");
778MODULE_DESCRIPTION("ARC(Synopsys) On-Chip(fpga) serial driver"); 778MODULE_DESCRIPTION("ARC(Synopsys) On-Chip(fpga) serial driver");
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 4f5f161896a1..f85b8e6d0346 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -678,11 +678,18 @@ static void mxs_auart_settermios(struct uart_port *u,
678 678
679static irqreturn_t mxs_auart_irq_handle(int irq, void *context) 679static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
680{ 680{
681 u32 istatus, istat; 681 u32 istat;
682 struct mxs_auart_port *s = context; 682 struct mxs_auart_port *s = context;
683 u32 stat = readl(s->port.membase + AUART_STAT); 683 u32 stat = readl(s->port.membase + AUART_STAT);
684 684
685 istatus = istat = readl(s->port.membase + AUART_INTR); 685 istat = readl(s->port.membase + AUART_INTR);
686
687 /* ack irq */
688 writel(istat & (AUART_INTR_RTIS
689 | AUART_INTR_TXIS
690 | AUART_INTR_RXIS
691 | AUART_INTR_CTSMIS),
692 s->port.membase + AUART_INTR_CLR);
686 693
687 if (istat & AUART_INTR_CTSMIS) { 694 if (istat & AUART_INTR_CTSMIS) {
688 uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS); 695 uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS);
@@ -702,12 +709,6 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
702 istat &= ~AUART_INTR_TXIS; 709 istat &= ~AUART_INTR_TXIS;
703 } 710 }
704 711
705 writel(istatus & (AUART_INTR_RTIS
706 | AUART_INTR_TXIS
707 | AUART_INTR_RXIS
708 | AUART_INTR_CTSMIS),
709 s->port.membase + AUART_INTR_CLR);
710
711 return IRQ_HANDLED; 712 return IRQ_HANDLED;
712} 713}
713 714
@@ -850,7 +851,7 @@ auart_console_write(struct console *co, const char *str, unsigned int count)
850 struct mxs_auart_port *s; 851 struct mxs_auart_port *s;
851 struct uart_port *port; 852 struct uart_port *port;
852 unsigned int old_ctrl0, old_ctrl2; 853 unsigned int old_ctrl0, old_ctrl2;
853 unsigned int to = 1000; 854 unsigned int to = 20000;
854 855
855 if (co->index >= MXS_AUART_PORTS || co->index < 0) 856 if (co->index >= MXS_AUART_PORTS || co->index < 0)
856 return; 857 return;
@@ -871,18 +872,23 @@ auart_console_write(struct console *co, const char *str, unsigned int count)
871 872
872 uart_console_write(port, str, count, mxs_auart_console_putchar); 873 uart_console_write(port, str, count, mxs_auart_console_putchar);
873 874
874 /* 875 /* Finally, wait for transmitter to become empty ... */
875 * Finally, wait for transmitter to become empty
876 * and restore the TCR
877 */
878 while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) { 876 while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) {
877 udelay(1);
879 if (!to--) 878 if (!to--)
880 break; 879 break;
881 udelay(1);
882 } 880 }
883 881
884 writel(old_ctrl0, port->membase + AUART_CTRL0); 882 /*
885 writel(old_ctrl2, port->membase + AUART_CTRL2); 883 * ... and restore the TCR if we waited long enough for the transmitter
884 * to be idle. This might keep the transmitter enabled although it is
885 * unused, but that is better than to disable it while it is still
886 * transmitting.
887 */
888 if (!(readl(port->membase + AUART_STAT) & AUART_STAT_BUSY)) {
889 writel(old_ctrl0, port->membase + AUART_CTRL0);
890 writel(old_ctrl2, port->membase + AUART_CTRL2);
891 }
886 892
887 clk_disable(s->clk); 893 clk_disable(s->clk);
888} 894}
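
The mxs-auart hunks rework the IRQ handler to ack the interrupt sources
right after reading them (the usual pattern, so that an event arriving
while the handler runs re-raises the line instead of being cleared
unseen), and turn the console-write drain into a bounded wait that only
restores the saved control registers once the transmitter is really idle.
A toy simulation of that drain loop (the fake status register below
stands in for readl(port->membase + AUART_STAT)):

    #include <stdio.h>

    #define STAT_BUSY 0x1u

    /* fake hardware: the FIFO drains after a few polls */
    static unsigned int read_stat(void)
    {
            static int polls;

            return ++polls > 3 ? 0 : STAT_BUSY;
    }

    int main(void)
    {
            unsigned int to = 20000;      /* generous bounded timeout */

            while (read_stat() & STAT_BUSY) {
                    /* udelay(1) in the driver */
                    if (!to--)
                            break;
            }

            if (!(read_stat() & STAT_BUSY))
                    printf("drained: restore CTRL0/CTRL2\n");
            else
                    printf("timed out: leave the transmitter enabled\n");
            return 0;
    }
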
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index ff171384ea52..dc6e96996ead 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -3478,7 +3478,7 @@ static int alloc_buf_list(SLMP_INFO *info)
3478 for ( i = 0; i < info->rx_buf_count; i++ ) { 3478 for ( i = 0; i < info->rx_buf_count; i++ ) {
3479 /* calculate and store physical address of this buffer entry */ 3479 /* calculate and store physical address of this buffer entry */
3480 info->rx_buf_list_ex[i].phys_entry = 3480 info->rx_buf_list_ex[i].phys_entry =
3481 info->buffer_list_phys + (i * sizeof(SCABUFSIZE)); 3481 info->buffer_list_phys + (i * SCABUFSIZE);
3482 3482
3483 /* calculate and store physical address of */ 3483 /* calculate and store physical address of */
3484 /* next entry in circular list of entries */ 3484 /* next entry in circular list of entries */
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 121aeb9393e1..f597e88a705d 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -256,10 +256,9 @@ void tty_port_tty_hangup(struct tty_port *port, bool check_clocal)
256{ 256{
257 struct tty_struct *tty = tty_port_tty_get(port); 257 struct tty_struct *tty = tty_port_tty_get(port);
258 258
259 if (tty && (!check_clocal || !C_CLOCAL(tty))) { 259 if (tty && (!check_clocal || !C_CLOCAL(tty)))
260 tty_hangup(tty); 260 tty_hangup(tty);
261 tty_kref_put(tty); 261 tty_kref_put(tty);
262 }
263} 262}
264EXPORT_SYMBOL_GPL(tty_port_tty_hangup); 263EXPORT_SYMBOL_GPL(tty_port_tty_hangup);
265 264
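
The tty_port_tty_hangup() change fixes a reference leak:
tty_port_tty_get() takes a kref that must be dropped on every path, but
the old code only called tty_kref_put() inside the hangup branch. A toy
refcount showing the balanced shape:

    #include <stdio.h>

    struct tty {
            int kref;
    };

    static struct tty *tty_get(struct tty *t)
    {
            if (t)
                    t->kref++;
            return t;
    }

    static void tty_put(struct tty *t)
    {
            if (t)
                    t->kref--;
    }

    static void hangup(struct tty *port_tty, int check_clocal, int clocal)
    {
            struct tty *tty = tty_get(port_tty);

            if (tty && (!check_clocal || !clocal))
                    printf("hangup\n");
            tty_put(tty);      /* dropped unconditionally: no leak */
    }

    int main(void)
    {
            struct tty t = { .kref = 1 };

            hangup(&t, 1, 1);  /* clocal set: no hangup, ref still put */
            printf("kref=%d\n", t.kref);
            return 0;
    }
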
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index eb2aa2e5a842..d1bd8ef1f9c1 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -12,7 +12,7 @@ if USB_CHIPIDEA
12 12
13config USB_CHIPIDEA_UDC 13config USB_CHIPIDEA_UDC
14 bool "ChipIdea device controller" 14 bool "ChipIdea device controller"
15 depends on USB_GADGET=y || USB_CHIPIDEA=m 15 depends on USB_GADGET=y || (USB_CHIPIDEA=m && USB_GADGET=m)
16 help 16 help
17 Say Y here to enable device controller functionality of the 17 Say Y here to enable device controller functionality of the
18 ChipIdea driver. 18 ChipIdea driver.
@@ -20,7 +20,7 @@ config USB_CHIPIDEA_UDC
20config USB_CHIPIDEA_HOST 20config USB_CHIPIDEA_HOST
21 bool "ChipIdea host controller" 21 bool "ChipIdea host controller"
22 depends on USB=y 22 depends on USB=y
23 depends on USB_EHCI_HCD=y || USB_CHIPIDEA=m 23 depends on USB_EHCI_HCD=y || (USB_CHIPIDEA=m && USB_EHCI_HCD=m)
24 select USB_EHCI_ROOT_HUB_TT 24 select USB_EHCI_ROOT_HUB_TT
25 help 25 help
26 Say Y here to enable host controller functionality of the 26 Say Y here to enable host controller functionality of the
diff --git a/drivers/usb/chipidea/bits.h b/drivers/usb/chipidea/bits.h
index aefa0261220c..1b23e354f9fb 100644
--- a/drivers/usb/chipidea/bits.h
+++ b/drivers/usb/chipidea/bits.h
@@ -50,7 +50,7 @@
50#define PORTSC_PTC (0x0FUL << 16) 50#define PORTSC_PTC (0x0FUL << 16)
51/* PTS and PTW for non lpm version only */ 51/* PTS and PTW for non lpm version only */
52#define PORTSC_PTS(d) \ 52#define PORTSC_PTS(d) \
53 ((((d) & 0x3) << 30) | (((d) & 0x4) ? BIT(25) : 0)) 53 (u32)((((d) & 0x3) << 30) | (((d) & 0x4) ? BIT(25) : 0))
54#define PORTSC_PTW BIT(28) 54#define PORTSC_PTW BIT(28)
55#define PORTSC_STS BIT(29) 55#define PORTSC_STS BIT(29)
56 56
@@ -59,7 +59,7 @@
59#define DEVLC_PSPD_HS (0x02UL << 25) 59#define DEVLC_PSPD_HS (0x02UL << 25)
60#define DEVLC_PTW BIT(27) 60#define DEVLC_PTW BIT(27)
61#define DEVLC_STS BIT(28) 61#define DEVLC_STS BIT(28)
62#define DEVLC_PTS(d) (((d) & 0x7) << 29) 62#define DEVLC_PTS(d) (u32)(((d) & 0x7) << 29)
63 63
64/* Encoding for DEVLC_PTS and PORTSC_PTS */ 64/* Encoding for DEVLC_PTS and PORTSC_PTS */
65#define PTS_UTMI 0 65#define PTS_UTMI 0
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 609dbc2f7151..83b4ef4dfcf8 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1119,11 +1119,11 @@ static int usbtmc_probe(struct usb_interface *intf,
1119 /* Determine if it is a Rigol or not */ 1119 /* Determine if it is a Rigol or not */
1120 data->rigol_quirk = 0; 1120 data->rigol_quirk = 0;
1121 dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n", 1121 dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n",
1122 data->usb_dev->descriptor.idVendor, 1122 le16_to_cpu(data->usb_dev->descriptor.idVendor),
1123 data->usb_dev->descriptor.idProduct); 1123 le16_to_cpu(data->usb_dev->descriptor.idProduct));
1124 for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) { 1124 for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) {
1125 if ((usbtmc_id_quirk[n].idVendor == data->usb_dev->descriptor.idVendor) && 1125 if ((usbtmc_id_quirk[n].idVendor == le16_to_cpu(data->usb_dev->descriptor.idVendor)) &&
1126 (usbtmc_id_quirk[n].idProduct == data->usb_dev->descriptor.idProduct)) { 1126 (usbtmc_id_quirk[n].idProduct == le16_to_cpu(data->usb_dev->descriptor.idProduct))) {
1127 dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n"); 1127 dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n");
1128 data->rigol_quirk = 1; 1128 data->rigol_quirk = 1;
1129 break; 1129 break;
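
The usbtmc quirk-matching fix is an endianness one: descriptor fields
such as idVendor and idProduct are little-endian on the wire, so they
must pass through le16_to_cpu() before being compared or printed; on a
big-endian host the raw values would be byte-swapped. A userspace
analogue of the conversion (the kernel helper takes a __le16 value
rather than a pointer):

    #include <stdint.h>
    #include <stdio.h>

    /* byte-order-independent read of a little-endian 16-bit field */
    static uint16_t get_le16(const uint8_t *p)
    {
            return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
            /* idVendor 0x1AB1 as it sits in the descriptor buffer */
            uint8_t desc[2] = { 0xb1, 0x1a };

            printf("Vendor 0x%04X\n", get_le16(desc));
            return 0;
    }
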
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 4191db32f12c..558313de4911 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -668,6 +668,15 @@ resubmit:
668static inline int 668static inline int
669hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt) 669hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
670{ 670{
671 /* Need to clear both directions for control ep */
672 if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) ==
673 USB_ENDPOINT_XFER_CONTROL) {
674 int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
675 HUB_CLEAR_TT_BUFFER, USB_RT_PORT,
676 devinfo ^ 0x8000, tt, NULL, 0, 1000);
677 if (status)
678 return status;
679 }
671 return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), 680 return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
672 HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo, 681 HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo,
673 tt, NULL, 0, 1000); 682 tt, NULL, 0, 1000);
@@ -2848,6 +2857,15 @@ static int usb_disable_function_remotewakeup(struct usb_device *udev)
2848 USB_CTRL_SET_TIMEOUT); 2857 USB_CTRL_SET_TIMEOUT);
2849} 2858}
2850 2859
2860/* Count of wakeup-enabled devices at or below udev */
2861static unsigned wakeup_enabled_descendants(struct usb_device *udev)
2862{
2863 struct usb_hub *hub = usb_hub_to_struct_hub(udev);
2864
2865 return udev->do_remote_wakeup +
2866 (hub ? hub->wakeup_enabled_descendants : 0);
2867}
2868
2851/* 2869/*
2852 * usb_port_suspend - suspend a usb device's upstream port 2870 * usb_port_suspend - suspend a usb device's upstream port
2853 * @udev: device that's no longer in active use, not a root hub 2871 * @udev: device that's no longer in active use, not a root hub
@@ -2888,8 +2906,8 @@ static int usb_disable_function_remotewakeup(struct usb_device *udev)
2888 * Linux (2.6) currently has NO mechanisms to initiate that: no khubd 2906 * Linux (2.6) currently has NO mechanisms to initiate that: no khubd
2889 * timer, no SRP, no requests through sysfs. 2907 * timer, no SRP, no requests through sysfs.
2890 * 2908 *
2891 * If Runtime PM isn't enabled or used, non-SuperSpeed devices really get 2909 * If Runtime PM isn't enabled or used, non-SuperSpeed devices may not get
2892 * suspended only when their bus goes into global suspend (i.e., the root 2910 * suspended until their bus goes into global suspend (i.e., the root
2893 * hub is suspended). Nevertheless, we change @udev->state to 2911 * hub is suspended). Nevertheless, we change @udev->state to
2894 * USB_STATE_SUSPENDED as this is the device's "logical" state. The actual 2912 * USB_STATE_SUSPENDED as this is the device's "logical" state. The actual
2895 * upstream port setting is stored in @udev->port_is_suspended. 2913 * upstream port setting is stored in @udev->port_is_suspended.
@@ -2960,15 +2978,21 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
2960 /* see 7.1.7.6 */ 2978 /* see 7.1.7.6 */
2961 if (hub_is_superspeed(hub->hdev)) 2979 if (hub_is_superspeed(hub->hdev))
2962 status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3); 2980 status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3);
2963 else if (PMSG_IS_AUTO(msg)) 2981
2964 status = set_port_feature(hub->hdev, port1,
2965 USB_PORT_FEAT_SUSPEND);
2966 /* 2982 /*
2967 * For system suspend, we do not need to enable the suspend feature 2983 * For system suspend, we do not need to enable the suspend feature
2968 * on individual USB-2 ports. The devices will automatically go 2984 * on individual USB-2 ports. The devices will automatically go
2969 * into suspend a few ms after the root hub stops sending packets. 2985 * into suspend a few ms after the root hub stops sending packets.
2970 * The USB 2.0 spec calls this "global suspend". 2986 * The USB 2.0 spec calls this "global suspend".
2987 *
2988 * However, many USB hubs have a bug: They don't relay wakeup requests
2989 * from a downstream port if the port's suspend feature isn't on.
2990 * Therefore we will turn on the suspend feature if udev or any of its
2991 * descendants is enabled for remote wakeup.
2971 */ 2992 */
2993 else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0)
2994 status = set_port_feature(hub->hdev, port1,
2995 USB_PORT_FEAT_SUSPEND);
2972 else { 2996 else {
2973 really_suspend = false; 2997 really_suspend = false;
2974 status = 0; 2998 status = 0;
@@ -3003,15 +3027,16 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
3003 if (!PMSG_IS_AUTO(msg)) 3027 if (!PMSG_IS_AUTO(msg))
3004 status = 0; 3028 status = 0;
3005 } else { 3029 } else {
3006 /* device has up to 10 msec to fully suspend */
3007 dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n", 3030 dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n",
3008 (PMSG_IS_AUTO(msg) ? "auto-" : ""), 3031 (PMSG_IS_AUTO(msg) ? "auto-" : ""),
3009 udev->do_remote_wakeup); 3032 udev->do_remote_wakeup);
3010 usb_set_device_state(udev, USB_STATE_SUSPENDED);
3011 if (really_suspend) { 3033 if (really_suspend) {
3012 udev->port_is_suspended = 1; 3034 udev->port_is_suspended = 1;
3035
3036 /* device has up to 10 msec to fully suspend */
3013 msleep(10); 3037 msleep(10);
3014 } 3038 }
3039 usb_set_device_state(udev, USB_STATE_SUSPENDED);
3015 } 3040 }
3016 3041
3017 /* 3042 /*
@@ -3293,7 +3318,11 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
3293 unsigned port1; 3318 unsigned port1;
3294 int status; 3319 int status;
3295 3320
3296 /* Warn if children aren't already suspended */ 3321 /*
3322 * Warn if children aren't already suspended.
3323 * Also, add up the number of wakeup-enabled descendants.
3324 */
3325 hub->wakeup_enabled_descendants = 0;
3297 for (port1 = 1; port1 <= hdev->maxchild; port1++) { 3326 for (port1 = 1; port1 <= hdev->maxchild; port1++) {
3298 struct usb_device *udev; 3327 struct usb_device *udev;
3299 3328
@@ -3303,6 +3332,9 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
3303 if (PMSG_IS_AUTO(msg)) 3332 if (PMSG_IS_AUTO(msg))
3304 return -EBUSY; 3333 return -EBUSY;
3305 } 3334 }
3335 if (udev)
3336 hub->wakeup_enabled_descendants +=
3337 wakeup_enabled_descendants(udev);
3306 } 3338 }
3307 3339
3308 if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) { 3340 if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) {
@@ -4766,7 +4798,8 @@ static void hub_events(void)
4766 hub->ports[i - 1]->child; 4798 hub->ports[i - 1]->child;
4767 4799
4768 dev_dbg(hub_dev, "warm reset port %d\n", i); 4800 dev_dbg(hub_dev, "warm reset port %d\n", i);
4769 if (!udev) { 4801 if (!udev || !(portstatus &
4802 USB_PORT_STAT_CONNECTION)) {
4770 status = hub_port_reset(hub, i, 4803 status = hub_port_reset(hub, i,
4771 NULL, HUB_BH_RESET_TIME, 4804 NULL, HUB_BH_RESET_TIME,
4772 true); 4805 true);
@@ -4776,8 +4809,8 @@ static void hub_events(void)
4776 usb_lock_device(udev); 4809 usb_lock_device(udev);
4777 status = usb_reset_device(udev); 4810 status = usb_reset_device(udev);
4778 usb_unlock_device(udev); 4811 usb_unlock_device(udev);
4812 connect_change = 0;
4779 } 4813 }
4780 connect_change = 0;
4781 } 4814 }
4782 4815
4783 if (connect_change) 4816 if (connect_change)
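
The suspend-path hunks in hub.c work around hubs that do not relay wakeup
requests from ports whose suspend feature is off: the port is now
explicitly suspended whenever udev or anything below it is wakeup-enabled.
Because children suspend before their parent, hub_suspend() can sum the
counts bottom-up, and wakeup_enabled_descendants() just adds the device's
own flag to its hub's stored total. A sketch over a toy device tree
(types invented for illustration):

    #include <stdio.h>

    struct hub;

    struct dev {                      /* toy device-tree node */
            int do_remote_wakeup;
            struct hub *hub;          /* non-NULL if the device is a hub */
    };

    struct hub {
            unsigned int wakeup_enabled_descendants;
            struct dev *child[4];
            int nchild;
    };

    /* the device's own flag plus whatever its hub summed up below it */
    static unsigned int wakeup_enabled_descendants(struct dev *udev)
    {
            return (unsigned int)udev->do_remote_wakeup +
                   (udev->hub ? udev->hub->wakeup_enabled_descendants : 0);
    }

    /* children suspend first, so each hub's total is ready for its parent */
    static void hub_suspend(struct hub *hub)
    {
            int i;

            hub->wakeup_enabled_descendants = 0;
            for (i = 0; i < hub->nchild; i++)
                    hub->wakeup_enabled_descendants +=
                            wakeup_enabled_descendants(hub->child[i]);
    }

    int main(void)
    {
            struct dev mouse = { .do_remote_wakeup = 1, .hub = NULL };
            struct hub h = { .nchild = 1 };
            struct dev hdev = { .do_remote_wakeup = 0, .hub = &h };

            h.child[0] = &mouse;
            hub_suspend(&h);
            printf("subtree wakeup count: %u\n",
                   wakeup_enabled_descendants(&hdev));
            return 0;
    }
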
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 6508e02b3dac..4e4790dea343 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -59,6 +59,9 @@ struct usb_hub {
59 struct usb_tt tt; /* Transaction Translator */ 59 struct usb_tt tt; /* Transaction Translator */
60 60
61 unsigned mA_per_port; /* current for each child */ 61 unsigned mA_per_port; /* current for each child */
62#ifdef CONFIG_PM
63 unsigned wakeup_enabled_descendants;
64#endif
62 65
63 unsigned limited_power:1; 66 unsigned limited_power:1;
64 unsigned quiescing:1; 67 unsigned quiescing:1;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index a63598895077..5b44cd47da5b 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -78,6 +78,12 @@ static const struct usb_device_id usb_quirk_list[] = {
78 { USB_DEVICE(0x04d8, 0x000c), .driver_info = 78 { USB_DEVICE(0x04d8, 0x000c), .driver_info =
79 USB_QUIRK_CONFIG_INTF_STRINGS }, 79 USB_QUIRK_CONFIG_INTF_STRINGS },
80 80
81 /* CarrolTouch 4000U */
82 { USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME },
83
84 /* CarrolTouch 4500U */
85 { USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME },
86
81 /* Samsung Android phone modem - ID conflict with SPH-I500 */ 87 /* Samsung Android phone modem - ID conflict with SPH-I500 */
82 { USB_DEVICE(0x04e8, 0x6601), .driver_info = 88 { USB_DEVICE(0x04e8, 0x6601), .driver_info =
83 USB_QUIRK_CONFIG_INTF_STRINGS }, 89 USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 757aa18027d0..2378958ea63e 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -1,6 +1,6 @@
1config USB_DWC3 1config USB_DWC3
2 tristate "DesignWare USB3 DRD Core Support" 2 tristate "DesignWare USB3 DRD Core Support"
3 depends on (USB || USB_GADGET) && GENERIC_HARDIRQS 3 depends on (USB || USB_GADGET) && GENERIC_HARDIRQS && HAS_DMA
4 select USB_XHCI_PLATFORM if USB_SUPPORT && USB_XHCI_HCD 4 select USB_XHCI_PLATFORM if USB_SUPPORT && USB_XHCI_HCD
5 help 5 help
6 Say Y or M here if your system has a Dual Role SuperSpeed 6 Say Y or M here if your system has a Dual Role SuperSpeed
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index c35d49d39b76..358375e0b291 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -450,7 +450,7 @@ static int dwc3_probe(struct platform_device *pdev)
450 } 450 }
451 451
452 if (IS_ERR(dwc->usb3_phy)) { 452 if (IS_ERR(dwc->usb3_phy)) {
453 ret = PTR_ERR(dwc->usb2_phy); 453 ret = PTR_ERR(dwc->usb3_phy);
454 454
455 /* 455 /*
456 * if -ENXIO is returned, it means PHY layer wasn't 456 * if -ENXIO is returned, it means PHY layer wasn't
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index b69d322e3cab..27dad993b007 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -759,8 +759,8 @@ struct dwc3 {
759 759
760struct dwc3_event_type { 760struct dwc3_event_type {
761 u32 is_devspec:1; 761 u32 is_devspec:1;
762 u32 type:6; 762 u32 type:7;
763 u32 reserved8_31:25; 763 u32 reserved8_31:24;
764} __packed; 764} __packed;
765 765
766#define DWC3_DEPEVT_XFERCOMPLETE 0x01 766#define DWC3_DEPEVT_XFERCOMPLETE 0x01
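
The dwc3_event_type fix widens the "type" bitfield from 6 to 7 bits and
shrinks the reserved field accordingly, so the three fields still add up
to the 32 bits of the hardware event word; a 6-bit field would silently
truncate any type code above 0x3f. A quick check of the layout (the real
struct is also marked __packed):

    #include <stdint.h>
    #include <stdio.h>

    struct dwc3_event_type {
            uint32_t is_devspec:1;
            uint32_t type:7;          /* was :6 -- too narrow */
            uint32_t reserved8_31:24; /* 1 + 7 + 24 == 32     */
    };

    int main(void)
    {
            struct dwc3_event_type ev = { .type = 0x7f };

            printf("sizeof=%zu type=%#x\n",
                   sizeof(struct dwc3_event_type), (unsigned int)ev.type);
            return 0;
    }
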
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index b5e5b35df49c..f77083fedc68 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1584,6 +1584,7 @@ err1:
1584 __dwc3_gadget_ep_disable(dwc->eps[0]); 1584 __dwc3_gadget_ep_disable(dwc->eps[0]);
1585 1585
1586err0: 1586err0:
1587 dwc->gadget_driver = NULL;
1587 spin_unlock_irqrestore(&dwc->lock, flags); 1588 spin_unlock_irqrestore(&dwc->lock, flags);
1588 1589
1589 return ret; 1590 return ret;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 62f6802f6e0f..8e9368330b10 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -193,6 +193,7 @@ config USB_FUSB300
193 Faraday usb device controller FUSB300 driver 193 Faraday usb device controller FUSB300 driver
194 194
195config USB_FOTG210_UDC 195config USB_FOTG210_UDC
196 depends on HAS_DMA
196 tristate "Faraday FOTG210 USB Peripheral Controller" 197 tristate "Faraday FOTG210 USB Peripheral Controller"
197 help 198 help
198 Faraday USB2.0 OTG controller which can be configured as 199 Faraday USB2.0 OTG controller which can be configured as
@@ -328,13 +329,14 @@ config USB_S3C_HSUDC
328 329
329config USB_MV_UDC 330config USB_MV_UDC
330 tristate "Marvell USB2.0 Device Controller" 331 tristate "Marvell USB2.0 Device Controller"
331 depends on GENERIC_HARDIRQS 332 depends on GENERIC_HARDIRQS && HAS_DMA
332 help 333 help
333 Marvell Socs (including PXA and MMP series) include a high speed 334 Marvell Socs (including PXA and MMP series) include a high speed
334 USB2.0 OTG controller, which can be configured as high speed or 335 USB2.0 OTG controller, which can be configured as high speed or
335 full speed USB peripheral. 336 full speed USB peripheral.
336 337
337config USB_MV_U3D 338config USB_MV_U3D
339 depends on HAS_DMA
338 tristate "MARVELL PXA2128 USB 3.0 controller" 340 tristate "MARVELL PXA2128 USB 3.0 controller"
339 help 341 help
340 MARVELL PXA2128 Processor series include a super speed USB3.0 device 342 MARVELL PXA2128 Processor series include a super speed USB3.0 device
@@ -639,6 +641,7 @@ config USB_CONFIGFS_RNDIS
639 depends on USB_CONFIGFS 641 depends on USB_CONFIGFS
640 depends on NET 642 depends on NET
641 select USB_U_ETHER 643 select USB_U_ETHER
644 select USB_U_RNDIS
642 select USB_F_RNDIS 645 select USB_F_RNDIS
643 help 646 help
644 Microsoft Windows XP bundles the "Remote NDIS" (RNDIS) protocol, 647 Microsoft Windows XP bundles the "Remote NDIS" (RNDIS) protocol,
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 073b938f9135..d9a6add0c852 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -870,8 +870,8 @@ static void clk_on(struct at91_udc *udc)
870 if (udc->clocked) 870 if (udc->clocked)
871 return; 871 return;
872 udc->clocked = 1; 872 udc->clocked = 1;
873 clk_enable(udc->iclk); 873 clk_prepare_enable(udc->iclk);
874 clk_enable(udc->fclk); 874 clk_prepare_enable(udc->fclk);
875} 875}
876 876
877static void clk_off(struct at91_udc *udc) 877static void clk_off(struct at91_udc *udc)
@@ -880,8 +880,8 @@ static void clk_off(struct at91_udc *udc)
880 return; 880 return;
881 udc->clocked = 0; 881 udc->clocked = 0;
882 udc->gadget.speed = USB_SPEED_UNKNOWN; 882 udc->gadget.speed = USB_SPEED_UNKNOWN;
883 clk_disable(udc->fclk); 883 clk_disable_unprepare(udc->fclk);
884 clk_disable(udc->iclk); 884 clk_disable_unprepare(udc->iclk);
885} 885}
886 886
887/* 887/*
@@ -1725,7 +1725,7 @@ static int at91udc_probe(struct platform_device *pdev)
1725 /* init software state */ 1725 /* init software state */
1726 udc = &controller; 1726 udc = &controller;
1727 udc->gadget.dev.parent = dev; 1727 udc->gadget.dev.parent = dev;
1728 if (pdev->dev.of_node) 1728 if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node)
1729 at91udc_of_init(udc, pdev->dev.of_node); 1729 at91udc_of_init(udc, pdev->dev.of_node);
1730 else 1730 else
1731 memcpy(&udc->board, dev->platform_data, 1731 memcpy(&udc->board, dev->platform_data,
@@ -1782,12 +1782,14 @@ static int at91udc_probe(struct platform_device *pdev)
1782 } 1782 }
1783 1783
1784 /* don't do anything until we have both gadget driver and VBUS */ 1784 /* don't do anything until we have both gadget driver and VBUS */
1785 clk_enable(udc->iclk); 1785 retval = clk_prepare_enable(udc->iclk);
1786 if (retval)
1787 goto fail1;
1786 at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS); 1788 at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS);
1787 at91_udp_write(udc, AT91_UDP_IDR, 0xffffffff); 1789 at91_udp_write(udc, AT91_UDP_IDR, 0xffffffff);
1788 /* Clear all pending interrupts - UDP may be used by bootloader. */ 1790 /* Clear all pending interrupts - UDP may be used by bootloader. */
1789 at91_udp_write(udc, AT91_UDP_ICR, 0xffffffff); 1791 at91_udp_write(udc, AT91_UDP_ICR, 0xffffffff);
1790 clk_disable(udc->iclk); 1792 clk_disable_unprepare(udc->iclk);
1791 1793
1792 /* request UDC and maybe VBUS irqs */ 1794 /* request UDC and maybe VBUS irqs */
1793 udc->udp_irq = platform_get_irq(pdev, 0); 1795 udc->udp_irq = platform_get_irq(pdev, 0);
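
The at91_udc hunks move to the common clock API: clk_prepare_enable()
pairs prepare (which may sleep) with enable, clk_disable_unprepare()
undoes both, and since enabling can now fail the return value is checked
before touching the UDP registers. Toy stand-ins to show the checked
bring-up/tear-down ordering (the real functions take a struct clk *):

    #include <stdio.h>

    /* stand-ins for clk_prepare_enable()/clk_disable_unprepare() */
    static int clk_prepare_enable(const char *clk)
    {
            printf("prepare+enable %s\n", clk);
            return 0;                /* may return -errno in the kernel */
    }

    static void clk_disable_unprepare(const char *clk)
    {
            printf("disable+unprepare %s\n", clk);
    }

    int main(void)
    {
            if (clk_prepare_enable("iclk"))
                    return 1;        /* bail out before touching registers */
            /* ... program the controller ... */
            clk_disable_unprepare("iclk");
            return 0;
    }
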
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index f48712ffe261..c1c113ef950c 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -449,14 +449,20 @@ fail:
449 449
450static int __exit eth_unbind(struct usb_composite_dev *cdev) 450static int __exit eth_unbind(struct usb_composite_dev *cdev)
451{ 451{
452 if (has_rndis()) 452 if (has_rndis()) {
453 usb_put_function(f_rndis);
453 usb_put_function_instance(fi_rndis); 454 usb_put_function_instance(fi_rndis);
454 if (use_eem) 455 }
456 if (use_eem) {
457 usb_put_function(f_eem);
455 usb_put_function_instance(fi_eem); 458 usb_put_function_instance(fi_eem);
456 else if (can_support_ecm(cdev->gadget)) 459 } else if (can_support_ecm(cdev->gadget)) {
460 usb_put_function(f_ecm);
457 usb_put_function_instance(fi_ecm); 461 usb_put_function_instance(fi_ecm);
458 else 462 } else {
463 usb_put_function(f_geth);
459 usb_put_function_instance(fi_geth); 464 usb_put_function_instance(fi_geth);
465 }
460 return 0; 466 return 0;
461} 467}
462 468
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index 5d3561ea1c15..edab45da3741 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -959,8 +959,11 @@ static struct usb_function_instance *ecm_alloc_inst(void)
959 mutex_init(&opts->lock); 959 mutex_init(&opts->lock);
960 opts->func_inst.free_func_inst = ecm_free_inst; 960 opts->func_inst.free_func_inst = ecm_free_inst;
961 opts->net = gether_setup_default(); 961 opts->net = gether_setup_default();
962 if (IS_ERR(opts->net)) 962 if (IS_ERR(opts->net)) {
963 return ERR_PTR(PTR_ERR(opts->net)); 963 struct net_device *net = opts->net;
964 kfree(opts);
965 return ERR_CAST(net);
966 }
964 967
965 config_group_init_type_name(&opts->func_inst.group, "", &ecm_func_type); 968 config_group_init_type_name(&opts->func_inst.group, "", &ecm_func_type);
966 969
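
This gether_setup_default() error path (the same shape recurs below in
f_eem, f_ncm, f_phonet, f_rndis and f_subset) plugs a leak and avoids a
use-after-free in one move: opts was never freed on failure, and the
error pointer must be copied out of opts->net into a local before
kfree(opts), since returning ERR_CAST(opts->net) afterwards would read
freed memory. A userspace rendering of the same cleanup shape, with NULL
standing in for the kernel's ERR_PTR values:

    #include <stdio.h>
    #include <stdlib.h>

    struct opts {                    /* illustrative, not the f_ecm type */
            void *net;
    };

    static void *setup_net(int fail)
    {
            return fail ? NULL : malloc(1);
    }

    static struct opts *alloc_inst(int fail)
    {
            struct opts *opts = calloc(1, sizeof(*opts));

            if (!opts)
                    return NULL;
            opts->net = setup_net(fail);
            if (!opts->net) {
                    void *err = opts->net; /* save before freeing opts */
                    free(opts);            /* don't leak the instance  */
                    return err;            /* i.e. ERR_CAST(net)       */
            }
            return opts;
    }

    int main(void)
    {
            printf("%s\n", alloc_inst(1) ? "ok" : "failed cleanly");
            return 0;
    }
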
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
index 90ee8022e8d8..d00392d879db 100644
--- a/drivers/usb/gadget/f_eem.c
+++ b/drivers/usb/gadget/f_eem.c
@@ -593,8 +593,11 @@ static struct usb_function_instance *eem_alloc_inst(void)
593 mutex_init(&opts->lock); 593 mutex_init(&opts->lock);
594 opts->func_inst.free_func_inst = eem_free_inst; 594 opts->func_inst.free_func_inst = eem_free_inst;
595 opts->net = gether_setup_default(); 595 opts->net = gether_setup_default();
596 if (IS_ERR(opts->net)) 596 if (IS_ERR(opts->net)) {
597 return ERR_CAST(opts->net); 597 struct net_device *net = opts->net;
598 kfree(opts);
599 return ERR_CAST(net);
600 }
598 601
599 config_group_init_type_name(&opts->func_inst.group, "", &eem_func_type); 602 config_group_init_type_name(&opts->func_inst.group, "", &eem_func_type);
600 603
diff --git a/drivers/usb/gadget/f_ncm.c b/drivers/usb/gadget/f_ncm.c
index 952177f7eb9b..1c28fe13328a 100644
--- a/drivers/usb/gadget/f_ncm.c
+++ b/drivers/usb/gadget/f_ncm.c
@@ -1350,8 +1350,11 @@ static struct usb_function_instance *ncm_alloc_inst(void)
1350 mutex_init(&opts->lock); 1350 mutex_init(&opts->lock);
1351 opts->func_inst.free_func_inst = ncm_free_inst; 1351 opts->func_inst.free_func_inst = ncm_free_inst;
1352 opts->net = gether_setup_default(); 1352 opts->net = gether_setup_default();
1353 if (IS_ERR(opts->net)) 1353 if (IS_ERR(opts->net)) {
1354 return ERR_PTR(PTR_ERR(opts->net)); 1354 struct net_device *net = opts->net;
1355 kfree(opts);
1356 return ERR_CAST(net);
1357 }
1355 1358
1356 config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type); 1359 config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
1357 1360
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 7944fb0efe3b..eb3aa817a662 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -488,7 +488,6 @@ static int pn_bind(struct usb_configuration *c, struct usb_function *f)
488 struct usb_ep *ep; 488 struct usb_ep *ep;
489 int status, i; 489 int status, i;
490 490
491#ifndef USBF_PHONET_INCLUDED
492 struct f_phonet_opts *phonet_opts; 491 struct f_phonet_opts *phonet_opts;
493 492
494 phonet_opts = container_of(f->fi, struct f_phonet_opts, func_inst); 493 phonet_opts = container_of(f->fi, struct f_phonet_opts, func_inst);
@@ -507,7 +506,6 @@ static int pn_bind(struct usb_configuration *c, struct usb_function *f)
507 return status; 506 return status;
508 phonet_opts->bound = true; 507 phonet_opts->bound = true;
509 } 508 }
510#endif
511 509
512 /* Reserve interface IDs */ 510 /* Reserve interface IDs */
513 status = usb_interface_id(c, f); 511 status = usb_interface_id(c, f);
@@ -656,8 +654,11 @@ static struct usb_function_instance *phonet_alloc_inst(void)
656 654
657 opts->func_inst.free_func_inst = phonet_free_inst; 655 opts->func_inst.free_func_inst = phonet_free_inst;
658 opts->net = gphonet_setup_default(); 656 opts->net = gphonet_setup_default();
659 if (IS_ERR(opts->net)) 657 if (IS_ERR(opts->net)) {
660 return ERR_PTR(PTR_ERR(opts->net)); 658 struct net_device *net = opts->net;
659 kfree(opts);
660 return ERR_CAST(net);
661 }
661 662
662 config_group_init_type_name(&opts->func_inst.group, "", 663 config_group_init_type_name(&opts->func_inst.group, "",
663 &phonet_func_type); 664 &phonet_func_type);
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 191df35ae69d..717ed7f95639 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -963,8 +963,11 @@ static struct usb_function_instance *rndis_alloc_inst(void)
963 mutex_init(&opts->lock); 963 mutex_init(&opts->lock);
964 opts->func_inst.free_func_inst = rndis_free_inst; 964 opts->func_inst.free_func_inst = rndis_free_inst;
965 opts->net = gether_setup_default(); 965 opts->net = gether_setup_default();
966 if (IS_ERR(opts->net)) 966 if (IS_ERR(opts->net)) {
967 return ERR_CAST(opts->net); 967 struct net_device *net = opts->net;
968 kfree(opts);
969 return ERR_CAST(net);
970 }
968 971
969 config_group_init_type_name(&opts->func_inst.group, "", 972 config_group_init_type_name(&opts->func_inst.group, "",
970 &rndis_func_type); 973 &rndis_func_type);
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
index 5601e1d96c4f..7c8674fa7e80 100644
--- a/drivers/usb/gadget/f_subset.c
+++ b/drivers/usb/gadget/f_subset.c
@@ -505,8 +505,11 @@ static struct usb_function_instance *geth_alloc_inst(void)
505 mutex_init(&opts->lock); 505 mutex_init(&opts->lock);
506 opts->func_inst.free_func_inst = geth_free_inst; 506 opts->func_inst.free_func_inst = geth_free_inst;
507 opts->net = gether_setup_default(); 507 opts->net = gether_setup_default();
508 if (IS_ERR(opts->net)) 508 if (IS_ERR(opts->net)) {
509 return ERR_CAST(opts->net); 509 struct net_device *net = opts->net;
510 kfree(opts);
511 return ERR_CAST(net);
512 }
510 513
511 config_group_init_type_name(&opts->func_inst.group, "", 514 config_group_init_type_name(&opts->func_inst.group, "",
512 &gether_func_type); 515 &gether_func_type);
diff --git a/drivers/usb/gadget/fotg210-udc.c b/drivers/usb/gadget/fotg210-udc.c
index cce5535b1dc6..10cd18ddd0d4 100644
--- a/drivers/usb/gadget/fotg210-udc.c
+++ b/drivers/usb/gadget/fotg210-udc.c
@@ -1074,7 +1074,7 @@ static struct usb_gadget_ops fotg210_gadget_ops = {
1074 .udc_stop = fotg210_udc_stop, 1074 .udc_stop = fotg210_udc_stop,
1075}; 1075};
1076 1076
1077static int __exit fotg210_udc_remove(struct platform_device *pdev) 1077static int fotg210_udc_remove(struct platform_device *pdev)
1078{ 1078{
1079 struct fotg210_udc *fotg210 = dev_get_drvdata(&pdev->dev); 1079 struct fotg210_udc *fotg210 = dev_get_drvdata(&pdev->dev);
1080 1080
@@ -1088,7 +1088,7 @@ static int __exit fotg210_udc_remove(struct platform_device *pdev)
1088 return 0; 1088 return 0;
1089} 1089}
1090 1090
1091static int __init fotg210_udc_probe(struct platform_device *pdev) 1091static int fotg210_udc_probe(struct platform_device *pdev)
1092{ 1092{
1093 struct resource *res, *ires; 1093 struct resource *res, *ires;
1094 struct fotg210_udc *fotg210 = NULL; 1094 struct fotg210_udc *fotg210 = NULL;
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 032b96a51ce4..2a1ebefd8f9e 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -160,10 +160,8 @@ static __init int rndis_do_config(struct usb_configuration *c)
160 return ret; 160 return ret;
161 161
162 f_acm_rndis = usb_get_function(fi_acm); 162 f_acm_rndis = usb_get_function(fi_acm);
163 if (IS_ERR(f_acm_rndis)) { 163 if (IS_ERR(f_acm_rndis))
164 ret = PTR_ERR(f_acm_rndis); 164 return PTR_ERR(f_acm_rndis);
165 goto err_func_acm;
166 }
167 165
168 ret = usb_add_function(c, f_acm_rndis); 166 ret = usb_add_function(c, f_acm_rndis);
169 if (ret) 167 if (ret)
@@ -178,7 +176,6 @@ err_fsg:
178 usb_remove_function(c, f_acm_rndis); 176 usb_remove_function(c, f_acm_rndis);
179err_conf: 177err_conf:
180 usb_put_function(f_acm_rndis); 178 usb_put_function(f_acm_rndis);
181err_func_acm:
182 return ret; 179 return ret;
183} 180}
184 181
@@ -226,7 +223,7 @@ static __init int cdc_do_config(struct usb_configuration *c)
226 /* implicit port_num is zero */ 223 /* implicit port_num is zero */
227 f_acm_multi = usb_get_function(fi_acm); 224 f_acm_multi = usb_get_function(fi_acm);
228 if (IS_ERR(f_acm_multi)) 225 if (IS_ERR(f_acm_multi))
229 goto err_func_acm; 226 return PTR_ERR(f_acm_multi);
230 227
231 ret = usb_add_function(c, f_acm_multi); 228 ret = usb_add_function(c, f_acm_multi);
232 if (ret) 229 if (ret)
@@ -241,7 +238,6 @@ err_fsg:
241 usb_remove_function(c, f_acm_multi); 238 usb_remove_function(c, f_acm_multi);
242err_conf: 239err_conf:
243 usb_put_function(f_acm_multi); 240 usb_put_function(f_acm_multi);
244err_func_acm:
245 return ret; 241 return ret;
246} 242}
247 243
diff --git a/drivers/usb/gadget/mv_u3d_core.c b/drivers/usb/gadget/mv_u3d_core.c
index 07fdb3eaf48a..ec6a2d290398 100644
--- a/drivers/usb/gadget/mv_u3d_core.c
+++ b/drivers/usb/gadget/mv_u3d_core.c
@@ -1776,7 +1776,7 @@ static int mv_u3d_remove(struct platform_device *dev)
1776 kfree(u3d->eps); 1776 kfree(u3d->eps);
1777 1777
1778 if (u3d->irq) 1778 if (u3d->irq)
1779 free_irq(u3d->irq, &dev->dev); 1779 free_irq(u3d->irq, u3d);
1780 1780
1781 if (u3d->cap_regs) 1781 if (u3d->cap_regs)
1782 iounmap(u3d->cap_regs); 1782 iounmap(u3d->cap_regs);
@@ -1974,7 +1974,7 @@ static int mv_u3d_probe(struct platform_device *dev)
1974 return 0; 1974 return 0;
1975 1975
1976err_unregister: 1976err_unregister:
1977 free_irq(u3d->irq, &dev->dev); 1977 free_irq(u3d->irq, u3d);
1978err_request_irq: 1978err_request_irq:
1979err_get_irq: 1979err_get_irq:
1980 kfree(u3d->status_req); 1980 kfree(u3d->status_req);
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index ffd8fa541101..13e25f80fc20 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -50,6 +50,8 @@ static DEFINE_MUTEX(udc_lock);
50 50
51/* ------------------------------------------------------------------------- */ 51/* ------------------------------------------------------------------------- */
52 52
53#ifdef CONFIG_HAS_DMA
54
53int usb_gadget_map_request(struct usb_gadget *gadget, 55int usb_gadget_map_request(struct usb_gadget *gadget,
54 struct usb_request *req, int is_in) 56 struct usb_request *req, int is_in)
55{ 57{
@@ -99,13 +101,15 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget,
99} 101}
100EXPORT_SYMBOL_GPL(usb_gadget_unmap_request); 102EXPORT_SYMBOL_GPL(usb_gadget_unmap_request);
101 103
104#endif /* CONFIG_HAS_DMA */
105
102/* ------------------------------------------------------------------------- */ 106/* ------------------------------------------------------------------------- */
103 107
104void usb_gadget_set_state(struct usb_gadget *gadget, 108void usb_gadget_set_state(struct usb_gadget *gadget,
105 enum usb_device_state state) 109 enum usb_device_state state)
106{ 110{
107 gadget->state = state; 111 gadget->state = state;
108 sysfs_notify(&gadget->dev.kobj, NULL, "status"); 112 sysfs_notify(&gadget->dev.kobj, NULL, "state");
109} 113}
110EXPORT_SYMBOL_GPL(usb_gadget_set_state); 114EXPORT_SYMBOL_GPL(usb_gadget_set_state);
111 115
@@ -194,9 +198,11 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
194 dev_set_name(&gadget->dev, "gadget"); 198 dev_set_name(&gadget->dev, "gadget");
195 gadget->dev.parent = parent; 199 gadget->dev.parent = parent;
196 200
201#ifdef CONFIG_HAS_DMA
197 dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask); 202 dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask);
198 gadget->dev.dma_parms = parent->dma_parms; 203 gadget->dev.dma_parms = parent->dma_parms;
199 gadget->dev.dma_mask = parent->dma_mask; 204 gadget->dev.dma_mask = parent->dma_mask;
205#endif
200 206
201 if (release) 207 if (release)
202 gadget->dev.release = release; 208 gadget->dev.release = release;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 2b702772d04d..6dce37555c4f 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -874,6 +874,7 @@ static int ehci_hub_control (
874 ehci->reset_done[wIndex] = jiffies 874 ehci->reset_done[wIndex] = jiffies
875 + msecs_to_jiffies(20); 875 + msecs_to_jiffies(20);
876 usb_hcd_start_port_resume(&hcd->self, wIndex); 876 usb_hcd_start_port_resume(&hcd->self, wIndex);
877 set_bit(wIndex, &ehci->resuming_ports);
877 /* check the port again */ 878 /* check the port again */
878 mod_timer(&ehci_to_hcd(ehci)->rh_timer, 879 mod_timer(&ehci_to_hcd(ehci)->rh_timer,
879 ehci->reset_done[wIndex]); 880 ehci->reset_done[wIndex]);
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index f80d0330d548..8e3c878f38cf 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1391,21 +1391,20 @@ iso_stream_schedule (
1391 1391
1392 /* Behind the scheduling threshold? */ 1392 /* Behind the scheduling threshold? */
1393 if (unlikely(start < next)) { 1393 if (unlikely(start < next)) {
1394 unsigned now2 = (now - base) & (mod - 1);
1394 1395
1395 /* USB_ISO_ASAP: Round up to the first available slot */ 1396 /* USB_ISO_ASAP: Round up to the first available slot */
1396 if (urb->transfer_flags & URB_ISO_ASAP) 1397 if (urb->transfer_flags & URB_ISO_ASAP)
1397 start += (next - start + period - 1) & -period; 1398 start += (next - start + period - 1) & -period;
1398 1399
1399 /* 1400 /*
1400 * Not ASAP: Use the next slot in the stream. If 1401 * Not ASAP: Use the next slot in the stream,
1401 * the entire URB falls before the threshold, fail. 1402 * no matter what.
1402 */ 1403 */
1403 else if (start + span - period < next) { 1404 else if (start + span - period < now2) {
1404 ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n", 1405 ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n",
1405 urb, start + base, 1406 urb, start + base,
1406 span - period, next + base); 1407 span - period, now2 + base);
1407 status = -EXDEV;
1408 goto fail;
1409 } 1408 }
1410 } 1409 }
1411 1410
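Note: the URB_ISO_ASAP line rounds the gap between start and next up to a whole number of periods; period is a power of two here, so -period equals the mask ~(period - 1). A worked instance (values ours):

    unsigned start = 10, next = 15, period = 4;
    unsigned delta = next - start;              /* 5                     */

    delta = (delta + period - 1) & -period;     /* (5 + 3) & ~3 = 8      */
    start += delta;                             /* 18: first slot >= next */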
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 08613e241894..0f1d193fef02 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -304,6 +304,11 @@ static int __init ohci_pci_init(void)
304 pr_info("%s: " DRIVER_DESC "\n", hcd_name); 304 pr_info("%s: " DRIVER_DESC "\n", hcd_name);
305 305
306 ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides); 306 ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides);
307
308 /* Entries for the PCI suspend/resume callbacks are special */
309 ohci_pci_hc_driver.pci_suspend = ohci_suspend;
310 ohci_pci_hc_driver.pci_resume = ohci_resume;
311
307 return pci_register_driver(&ohci_pci_driver); 312 return pci_register_driver(&ohci_pci_driver);
308} 313}
309module_init(ohci_pci_init); 314module_init(ohci_pci_init);
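Note: the order of the assignments matters here: ohci_init_driver() (re)populates the hc_driver from its template and overrides, so PM hooks set beforehand would be lost; presumably the pci_overrides mechanism does not cover pci_suspend/pci_resume, hence the comment that these entries "are special". Schematically:

    ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides); /* template first */
    ohci_pci_hc_driver.pci_suspend = ohci_suspend;         /* then PCI PM    */
    ohci_pci_hc_driver.pci_resume  = ohci_resume;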
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 4b8a2092432f..978c849f9c9a 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -13,6 +13,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
13void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); 13void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
14void sb800_prefetch(struct device *dev, int on); 14void sb800_prefetch(struct device *dev, int on);
15#else 15#else
16struct pci_dev;
16static inline void usb_amd_quirk_pll_disable(void) {} 17static inline void usb_amd_quirk_pll_disable(void) {}
17static inline void usb_amd_quirk_pll_enable(void) {} 18static inline void usb_amd_quirk_pll_enable(void) {}
18static inline void usb_amd_dev_put(void) {} 19static inline void usb_amd_dev_put(void) {}
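Note: `struct pci_dev;` is a forward declaration. The stubs in this #else branch take struct pci_dev pointers, and an incomplete type is enough for a pointer parameter, so the header no longer depends on <linux/pci.h> having been included first. Minimal illustration:

    struct pci_dev;                 /* incomplete type: fine for pointers */
    static inline void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) {}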
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index df6978abd7e6..6f8c2fd47675 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -24,6 +24,7 @@
24#include <linux/pci.h> 24#include <linux/pci.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/dmapool.h> 26#include <linux/dmapool.h>
27#include <linux/dma-mapping.h>
27 28
28#include "xhci.h" 29#include "xhci.h"
29 30
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index cc24e39b97d5..f00cb203faea 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -93,7 +93,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
93 } 93 }
94 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 94 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
95 pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { 95 pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
96 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
97 xhci->quirks |= XHCI_EP_LIMIT_QUIRK; 96 xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
98 xhci->limit_active_eps = 64; 97 xhci->limit_active_eps = 64;
99 xhci->quirks |= XHCI_SW_BW_CHECKING; 98 xhci->quirks |= XHCI_SW_BW_CHECKING;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 1e57eafa6910..5b08cd85f8e7 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -434,7 +434,7 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
434 434
435 /* A ring has pending URBs if its TD list is not empty */ 435 /* A ring has pending URBs if its TD list is not empty */
436 if (!(ep->ep_state & EP_HAS_STREAMS)) { 436 if (!(ep->ep_state & EP_HAS_STREAMS)) {
437 if (!(list_empty(&ep->ring->td_list))) 437 if (ep->ring && !(list_empty(&ep->ring->td_list)))
438 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0); 438 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
439 return; 439 return;
440 } 440 }
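Note: the added ep->ring test is a plain NULL guard: an endpoint whose ring was freed (or never allocated) leaves ep->ring NULL, and list_empty(&ep->ring->td_list) would dereference it before the doorbell could be rung.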
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2c49f00260ca..9478caa2f71f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -27,6 +27,7 @@
27#include <linux/moduleparam.h> 27#include <linux/moduleparam.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/dmi.h> 29#include <linux/dmi.h>
30#include <linux/dma-mapping.h>
30 31
31#include "xhci.h" 32#include "xhci.h"
32 33
@@ -329,7 +330,7 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
329 return; 330 return;
330} 331}
331 332
332static void xhci_msix_sync_irqs(struct xhci_hcd *xhci) 333static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
333{ 334{
334 int i; 335 int i;
335 336
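Note: __maybe_unused (also applied to xhci_change_max_exit_latency below) suppresses the unused-function warning when every caller is compiled out, e.g. with CONFIG_PM=n, while keeping the function compiled and type-checked; it replaces #ifdef fences around the definition. Generic sketch (name ours):

    /* still compiled and type-checked even if no caller survives */
    static int __maybe_unused pm_only_helper(int x)
    {
            return x + 1;
    }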
@@ -1181,9 +1182,6 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
1181 } 1182 }
1182 1183
1183 xhci = hcd_to_xhci(hcd); 1184 xhci = hcd_to_xhci(hcd);
1184 if (xhci->xhc_state & XHCI_STATE_HALTED)
1185 return -ENODEV;
1186
1187 if (check_virt_dev) { 1185 if (check_virt_dev) {
1188 if (!udev->slot_id || !xhci->devs[udev->slot_id]) { 1186 if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
1189 printk(KERN_DEBUG "xHCI %s called with unaddressed " 1187 printk(KERN_DEBUG "xHCI %s called with unaddressed "
@@ -1199,6 +1197,9 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
1199 } 1197 }
1200 } 1198 }
1201 1199
1200 if (xhci->xhc_state & XHCI_STATE_HALTED)
1201 return -ENODEV;
1202
1202 return 1; 1203 return 1;
1203} 1204}
1204 1205
@@ -3898,7 +3899,7 @@ int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
3898 * Issue an Evaluate Context command to change the Maximum Exit Latency in the 3899 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
3899 * slot context. If that succeeds, store the new MEL in the xhci_virt_device. 3900 * slot context. If that succeeds, store the new MEL in the xhci_virt_device.
3900 */ 3901 */
3901static int xhci_change_max_exit_latency(struct xhci_hcd *xhci, 3902static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
3902 struct usb_device *udev, u16 max_exit_latency) 3903 struct usb_device *udev, u16 max_exit_latency)
3903{ 3904{
3904 struct xhci_virt_device *virt_dev; 3905 struct xhci_virt_device *virt_dev;
@@ -4892,6 +4893,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4892 4893
4893 get_quirks(dev, xhci); 4894 get_quirks(dev, xhci);
4894 4895
4896 /* xHCI controllers that follow the xHCI 1.0 spec give a spurious
4897 * success event after a short transfer. This quirk ignores such
4898 * spurious events.
4899 */
4900 if (xhci->hci_version > 0x96)
4901 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
4902
4895 /* Make sure the HC is halted. */ 4903 /* Make sure the HC is halted. */
4896 retval = xhci_halt(xhci); 4904 retval = xhci_halt(xhci);
4897 if (retval) 4905 if (retval)
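Note: hci_version is the controller's interface version in BCD, so 0x96 means 0.96 and 0x100 means 1.0; the `> 0x96` test applies XHCI_SPURIOUS_SUCCESS to every 1.0-or-later controller, generalizing the single Panther Point PCI match removed from xhci-pci.c above. BCD compares directly as an integer:

    u16 v = 0x100;                 /* a 1.0 controller   */
    bool quirk = v > 0x96;         /* true: 1.0 > 0.96   */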
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index eb3c8c142fa9..eeb27208c0d1 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -830,7 +830,7 @@ static int adu_probe(struct usb_interface *interface,
830 830
831 /* let the user know what node this device is now attached to */ 831 /* let the user know what node this device is now attached to */
832 dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n", 832 dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n",
833 udev->descriptor.idProduct, dev->serial_number, 833 le16_to_cpu(udev->descriptor.idProduct), dev->serial_number,
834 (dev->minor - ADU_MINOR_BASE)); 834 (dev->minor - ADU_MINOR_BASE));
835exit: 835exit:
836 dbg(2, " %s : leave, return value %p (dev)", __func__, dev); 836 dbg(2, " %s : leave, return value %p (dev)", __func__, dev);
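Note: descriptor fields such as idProduct are stored little-endian on the wire (__le16), so printing the raw value is wrong on big-endian hosts; le16_to_cpu() is the portable accessor, as in the fix. Self-contained form (values ours):

    __le16 wire = cpu_to_le16(0x0064);   /* as held in the descriptor */
    u16 pid = le16_to_cpu(wire);         /* 0x0064 on any endianness  */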
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index c21386ec5d35..de98906f786d 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3247,6 +3247,7 @@ static const struct usb_device_id sisusb_table[] = {
3247 { USB_DEVICE(0x0711, 0x0903) }, 3247 { USB_DEVICE(0x0711, 0x0903) },
3248 { USB_DEVICE(0x0711, 0x0918) }, 3248 { USB_DEVICE(0x0711, 0x0918) },
3249 { USB_DEVICE(0x0711, 0x0920) }, 3249 { USB_DEVICE(0x0711, 0x0920) },
3250 { USB_DEVICE(0x0711, 0x0950) },
3250 { USB_DEVICE(0x182d, 0x021c) }, 3251 { USB_DEVICE(0x182d, 0x021c) },
3251 { USB_DEVICE(0x182d, 0x0269) }, 3252 { USB_DEVICE(0x182d, 0x0269) },
3252 { } 3253 { }
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 6708a3b78ad8..f44e8b5e00c9 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -481,7 +481,7 @@ static u64 omap2430_dmamask = DMA_BIT_MASK(32);
481 481
482static int omap2430_probe(struct platform_device *pdev) 482static int omap2430_probe(struct platform_device *pdev)
483{ 483{
484 struct resource musb_resources[2]; 484 struct resource musb_resources[3];
485 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; 485 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
486 struct omap_musb_board_data *data; 486 struct omap_musb_board_data *data;
487 struct platform_device *musb; 487 struct platform_device *musb;
@@ -581,6 +581,11 @@ static int omap2430_probe(struct platform_device *pdev)
581 musb_resources[1].end = pdev->resource[1].end; 581 musb_resources[1].end = pdev->resource[1].end;
582 musb_resources[1].flags = pdev->resource[1].flags; 582 musb_resources[1].flags = pdev->resource[1].flags;
583 583
584 musb_resources[2].name = pdev->resource[2].name;
585 musb_resources[2].start = pdev->resource[2].start;
586 musb_resources[2].end = pdev->resource[2].end;
587 musb_resources[2].flags = pdev->resource[2].flags;
588
584 ret = platform_device_add_resources(musb, musb_resources, 589 ret = platform_device_add_resources(musb, musb_resources,
585 ARRAY_SIZE(musb_resources)); 590 ARRAY_SIZE(musb_resources));
586 if (ret) { 591 if (ret) {
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 2c06a8969a9f..6f8a9ca96ae7 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -1156,7 +1156,7 @@ static u64 tusb_dmamask = DMA_BIT_MASK(32);
1156 1156
1157static int tusb_probe(struct platform_device *pdev) 1157static int tusb_probe(struct platform_device *pdev)
1158{ 1158{
1159 struct resource musb_resources[2]; 1159 struct resource musb_resources[3];
1160 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; 1160 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
1161 struct platform_device *musb; 1161 struct platform_device *musb;
1162 struct tusb6010_glue *glue; 1162 struct tusb6010_glue *glue;
@@ -1199,6 +1199,11 @@ static int tusb_probe(struct platform_device *pdev)
1199 musb_resources[1].end = pdev->resource[1].end; 1199 musb_resources[1].end = pdev->resource[1].end;
1200 musb_resources[1].flags = pdev->resource[1].flags; 1200 musb_resources[1].flags = pdev->resource[1].flags;
1201 1201
1202 musb_resources[2].name = pdev->resource[2].name;
1203 musb_resources[2].start = pdev->resource[2].start;
1204 musb_resources[2].end = pdev->resource[2].end;
1205 musb_resources[2].flags = pdev->resource[2].flags;
1206
1202 ret = platform_device_add_resources(musb, musb_resources, 1207 ret = platform_device_add_resources(musb, musb_resources,
1203 ARRAY_SIZE(musb_resources)); 1208 ARRAY_SIZE(musb_resources));
1204 if (ret) { 1209 if (ret) {
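Note: both musb glue drivers grow the on-stack array to three resources and copy the third entry field by field, mirroring the existing two copies. An equivalent loop (sketch; it assumes the parent platform device really does provide all three resources) keeps the copies tied to ARRAY_SIZE():

    int i;

    for (i = 0; i < ARRAY_SIZE(musb_resources); i++) {
            musb_resources[i].name  = pdev->resource[i].name;
            musb_resources[i].start = pdev->resource[i].start;
            musb_resources[i].end   = pdev->resource[i].end;
            musb_resources[i].flags = pdev->resource[i].flags;
    }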
diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
index ca266280895d..e1859b8ef567 100644
--- a/drivers/usb/phy/phy-fsl-usb.h
+++ b/drivers/usb/phy/phy-fsl-usb.h
@@ -15,7 +15,7 @@
15 * 675 Mass Ave, Cambridge, MA 02139, USA. 15 * 675 Mass Ave, Cambridge, MA 02139, USA.
16 */ 16 */
17 17
18#include "otg_fsm.h" 18#include "phy-fsm-usb.h"
19#include <linux/usb/otg.h> 19#include <linux/usb/otg.h>
20#include <linux/ioctl.h> 20#include <linux/ioctl.h>
21 21
diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c
index c520b3548e7c..7f4596606e18 100644
--- a/drivers/usb/phy/phy-fsm-usb.c
+++ b/drivers/usb/phy/phy-fsm-usb.c
@@ -29,7 +29,7 @@
29#include <linux/usb/gadget.h> 29#include <linux/usb/gadget.h>
30#include <linux/usb/otg.h> 30#include <linux/usb/otg.h>
31 31
32#include "phy-otg-fsm.h" 32#include "phy-fsm-usb.h"
33 33
34/* Change USB protocol when there is a protocol change */ 34/* Change USB protocol when there is a protocol change */
35static int otg_set_protocol(struct otg_fsm *fsm, int protocol) 35static int otg_set_protocol(struct otg_fsm *fsm, int protocol)
diff --git a/drivers/usb/phy/phy-omap-usb3.c b/drivers/usb/phy/phy-omap-usb3.c
index efe6e1464f45..a2fb30bbb971 100644
--- a/drivers/usb/phy/phy-omap-usb3.c
+++ b/drivers/usb/phy/phy-omap-usb3.c
@@ -71,9 +71,9 @@ static struct usb_dpll_params omap_usb3_dpll_params[NUM_SYS_CLKS] = {
71 {1250, 5, 4, 20, 0}, /* 12 MHz */ 71 {1250, 5, 4, 20, 0}, /* 12 MHz */
72 {3125, 20, 4, 20, 0}, /* 16.8 MHz */ 72 {3125, 20, 4, 20, 0}, /* 16.8 MHz */
73 {1172, 8, 4, 20, 65537}, /* 19.2 MHz */ 73 {1172, 8, 4, 20, 65537}, /* 19.2 MHz */
74 {1000, 7, 4, 10, 0}, /* 20 MHz */
74 {1250, 12, 4, 20, 0}, /* 26 MHz */ 75 {1250, 12, 4, 20, 0}, /* 26 MHz */
75 {3125, 47, 4, 20, 92843}, /* 38.4 MHz */ 76 {3125, 47, 4, 20, 92843}, /* 38.4 MHz */
76 {1000, 7, 4, 10, 0}, /* 20 MHz */
77 77
78}; 78};
79 79
diff --git a/drivers/usb/phy/phy-samsung-usb2.c b/drivers/usb/phy/phy-samsung-usb2.c
index 1011c16ade7e..758b86d0fcb3 100644
--- a/drivers/usb/phy/phy-samsung-usb2.c
+++ b/drivers/usb/phy/phy-samsung-usb2.c
@@ -388,7 +388,7 @@ static int samsung_usb2phy_probe(struct platform_device *pdev)
388 clk = devm_clk_get(dev, "otg"); 388 clk = devm_clk_get(dev, "otg");
389 389
390 if (IS_ERR(clk)) { 390 if (IS_ERR(clk)) {
391 dev_err(dev, "Failed to get otg clock\n"); 391 dev_err(dev, "Failed to get usbhost/otg clock\n");
392 return PTR_ERR(clk); 392 return PTR_ERR(clk);
393 } 393 }
394 394
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index ed4949faa70d..805940c37353 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -855,10 +855,6 @@ static int usbhsg_gadget_stop(struct usb_gadget *gadget,
855 struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); 855 struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
856 struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); 856 struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
857 857
858 if (!driver ||
859 !driver->unbind)
860 return -EINVAL;
861
862 usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD); 858 usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
863 gpriv->driver = NULL; 859 gpriv->driver = NULL;
864 860
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 8c3a42ea910c..7eef9b33fde6 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -719,6 +719,13 @@ config USB_SERIAL_FLASHLOADER
719 To compile this driver as a module, choose M here: the 719 To compile this driver as a module, choose M here: the
720 module will be called flashloader. 720 module will be called flashloader.
721 721
722config USB_SERIAL_SUUNTO
723 tristate "USB Suunto ANT+ driver"
724 help
725 Say Y here if you want to use the Suunto ANT+ USB device.
726
727 To compile this driver as a module, choose M here: the
728 module will be called suunto.
722 729
723config USB_SERIAL_DEBUG 730config USB_SERIAL_DEBUG
724 tristate "USB Debugging Device" 731 tristate "USB Debugging Device"
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index f7130114488f..a14a870d993f 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o
54obj-$(CONFIG_USB_SERIAL_SIERRAWIRELESS) += sierra.o 54obj-$(CONFIG_USB_SERIAL_SIERRAWIRELESS) += sierra.o
55obj-$(CONFIG_USB_SERIAL_SPCP8X5) += spcp8x5.o 55obj-$(CONFIG_USB_SERIAL_SPCP8X5) += spcp8x5.o
56obj-$(CONFIG_USB_SERIAL_SSU100) += ssu100.o 56obj-$(CONFIG_USB_SERIAL_SSU100) += ssu100.o
57obj-$(CONFIG_USB_SERIAL_SUUNTO) += suunto.o
57obj-$(CONFIG_USB_SERIAL_SYMBOL) += symbolserial.o 58obj-$(CONFIG_USB_SERIAL_SYMBOL) += symbolserial.o
58obj-$(CONFIG_USB_SERIAL_WWAN) += usb_wwan.o 59obj-$(CONFIG_USB_SERIAL_WWAN) += usb_wwan.o
59obj-$(CONFIG_USB_SERIAL_TI) += ti_usb_3410_5052.o 60obj-$(CONFIG_USB_SERIAL_TI) += ti_usb_3410_5052.o
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index d6ef2f8da37d..0eae4ba3760e 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -53,6 +53,7 @@ static const struct usb_device_id id_table[] = {
53 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ 53 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
54 { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ 54 { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
55 { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ 55 { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
56 { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
56 { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ 57 { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
57 { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ 58 { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
58 { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ 59 { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
@@ -118,6 +119,8 @@ static const struct usb_device_id id_table[] = {
118 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ 119 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
119 { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ 120 { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
120 { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ 121 { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
122 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
123 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
121 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ 124 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
122 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ 125 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
123 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ 126 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
@@ -148,6 +151,7 @@ static const struct usb_device_id id_table[] = {
148 { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ 151 { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
149 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ 152 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
150 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ 153 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
154 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
151 { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ 155 { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
152 { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ 156 { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
153 { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ 157 { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 7260ec660347..b65e657c641d 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -735,9 +735,34 @@ static struct usb_device_id id_table_combined [] = {
735 { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID), 735 { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
736 .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, 736 .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
737 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, 737 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
738 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) }, 738 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
739 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) }, 739 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
740 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) }, 740 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
741 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57B_PID) },
742 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29A_PID) },
743 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29B_PID) },
744 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29F_PID) },
745 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62B_PID) },
746 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S01_PID) },
747 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63_PID) },
748 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29C_PID) },
749 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_81B_PID) },
750 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_82B_PID) },
751 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5D_PID) },
752 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K4Y_PID) },
753 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5G_PID) },
754 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S05_PID) },
755 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_60_PID) },
756 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_61_PID) },
757 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62_PID) },
758 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63B_PID) },
759 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_64_PID) },
760 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_65_PID) },
761 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92_PID) },
762 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92D_PID) },
763 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_W5R_PID) },
764 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_A5R_PID) },
765 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_PW1_PID) },
741 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, 766 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
742 { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, 767 { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
743 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, 768 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 6dd79253205d..1b8af461b522 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -815,11 +815,35 @@
815/* 815/*
816 * RT Systems programming cables for various ham radios 816 * RT Systems programming cables for various ham radios
817 */ 817 */
818#define RTSYSTEMS_VID 0x2100 /* Vendor ID */ 818#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
819#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */ 819#define RTSYSTEMS_USB_S03_PID 0x9001 /* RTS-03 USB to Serial Adapter */
820#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */ 820#define RTSYSTEMS_USB_59_PID 0x9e50 /* USB-59 USB to 8 pin plug */
821#define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */ 821#define RTSYSTEMS_USB_57A_PID 0x9e51 /* USB-57A USB to 4pin 3.5mm plug */
822 822#define RTSYSTEMS_USB_57B_PID 0x9e52 /* USB-57B USB to extended 4pin 3.5mm plug */
823#define RTSYSTEMS_USB_29A_PID 0x9e53 /* USB-29A USB to 3.5mm stereo plug */
824#define RTSYSTEMS_USB_29B_PID 0x9e54 /* USB-29B USB to 6 pin mini din */
825#define RTSYSTEMS_USB_29F_PID 0x9e55 /* USB-29F USB to 6 pin modular plug */
826#define RTSYSTEMS_USB_62B_PID 0x9e56 /* USB-62B USB to 8 pin mini din plug*/
827#define RTSYSTEMS_USB_S01_PID 0x9e57 /* USB-RTS01 USB to 3.5 mm stereo plug*/
828#define RTSYSTEMS_USB_63_PID 0x9e58 /* USB-63 USB to 9 pin female*/
829#define RTSYSTEMS_USB_29C_PID 0x9e59 /* USB-29C USB to 4 pin modular plug*/
830#define RTSYSTEMS_USB_81B_PID 0x9e5A /* USB-81 USB to 8 pin mini din plug*/
831#define RTSYSTEMS_USB_82B_PID 0x9e5B /* USB-82 USB to 2.5 mm stereo plug*/
832#define RTSYSTEMS_USB_K5D_PID 0x9e5C /* USB-K5D USB to 8 pin modular plug*/
833#define RTSYSTEMS_USB_K4Y_PID 0x9e5D /* USB-K4Y USB to 2.5/3.5 mm plugs*/
834#define RTSYSTEMS_USB_K5G_PID 0x9e5E /* USB-K5G USB to 8 pin modular plug*/
835#define RTSYSTEMS_USB_S05_PID 0x9e5F /* USB-RTS05 USB to 2.5 mm stereo plug*/
836#define RTSYSTEMS_USB_60_PID 0x9e60 /* USB-60 USB to 6 pin din*/
837#define RTSYSTEMS_USB_61_PID 0x9e61 /* USB-61 USB to 6 pin mini din*/
838#define RTSYSTEMS_USB_62_PID 0x9e62 /* USB-62 USB to 8 pin mini din*/
839#define RTSYSTEMS_USB_63B_PID 0x9e63 /* USB-63 USB to 9 pin female*/
840#define RTSYSTEMS_USB_64_PID 0x9e64 /* USB-64 USB to 9 pin male*/
841#define RTSYSTEMS_USB_65_PID 0x9e65 /* USB-65 USB to 9 pin female null modem*/
842#define RTSYSTEMS_USB_92_PID 0x9e66 /* USB-92 USB to 12 pin plug*/
843#define RTSYSTEMS_USB_92D_PID 0x9e67 /* USB-92D USB to 12 pin plug data*/
844#define RTSYSTEMS_USB_W5R_PID 0x9e68 /* USB-W5R USB to 8 pin modular plug*/
845#define RTSYSTEMS_USB_A5R_PID 0x9e69 /* USB-A5R USB to 8 pin modular plug*/
846#define RTSYSTEMS_USB_PW1_PID 0x9e6A /* USB-PW1 USB to 8 pin modular plug*/
823 847
824/* 848/*
825 * Physik Instrumente 849 * Physik Instrumente
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 5a979729f8ec..58c17fdc85eb 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -2303,7 +2303,7 @@ static int keyspan_startup(struct usb_serial *serial)
2303 if (d_details == NULL) { 2303 if (d_details == NULL) {
2304 dev_err(&serial->dev->dev, "%s - unknown product id %x\n", 2304 dev_err(&serial->dev->dev, "%s - unknown product id %x\n",
2305 __func__, le16_to_cpu(serial->dev->descriptor.idProduct)); 2305 __func__, le16_to_cpu(serial->dev->descriptor.idProduct));
2306 return 1; 2306 return -ENODEV;
2307 } 2307 }
2308 2308
2309 /* Setup private data for serial driver */ 2309 /* Setup private data for serial driver */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 51da424327b0..b01300164fc0 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -90,6 +90,7 @@ struct urbtracker {
90 struct list_head urblist_entry; 90 struct list_head urblist_entry;
91 struct kref ref_count; 91 struct kref ref_count;
92 struct urb *urb; 92 struct urb *urb;
93 struct usb_ctrlrequest *setup;
93}; 94};
94 95
95enum mos7715_pp_modes { 96enum mos7715_pp_modes {
@@ -271,6 +272,7 @@ static void destroy_urbtracker(struct kref *kref)
271 struct mos7715_parport *mos_parport = urbtrack->mos_parport; 272 struct mos7715_parport *mos_parport = urbtrack->mos_parport;
272 273
273 usb_free_urb(urbtrack->urb); 274 usb_free_urb(urbtrack->urb);
275 kfree(urbtrack->setup);
274 kfree(urbtrack); 276 kfree(urbtrack);
275 kref_put(&mos_parport->ref_count, destroy_mos_parport); 277 kref_put(&mos_parport->ref_count, destroy_mos_parport);
276} 278}
@@ -355,7 +357,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
355 struct urbtracker *urbtrack; 357 struct urbtracker *urbtrack;
356 int ret_val; 358 int ret_val;
357 unsigned long flags; 359 unsigned long flags;
358 struct usb_ctrlrequest setup;
359 struct usb_serial *serial = mos_parport->serial; 360 struct usb_serial *serial = mos_parport->serial;
360 struct usb_device *usbdev = serial->dev; 361 struct usb_device *usbdev = serial->dev;
361 362
@@ -373,14 +374,20 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
373 kfree(urbtrack); 374 kfree(urbtrack);
374 return -ENOMEM; 375 return -ENOMEM;
375 } 376 }
376 setup.bRequestType = (__u8)0x40; 377 urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL);
377 setup.bRequest = (__u8)0x0e; 378 if (!urbtrack->setup) {
378 setup.wValue = get_reg_value(reg, dummy); 379 usb_free_urb(urbtrack->urb);
379 setup.wIndex = get_reg_index(reg); 380 kfree(urbtrack);
380 setup.wLength = 0; 381 return -ENOMEM;
382 }
383 urbtrack->setup->bRequestType = (__u8)0x40;
384 urbtrack->setup->bRequest = (__u8)0x0e;
385 urbtrack->setup->wValue = get_reg_value(reg, dummy);
386 urbtrack->setup->wIndex = get_reg_index(reg);
387 urbtrack->setup->wLength = 0;
381 usb_fill_control_urb(urbtrack->urb, usbdev, 388 usb_fill_control_urb(urbtrack->urb, usbdev,
382 usb_sndctrlpipe(usbdev, 0), 389 usb_sndctrlpipe(usbdev, 0),
383 (unsigned char *)&setup, 390 (unsigned char *)urbtrack->setup,
384 NULL, 0, async_complete, urbtrack); 391 NULL, 0, async_complete, urbtrack);
385 kref_init(&urbtrack->ref_count); 392 kref_init(&urbtrack->ref_count);
386 INIT_LIST_HEAD(&urbtrack->urblist_entry); 393 INIT_LIST_HEAD(&urbtrack->urblist_entry);
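Note: this mos7720 change fixes a lifetime bug. The setup packet of an asynchronous control URB is read by the host controller after usb_submit_urb() returns, so it must not live on the submitting function's stack; it has to be heap-allocated and freed from the completion path (here via destroy_urbtracker()). The rule in sketch form (done_fn/ctx are our placeholders):

    struct usb_ctrlrequest *dr = kmalloc(sizeof(*dr), GFP_KERNEL);

    if (!dr)
            return -ENOMEM;
    /* ... fill in *dr ... */
    usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
                         (unsigned char *)dr, NULL, 0, done_fn, ctx);
    /* kfree(dr) only once the completion handler has run */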
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 0a818b238508..3bac4693c038 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -183,7 +183,10 @@
183#define LED_ON_MS 500 183#define LED_ON_MS 500
184#define LED_OFF_MS 500 184#define LED_OFF_MS 500
185 185
186static int device_type; 186enum mos7840_flag {
187 MOS7840_FLAG_CTRL_BUSY,
188 MOS7840_FLAG_LED_BUSY,
189};
187 190
188static const struct usb_device_id id_table[] = { 191static const struct usb_device_id id_table[] = {
189 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, 192 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
@@ -238,9 +241,12 @@ struct moschip_port {
238 241
239 /* For device(s) with LED indicator */ 242 /* For device(s) with LED indicator */
240 bool has_led; 243 bool has_led;
241 bool led_flag;
242 struct timer_list led_timer1; /* Timer for LED on */ 244 struct timer_list led_timer1; /* Timer for LED on */
243 struct timer_list led_timer2; /* Timer for LED off */ 245 struct timer_list led_timer2; /* Timer for LED off */
246 struct urb *led_urb;
247 struct usb_ctrlrequest *led_dr;
248
249 unsigned long flags;
244}; 250};
245 251
246/* 252/*
@@ -460,10 +466,10 @@ static void mos7840_control_callback(struct urb *urb)
460 case -ESHUTDOWN: 466 case -ESHUTDOWN:
461 /* this urb is terminated, clean up */ 467 /* this urb is terminated, clean up */
462 dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status); 468 dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status);
463 return; 469 goto out;
464 default: 470 default:
465 dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status); 471 dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status);
466 return; 472 goto out;
467 } 473 }
468 474
469 dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length); 475 dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
@@ -476,6 +482,8 @@ static void mos7840_control_callback(struct urb *urb)
476 mos7840_handle_new_msr(mos7840_port, regval); 482 mos7840_handle_new_msr(mos7840_port, regval);
477 else if (mos7840_port->MsrLsr == 1) 483 else if (mos7840_port->MsrLsr == 1)
478 mos7840_handle_new_lsr(mos7840_port, regval); 484 mos7840_handle_new_lsr(mos7840_port, regval);
485out:
486 clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mos7840_port->flags);
479} 487}
480 488
481static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg, 489static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
@@ -486,6 +494,9 @@ static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
486 unsigned char *buffer = mcs->ctrl_buf; 494 unsigned char *buffer = mcs->ctrl_buf;
487 int ret; 495 int ret;
488 496
497 if (test_and_set_bit_lock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags))
498 return -EBUSY;
499
489 dr->bRequestType = MCS_RD_RTYPE; 500 dr->bRequestType = MCS_RD_RTYPE;
490 dr->bRequest = MCS_RDREQ; 501 dr->bRequest = MCS_RDREQ;
491 dr->wValue = cpu_to_le16(Wval); /* 0 */ 502 dr->wValue = cpu_to_le16(Wval); /* 0 */
@@ -497,6 +508,9 @@ static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
497 mos7840_control_callback, mcs); 508 mos7840_control_callback, mcs);
498 mcs->control_urb->transfer_buffer_length = 2; 509 mcs->control_urb->transfer_buffer_length = 2;
499 ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC); 510 ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
511 if (ret)
512 clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags);
513
500 return ret; 514 return ret;
501} 515}
502 516
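Note: MOS7840_FLAG_CTRL_BUSY turns the single shared control URB into a non-blocking mutex: test_and_set_bit_lock() returns nonzero when the URB is already in flight (the caller backs off with -EBUSY), and clear_bit_unlock() releases it with the proper memory ordering, either on submit failure (above) or at the end of mos7840_control_callback(). The claim/release pattern:

    if (test_and_set_bit_lock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags))
            return -EBUSY;          /* control URB already owned */
    ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
    if (ret)
            clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags);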
@@ -523,7 +537,7 @@ static void mos7840_set_led_async(struct moschip_port *mcs, __u16 wval,
523 __u16 reg) 537 __u16 reg)
524{ 538{
525 struct usb_device *dev = mcs->port->serial->dev; 539 struct usb_device *dev = mcs->port->serial->dev;
526 struct usb_ctrlrequest *dr = mcs->dr; 540 struct usb_ctrlrequest *dr = mcs->led_dr;
527 541
528 dr->bRequestType = MCS_WR_RTYPE; 542 dr->bRequestType = MCS_WR_RTYPE;
529 dr->bRequest = MCS_WRREQ; 543 dr->bRequest = MCS_WRREQ;
@@ -531,10 +545,10 @@ static void mos7840_set_led_async(struct moschip_port *mcs, __u16 wval,
531 dr->wIndex = cpu_to_le16(reg); 545 dr->wIndex = cpu_to_le16(reg);
532 dr->wLength = cpu_to_le16(0); 546 dr->wLength = cpu_to_le16(0);
533 547
534 usb_fill_control_urb(mcs->control_urb, dev, usb_sndctrlpipe(dev, 0), 548 usb_fill_control_urb(mcs->led_urb, dev, usb_sndctrlpipe(dev, 0),
535 (unsigned char *)dr, NULL, 0, mos7840_set_led_callback, NULL); 549 (unsigned char *)dr, NULL, 0, mos7840_set_led_callback, NULL);
536 550
537 usb_submit_urb(mcs->control_urb, GFP_ATOMIC); 551 usb_submit_urb(mcs->led_urb, GFP_ATOMIC);
538} 552}
539 553
540static void mos7840_set_led_sync(struct usb_serial_port *port, __u16 reg, 554static void mos7840_set_led_sync(struct usb_serial_port *port, __u16 reg,
@@ -560,7 +574,19 @@ static void mos7840_led_flag_off(unsigned long arg)
560{ 574{
561 struct moschip_port *mcs = (struct moschip_port *) arg; 575 struct moschip_port *mcs = (struct moschip_port *) arg;
562 576
563 mcs->led_flag = false; 577 clear_bit_unlock(MOS7840_FLAG_LED_BUSY, &mcs->flags);
578}
579
580static void mos7840_led_activity(struct usb_serial_port *port)
581{
582 struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
583
584 if (test_and_set_bit_lock(MOS7840_FLAG_LED_BUSY, &mos7840_port->flags))
585 return;
586
587 mos7840_set_led_async(mos7840_port, 0x0301, MODEM_CONTROL_REGISTER);
588 mod_timer(&mos7840_port->led_timer1,
589 jiffies + msecs_to_jiffies(LED_ON_MS));
564} 590}
565 591
566/***************************************************************************** 592/*****************************************************************************
@@ -758,14 +784,8 @@ static void mos7840_bulk_in_callback(struct urb *urb)
758 return; 784 return;
759 } 785 }
760 786
761 /* Turn on LED */ 787 if (mos7840_port->has_led)
762 if (mos7840_port->has_led && !mos7840_port->led_flag) { 788 mos7840_led_activity(port);
763 mos7840_port->led_flag = true;
764 mos7840_set_led_async(mos7840_port, 0x0301,
765 MODEM_CONTROL_REGISTER);
766 mod_timer(&mos7840_port->led_timer1,
767 jiffies + msecs_to_jiffies(LED_ON_MS));
768 }
769 789
770 mos7840_port->read_urb_busy = true; 790 mos7840_port->read_urb_busy = true;
771 retval = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC); 791 retval = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
@@ -816,18 +836,6 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
816/************************************************************************/ 836/************************************************************************/
817/* D R I V E R T T Y I N T E R F A C E F U N C T I O N S */ 837/* D R I V E R T T Y I N T E R F A C E F U N C T I O N S */
818/************************************************************************/ 838/************************************************************************/
819#ifdef MCSSerialProbe
820static int mos7840_serial_probe(struct usb_serial *serial,
821 const struct usb_device_id *id)
822{
823
824 /*need to implement the mode_reg reading and updating\
825 structures usb_serial_ device_type\
826 (i.e num_ports, num_bulkin,bulkout etc) */
827 /* Also we can update the changes attach */
828 return 1;
829}
830#endif
831 839
832/***************************************************************************** 840/*****************************************************************************
833 * mos7840_open 841 * mos7840_open
@@ -905,20 +913,20 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
905 status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data); 913 status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
906 if (status < 0) { 914 if (status < 0) {
907 dev_dbg(&port->dev, "Reading Spreg failed\n"); 915 dev_dbg(&port->dev, "Reading Spreg failed\n");
908 return -1; 916 goto err;
909 } 917 }
910 Data |= 0x80; 918 Data |= 0x80;
911 status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data); 919 status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
912 if (status < 0) { 920 if (status < 0) {
913 dev_dbg(&port->dev, "writing Spreg failed\n"); 921 dev_dbg(&port->dev, "writing Spreg failed\n");
914 return -1; 922 goto err;
915 } 923 }
916 924
917 Data &= ~0x80; 925 Data &= ~0x80;
918 status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data); 926 status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
919 if (status < 0) { 927 if (status < 0) {
920 dev_dbg(&port->dev, "writing Spreg failed\n"); 928 dev_dbg(&port->dev, "writing Spreg failed\n");
921 return -1; 929 goto err;
922 } 930 }
923 /* End of block to be checked */ 931 /* End of block to be checked */
924 932
@@ -927,7 +935,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
927 &Data); 935 &Data);
928 if (status < 0) { 936 if (status < 0) {
929 dev_dbg(&port->dev, "Reading Controlreg failed\n"); 937 dev_dbg(&port->dev, "Reading Controlreg failed\n");
930 return -1; 938 goto err;
931 } 939 }
932 Data |= 0x08; /* Driver done bit */ 940 Data |= 0x08; /* Driver done bit */
933 Data |= 0x20; /* rx_disable */ 941 Data |= 0x20; /* rx_disable */
@@ -935,7 +943,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
935 mos7840_port->ControlRegOffset, Data); 943 mos7840_port->ControlRegOffset, Data);
936 if (status < 0) { 944 if (status < 0) {
937 dev_dbg(&port->dev, "writing Controlreg failed\n"); 945 dev_dbg(&port->dev, "writing Controlreg failed\n");
938 return -1; 946 goto err;
939 } 947 }
940 /* do register settings here */ 948 /* do register settings here */
941 /* Set all regs to the device default values. */ 949 /* Set all regs to the device default values. */
@@ -946,21 +954,21 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
946 status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); 954 status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
947 if (status < 0) { 955 if (status < 0) {
948 dev_dbg(&port->dev, "disabling interrupts failed\n"); 956 dev_dbg(&port->dev, "disabling interrupts failed\n");
949 return -1; 957 goto err;
950 } 958 }
951 /* Set FIFO_CONTROL_REGISTER to the default value */ 959 /* Set FIFO_CONTROL_REGISTER to the default value */
952 Data = 0x00; 960 Data = 0x00;
953 status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); 961 status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
954 if (status < 0) { 962 if (status < 0) {
955 dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n"); 963 dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n");
956 return -1; 964 goto err;
957 } 965 }
958 966
959 Data = 0xcf; 967 Data = 0xcf;
960 status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); 968 status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
961 if (status < 0) { 969 if (status < 0) {
962 dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n"); 970 dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n");
963 return -1; 971 goto err;
964 } 972 }
965 973
966 Data = 0x03; 974 Data = 0x03;
@@ -1103,6 +1111,15 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
1103 /* mos7840_change_port_settings(mos7840_port,old_termios); */ 1111 /* mos7840_change_port_settings(mos7840_port,old_termios); */
1104 1112
1105 return 0; 1113 return 0;
1114err:
1115 for (j = 0; j < NUM_URBS; ++j) {
1116 urb = mos7840_port->write_urb_pool[j];
1117 if (!urb)
1118 continue;
1119 kfree(urb->transfer_buffer);
1120 usb_free_urb(urb);
1121 }
1122 return status;
1106} 1123}
1107 1124
1108/***************************************************************************** 1125/*****************************************************************************
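Note: the old early returns were doubly wrong: `return -1` reaches the caller as -EPERM, an arbitrary errno, and every early exit leaked the write URB pool allocated earlier in mos7840_open(); the new single `err:` label frees the pool and propagates the real status from the failing register access.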
@@ -1445,13 +1462,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
1445 data1 = urb->transfer_buffer; 1462 data1 = urb->transfer_buffer;
1446 dev_dbg(&port->dev, "bulkout endpoint is %d\n", port->bulk_out_endpointAddress); 1463 dev_dbg(&port->dev, "bulkout endpoint is %d\n", port->bulk_out_endpointAddress);
1447 1464
1448 /* Turn on LED */ 1465 if (mos7840_port->has_led)
1449 if (mos7840_port->has_led && !mos7840_port->led_flag) { 1466 mos7840_led_activity(port);
1450 mos7840_port->led_flag = true;
1451 mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0301);
1452 mod_timer(&mos7840_port->led_timer1,
1453 jiffies + msecs_to_jiffies(LED_ON_MS));
1454 }
1455 1467
1456 /* send it down the pipe */ 1468 /* send it down the pipe */
1457 status = usb_submit_urb(urb, GFP_ATOMIC); 1469 status = usb_submit_urb(urb, GFP_ATOMIC);
@@ -2178,38 +2190,48 @@ static int mos7810_check(struct usb_serial *serial)
2178 return 0; 2190 return 0;
2179} 2191}
2180 2192
2181static int mos7840_calc_num_ports(struct usb_serial *serial) 2193static int mos7840_probe(struct usb_serial *serial,
2194 const struct usb_device_id *id)
2182{ 2195{
2183 __u16 data = 0x00; 2196 u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
2184 u8 *buf; 2197 u8 *buf;
2185 int mos7840_num_ports; 2198 int device_type;
2199
2200 if (product == MOSCHIP_DEVICE_ID_7810 ||
2201 product == MOSCHIP_DEVICE_ID_7820) {
2202 device_type = product;
2203 goto out;
2204 }
2186 2205
2187 buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL); 2206 buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
2188 if (buf) { 2207 if (!buf)
2189 usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 2208 return -ENOMEM;
2209
2210 usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
2190 MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf, 2211 MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
2191 VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); 2212 VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
2192 data = *buf;
2193 kfree(buf);
2194 }
2195 2213
2196 if (serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7810 || 2214 /* For a MCS7840 device GPIO0 must be set to 1 */
2197 serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7820) { 2215 if (buf[0] & 0x01)
2198 device_type = serial->dev->descriptor.idProduct; 2216 device_type = MOSCHIP_DEVICE_ID_7840;
2199 } else { 2217 else if (mos7810_check(serial))
2200 /* For a MCS7840 device GPIO0 must be set to 1 */ 2218 device_type = MOSCHIP_DEVICE_ID_7810;
2201 if ((data & 0x01) == 1) 2219 else
2202 device_type = MOSCHIP_DEVICE_ID_7840; 2220 device_type = MOSCHIP_DEVICE_ID_7820;
2203 else if (mos7810_check(serial)) 2221
2204 device_type = MOSCHIP_DEVICE_ID_7810; 2222 kfree(buf);
2205 else 2223out:
2206 device_type = MOSCHIP_DEVICE_ID_7820; 2224 usb_set_serial_data(serial, (void *)(unsigned long)device_type);
2207 } 2225
2226 return 0;
2227}
2228
2229static int mos7840_calc_num_ports(struct usb_serial *serial)
2230{
2231 int device_type = (unsigned long)usb_get_serial_data(serial);
2232 int mos7840_num_ports;
2208 2233
2209 mos7840_num_ports = (device_type >> 4) & 0x000F; 2234 mos7840_num_ports = (device_type >> 4) & 0x000F;
2210 serial->num_bulk_in = mos7840_num_ports;
2211 serial->num_bulk_out = mos7840_num_ports;
2212 serial->num_ports = mos7840_num_ports;
2213 2235
2214 return mos7840_num_ports; 2236 return mos7840_num_ports;
2215} 2237}
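Note: device_type moves from a file-wide static (racy when two adapters probe concurrently) into per-device driver data set once at probe time. An int travels through the void-pointer slot via unsigned long, which is pointer-sized on all Linux targets:

    usb_set_serial_data(serial, (void *)(unsigned long)device_type);
    /* later, in calc_num_ports() and port_probe(): */
    int device_type = (unsigned long)usb_get_serial_data(serial);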
@@ -2217,6 +2239,7 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
2217static int mos7840_port_probe(struct usb_serial_port *port) 2239static int mos7840_port_probe(struct usb_serial_port *port)
2218{ 2240{
2219 struct usb_serial *serial = port->serial; 2241 struct usb_serial *serial = port->serial;
2242 int device_type = (unsigned long)usb_get_serial_data(serial);
2220 struct moschip_port *mos7840_port; 2243 struct moschip_port *mos7840_port;
2221 int status; 2244 int status;
2222 int pnum; 2245 int pnum;
@@ -2392,6 +2415,14 @@ static int mos7840_port_probe(struct usb_serial_port *port)
2392 if (device_type == MOSCHIP_DEVICE_ID_7810) { 2415 if (device_type == MOSCHIP_DEVICE_ID_7810) {
2393 mos7840_port->has_led = true; 2416 mos7840_port->has_led = true;
2394 2417
2418 mos7840_port->led_urb = usb_alloc_urb(0, GFP_KERNEL);
2419 mos7840_port->led_dr = kmalloc(sizeof(*mos7840_port->led_dr),
2420 GFP_KERNEL);
2421 if (!mos7840_port->led_urb || !mos7840_port->led_dr) {
2422 status = -ENOMEM;
2423 goto error;
2424 }
2425
2395 init_timer(&mos7840_port->led_timer1); 2426 init_timer(&mos7840_port->led_timer1);
2396 mos7840_port->led_timer1.function = mos7840_led_off; 2427 mos7840_port->led_timer1.function = mos7840_led_off;
2397 mos7840_port->led_timer1.expires = 2428 mos7840_port->led_timer1.expires =
@@ -2404,8 +2435,6 @@ static int mos7840_port_probe(struct usb_serial_port *port)
2404 jiffies + msecs_to_jiffies(LED_OFF_MS); 2435 jiffies + msecs_to_jiffies(LED_OFF_MS);
2405 mos7840_port->led_timer2.data = (unsigned long)mos7840_port; 2436 mos7840_port->led_timer2.data = (unsigned long)mos7840_port;
2406 2437
2407 mos7840_port->led_flag = false;
2408
2409 /* Turn off LED */ 2438 /* Turn off LED */
2410 mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300); 2439 mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300);
2411 } 2440 }
@@ -2427,6 +2456,8 @@ out:
2427 } 2456 }
2428 return 0; 2457 return 0;
2429error: 2458error:
2459 kfree(mos7840_port->led_dr);
2460 usb_free_urb(mos7840_port->led_urb);
2430 kfree(mos7840_port->dr); 2461 kfree(mos7840_port->dr);
2431 kfree(mos7840_port->ctrl_buf); 2462 kfree(mos7840_port->ctrl_buf);
2432 usb_free_urb(mos7840_port->control_urb); 2463 usb_free_urb(mos7840_port->control_urb);
@@ -2447,6 +2478,10 @@ static int mos7840_port_remove(struct usb_serial_port *port)
2447 2478
2448 del_timer_sync(&mos7840_port->led_timer1); 2479 del_timer_sync(&mos7840_port->led_timer1);
2449 del_timer_sync(&mos7840_port->led_timer2); 2480 del_timer_sync(&mos7840_port->led_timer2);
2481
2482 usb_kill_urb(mos7840_port->led_urb);
2483 usb_free_urb(mos7840_port->led_urb);
2484 kfree(mos7840_port->led_dr);
2450 } 2485 }
2451 usb_kill_urb(mos7840_port->control_urb); 2486 usb_kill_urb(mos7840_port->control_urb);
2452 usb_free_urb(mos7840_port->control_urb); 2487 usb_free_urb(mos7840_port->control_urb);
@@ -2473,9 +2508,7 @@ static struct usb_serial_driver moschip7840_4port_device = {
2473 .throttle = mos7840_throttle, 2508 .throttle = mos7840_throttle,
2474 .unthrottle = mos7840_unthrottle, 2509 .unthrottle = mos7840_unthrottle,
2475 .calc_num_ports = mos7840_calc_num_ports, 2510 .calc_num_ports = mos7840_calc_num_ports,
2476#ifdef MCSSerialProbe 2511 .probe = mos7840_probe,
2477 .probe = mos7840_serial_probe,
2478#endif
2479 .ioctl = mos7840_ioctl, 2512 .ioctl = mos7840_ioctl,
2480 .set_termios = mos7840_set_termios, 2513 .set_termios = mos7840_set_termios,
2481 .break_ctl = mos7840_break, 2514 .break_ctl = mos7840_break,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5dd857de05b0..1cf6f125f5f0 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -341,17 +341,12 @@ static void option_instat_callback(struct urb *urb);
341#define OLIVETTI_VENDOR_ID 0x0b3c 341#define OLIVETTI_VENDOR_ID 0x0b3c
342#define OLIVETTI_PRODUCT_OLICARD100 0xc000 342#define OLIVETTI_PRODUCT_OLICARD100 0xc000
343#define OLIVETTI_PRODUCT_OLICARD145 0xc003 343#define OLIVETTI_PRODUCT_OLICARD145 0xc003
344#define OLIVETTI_PRODUCT_OLICARD200 0xc005
344 345
345/* Celot products */ 346/* Celot products */
346#define CELOT_VENDOR_ID 0x211f 347#define CELOT_VENDOR_ID 0x211f
347#define CELOT_PRODUCT_CT680M 0x6801 348#define CELOT_PRODUCT_CT680M 0x6801
348 349
349/* ONDA Communication vendor id */
350#define ONDA_VENDOR_ID 0x1ee8
351
352/* ONDA MT825UP HSDPA 14.2 modem */
353#define ONDA_MT825UP 0x000b
354
355/* Samsung products */ 350/* Samsung products */
356#define SAMSUNG_VENDOR_ID 0x04e8 351#define SAMSUNG_VENDOR_ID 0x04e8
357#define SAMSUNG_PRODUCT_GT_B3730 0x6889 352#define SAMSUNG_PRODUCT_GT_B3730 0x6889
@@ -444,7 +439,8 @@ static void option_instat_callback(struct urb *urb);
444 439
445/* Hyundai Petatel Inc. products */ 440/* Hyundai Petatel Inc. products */
446#define PETATEL_VENDOR_ID 0x1ff4 441#define PETATEL_VENDOR_ID 0x1ff4
447#define PETATEL_PRODUCT_NP10T 0x600e 442#define PETATEL_PRODUCT_NP10T_600A 0x600a
443#define PETATEL_PRODUCT_NP10T_600E 0x600e
448 444
449/* TP-LINK Incorporated products */ 445/* TP-LINK Incorporated products */
450#define TPLINK_VENDOR_ID 0x2357 446#define TPLINK_VENDOR_ID 0x2357
@@ -782,6 +778,7 @@ static const struct usb_device_id option_ids[] = {
782 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, 778 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
783 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, 779 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
784 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 780 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
781 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
785 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 782 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
786 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ 783 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
787 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) }, 784 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
@@ -817,7 +814,8 @@ static const struct usb_device_id option_ids[] = {
817 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff), 814 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
818 .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, 815 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
819 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) }, 816 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
820 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) }, 817 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
818 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
821 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) }, 819 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
822 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff), 820 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
823 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 821 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
@@ -1256,8 +1254,8 @@ static const struct usb_device_id option_ids[] = {
1256 1254
1257 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, 1255 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
1258 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, 1256 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
1257 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) },
1259 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ 1258 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
1260 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
1261 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ 1259 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
1262 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, 1260 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
1263 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) }, 1261 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
@@ -1329,9 +1327,12 @@ static const struct usb_device_id option_ids[] = {
1329 { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) }, 1327 { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
1330 { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, 1328 { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
1331 { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, 1329 { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
1332 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) }, 1330 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
1331 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
1333 { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), 1332 { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
1334 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1333 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1334 { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
1335 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1335 { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) }, 1336 { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
1336 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */ 1337 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */
1337 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */ 1338 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */
@@ -1339,6 +1340,8 @@ static const struct usb_device_id option_ids[] = {
1339 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) }, 1340 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
1340 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) }, 1341 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
1341 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, 1342 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
1343 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1344 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
1342 { } /* Terminating entry */ 1345 { } /* Terminating entry */
1343}; 1346};
1344MODULE_DEVICE_TABLE(usb, option_ids); 1347MODULE_DEVICE_TABLE(usb, option_ids);
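Note: the net_intfN blacklists attached to the new entries mark an interface as belonging to the modem's network function so option does not claim it as a TTY. They are defined earlier in this file along these lines (sketch of the existing convention):

    static const struct option_blacklist_info net_intf4_blacklist = {
            .reserved = BIT(4),     /* leave interface 4 to the net driver */
    };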
diff --git a/drivers/usb/serial/suunto.c b/drivers/usb/serial/suunto.c
new file mode 100644
index 000000000000..2248e7a7d5ad
--- /dev/null
+++ b/drivers/usb/serial/suunto.c
@@ -0,0 +1,41 @@
1/*
2 * Suunto ANT+ USB Driver
3 *
4 * Copyright (C) 2013 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
5 * Copyright (C) 2013 Linux Foundation
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation only.
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/tty.h>
15#include <linux/module.h>
16#include <linux/usb.h>
17#include <linux/usb/serial.h>
18#include <linux/uaccess.h>
19
20static const struct usb_device_id id_table[] = {
21 { USB_DEVICE(0x0fcf, 0x1008) },
22 { },
23};
24MODULE_DEVICE_TABLE(usb, id_table);
25
26static struct usb_serial_driver suunto_device = {
27 .driver = {
28 .owner = THIS_MODULE,
29 .name = KBUILD_MODNAME,
30 },
31 .id_table = id_table,
32 .num_ports = 1,
33};
34
35static struct usb_serial_driver * const serial_drivers[] = {
36 &suunto_device,
37 NULL,
38};
39
40module_usb_serial_driver(serial_drivers, id_table);
41MODULE_LICENSE("GPL");
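Note: with only .id_table and .num_ports set, the usb-serial core falls back to its generic open/close and bulk read/write implementations for this device, which is why a functional driver fits in 41 lines.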
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 7182bb774b79..5c9f9b1d7736 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -371,7 +371,7 @@ static int ti_startup(struct usb_serial *serial)
371 usb_set_serial_data(serial, tdev); 371 usb_set_serial_data(serial, tdev);
372 372
373 /* determine device type */ 373 /* determine device type */
374 if (usb_match_id(serial->interface, ti_id_table_3410)) 374 if (serial->type == &ti_1port_device)
375 tdev->td_is_3410 = 1; 375 tdev->td_is_3410 = 1;
376 dev_dbg(&dev->dev, "%s - device type is %s\n", __func__, 376 dev_dbg(&dev->dev, "%s - device type is %s\n", __func__,
377 tdev->td_is_3410 ? "3410" : "5052"); 377 tdev->td_is_3410 ? "3410" : "5052");
@@ -1536,14 +1536,15 @@ static int ti_download_firmware(struct ti_device *tdev)
1536 char buf[32]; 1536 char buf[32];
1537 1537
1538 /* try ID specific firmware first, then try generic firmware */ 1538 /* try ID specific firmware first, then try generic firmware */
1539 sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor, 1539 sprintf(buf, "ti_usb-v%04x-p%04x.fw",
1540 dev->descriptor.idProduct); 1540 le16_to_cpu(dev->descriptor.idVendor),
1541 le16_to_cpu(dev->descriptor.idProduct));
1541 status = request_firmware(&fw_p, buf, &dev->dev); 1542 status = request_firmware(&fw_p, buf, &dev->dev);
1542 1543
1543 if (status != 0) { 1544 if (status != 0) {
1544 buf[0] = '\0'; 1545 buf[0] = '\0';
1545 if (dev->descriptor.idVendor == MTS_VENDOR_ID) { 1546 if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) {
1546 switch (dev->descriptor.idProduct) { 1547 switch (le16_to_cpu(dev->descriptor.idProduct)) {
1547 case MTS_CDMA_PRODUCT_ID: 1548 case MTS_CDMA_PRODUCT_ID:
1548 strcpy(buf, "mts_cdma.fw"); 1549 strcpy(buf, "mts_cdma.fw");
1549 break; 1550 break;
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 8257d30c4072..85365784040b 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -291,18 +291,18 @@ static void usb_wwan_indat_callback(struct urb *urb)
291 tty_flip_buffer_push(&port->port); 291 tty_flip_buffer_push(&port->port);
292 } else 292 } else
293 dev_dbg(dev, "%s: empty read urb received\n", __func__); 293 dev_dbg(dev, "%s: empty read urb received\n", __func__);
294 294 }
295 /* Resubmit urb so we continue receiving */ 295 /* Resubmit urb so we continue receiving */
296 err = usb_submit_urb(urb, GFP_ATOMIC); 296 err = usb_submit_urb(urb, GFP_ATOMIC);
297 if (err) { 297 if (err) {
298 if (err != -EPERM) { 298 if (err != -EPERM) {
299 dev_err(dev, "%s: resubmit read urb failed. (%d)\n", __func__, err); 299 dev_err(dev, "%s: resubmit read urb failed. (%d)\n",
300 /* busy also in error unless we are killed */ 300 __func__, err);
301 usb_mark_last_busy(port->serial->dev); 301 /* busy also in error unless we are killed */
302 }
303 } else {
304 usb_mark_last_busy(port->serial->dev); 302 usb_mark_last_busy(port->serial->dev);
305 } 303 }
304 } else {
305 usb_mark_last_busy(port->serial->dev);
306 } 306 }
307} 307}
308 308
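The usb_wwan hunk is a brace fix: the closing brace moves so the read URB is resubmitted after every completion, not only when data arrived. Resubmitting from the completion handler is the standard way to keep a bulk-in pipe receiving; a schematic handler (names hypothetical) looks like:

#include <linux/usb.h>

static void my_read_callback(struct urb *urb)
{
	int err;

	if (urb->status == 0 && urb->actual_length)
		; /* consume urb->transfer_buffer here */

	/* always resubmit so the endpoint keeps receiving */
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err && err != -EPERM)	/* -EPERM: urb was killed */
		pr_err("resubmit failed: %d\n", err);
}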
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1799335288bd..c015f2c16729 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -665,6 +665,13 @@ UNUSUAL_DEV( 0x054c, 0x016a, 0x0000, 0x9999,
665 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 665 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
666 US_FL_FIX_INQUIRY ), 666 US_FL_FIX_INQUIRY ),
667 667
668/* Submitted by Ren Bigcren <bigcren.ren@sonymobile.com> */
669UNUSUAL_DEV( 0x054c, 0x02a5, 0x0100, 0x0100,
670 "Sony Corp.",
671 "MicroVault Flash Drive",
672 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
673 US_FL_NO_READ_CAPACITY_16 ),
674
668/* floppy reports multiple luns */ 675/* floppy reports multiple luns */
669UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210, 676UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210,
670 "SAMSUNG", 677 "SAMSUNG",
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 16968c899493..d3493ca0525d 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -1226,6 +1226,12 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
1226 } 1226 }
1227 spin_lock_irqsave(&xfer->lock, flags); 1227 spin_lock_irqsave(&xfer->lock, flags);
1228 rpipe = xfer->ep->hcpriv; 1228 rpipe = xfer->ep->hcpriv;
1229 if (rpipe == NULL) {
1230 pr_debug("%s: xfer id 0x%08X has no RPIPE. %s",
1231 __func__, wa_xfer_id(xfer),
1232 "Probably already aborted.\n" );
1233 goto out_unlock;
1234 }
1229 /* Check the delayed list -> if there, release and complete */ 1235 /* Check the delayed list -> if there, release and complete */
1230 spin_lock_irqsave(&wa->xfer_list_lock, flags2); 1236 spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1231 if (!list_empty(&xfer->list_node) && xfer->seg == NULL) 1237 if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
@@ -1644,8 +1650,7 @@ static void wa_xfer_result_cb(struct urb *urb)
1644 break; 1650 break;
1645 } 1651 }
1646 usb_status = xfer_result->bTransferStatus & 0x3f; 1652 usb_status = xfer_result->bTransferStatus & 0x3f;
1647 if (usb_status == WA_XFER_STATUS_ABORTED 1653 if (usb_status == WA_XFER_STATUS_NOT_FOUND)
1648 || usb_status == WA_XFER_STATUS_NOT_FOUND)
1649 /* taken care of already */ 1654 /* taken care of already */
1650 break; 1655 break;
1651 xfer_id = xfer_result->dwTransferID; 1656 xfer_id = xfer_result->dwTransferID;
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index c5179e269df6..cef6002acbd4 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -137,8 +137,27 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
137 */ 137 */
138 pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); 138 pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
139 139
140 if (vdev->reset_works) 140 /*
141 __pci_reset_function(pdev); 141 * Careful, device_lock may already be held. This is the case if
142 * a driver unbind is blocked. Try to get the locks ourselves to
143 * prevent a deadlock.
144 */
145 if (vdev->reset_works) {
146 bool reset_done = false;
147
148 if (pci_cfg_access_trylock(pdev)) {
149 if (device_trylock(&pdev->dev)) {
150 __pci_reset_function_locked(pdev);
151 reset_done = true;
152 device_unlock(&pdev->dev);
153 }
154 pci_cfg_access_unlock(pdev);
155 }
156
157 if (!reset_done)
158 pr_warn("%s: Unable to acquire locks for reset of %s\n",
159 __func__, dev_name(&pdev->dev));
160 }
142 161
143 pci_restore_state(pdev); 162 pci_restore_state(pdev);
144} 163}
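The vfio-pci change replaces an unconditional __pci_reset_function() with a trylock dance, for the reason the new comment gives: disable can run while a driver unbind already holds device_lock, so blocking on that lock would deadlock. Reduced to its essentials, the idiom is "never sleep on a lock the thread waiting on *you* may hold" (a sketch, with the warning text illustrative):

#include <linux/pci.h>

static void reset_if_safe(struct pci_dev *pdev)
{
	bool done = false;

	if (pci_cfg_access_trylock(pdev)) {
		if (device_trylock(&pdev->dev)) {
			__pci_reset_function_locked(pdev);
			done = true;
			device_unlock(&pdev->dev);
		}
		pci_cfg_access_unlock(pdev);
	}
	if (!done)	/* degrade gracefully instead of deadlocking */
		pr_warn("%s busy, skipping reset\n", dev_name(&pdev->dev));
}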
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index c488da5db7c7..842f4507883e 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -494,27 +494,6 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
494 return 0; 494 return 0;
495} 495}
496 496
497static int vfio_group_nb_del_dev(struct vfio_group *group, struct device *dev)
498{
499 struct vfio_device *device;
500
501 /*
502 * Expect to fall out here. If a device was in use, it would
503 * have been bound to a vfio sub-driver, which would have blocked
504 * in .remove at vfio_del_group_dev. Sanity check that we no
505 * longer track the device, so it's safe to remove.
506 */
507 device = vfio_group_get_device(group, dev);
508 if (likely(!device))
509 return 0;
510
511 WARN("Device %s removed from live group %d!\n", dev_name(dev),
512 iommu_group_id(group->iommu_group));
513
514 vfio_device_put(device);
515 return 0;
516}
517
518static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev) 497static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
519{ 498{
520 /* We don't care what happens when the group isn't in use */ 499 /* We don't care what happens when the group isn't in use */
@@ -531,13 +510,11 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
531 struct device *dev = data; 510 struct device *dev = data;
532 511
533 /* 512 /*
534 * Need to go through a group_lock lookup to get a reference or 513 * Need to go through a group_lock lookup to get a reference or we
535 * we risk racing a group being removed. Leave a WARN_ON for 514 * risk racing a group being removed. Ignore spurious notifies.
536 * debuging, but if the group no longer exists, a spurious notify
537 * is harmless.
538 */ 515 */
539 group = vfio_group_try_get(group); 516 group = vfio_group_try_get(group);
540 if (WARN_ON(!group)) 517 if (!group)
541 return NOTIFY_OK; 518 return NOTIFY_OK;
542 519
543 switch (action) { 520 switch (action) {
@@ -545,7 +522,13 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
545 vfio_group_nb_add_dev(group, dev); 522 vfio_group_nb_add_dev(group, dev);
546 break; 523 break;
547 case IOMMU_GROUP_NOTIFY_DEL_DEVICE: 524 case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
548 vfio_group_nb_del_dev(group, dev); 525 /*
526 * Nothing to do here. If the device is in use, then the
527 * vfio sub-driver should block the remove callback until
528 * it is unused. If the device is unused or attached to a
529 * stub driver, then it should be released and we don't
530 * care that it will be going away.
531 */
549 break; 532 break;
550 case IOMMU_GROUP_NOTIFY_BIND_DRIVER: 533 case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
551 pr_debug("%s: Device %s, group %d binding to driver\n", 534 pr_debug("%s: Device %s, group %d binding to driver\n",
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 027be91db139..969a85960e9f 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -15,7 +15,6 @@
15#include <linux/moduleparam.h> 15#include <linux/moduleparam.h>
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <linux/workqueue.h> 17#include <linux/workqueue.h>
18#include <linux/rcupdate.h>
19#include <linux/file.h> 18#include <linux/file.h>
20#include <linux/slab.h> 19#include <linux/slab.h>
21 20
@@ -346,12 +345,11 @@ static void handle_tx(struct vhost_net *net)
346 struct vhost_net_ubuf_ref *uninitialized_var(ubufs); 345 struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
347 bool zcopy, zcopy_used; 346 bool zcopy, zcopy_used;
348 347
349 /* TODO: check that we are running from vhost_worker? */ 348 mutex_lock(&vq->mutex);
350 sock = rcu_dereference_check(vq->private_data, 1); 349 sock = vq->private_data;
351 if (!sock) 350 if (!sock)
352 return; 351 goto out;
353 352
354 mutex_lock(&vq->mutex);
355 vhost_disable_notify(&net->dev, vq); 353 vhost_disable_notify(&net->dev, vq);
356 354
357 hdr_size = nvq->vhost_hlen; 355 hdr_size = nvq->vhost_hlen;
@@ -461,7 +459,7 @@ static void handle_tx(struct vhost_net *net)
461 break; 459 break;
462 } 460 }
463 } 461 }
464 462out:
465 mutex_unlock(&vq->mutex); 463 mutex_unlock(&vq->mutex);
466} 464}
467 465
@@ -570,14 +568,14 @@ static void handle_rx(struct vhost_net *net)
570 s16 headcount; 568 s16 headcount;
571 size_t vhost_hlen, sock_hlen; 569 size_t vhost_hlen, sock_hlen;
572 size_t vhost_len, sock_len; 570 size_t vhost_len, sock_len;
573 /* TODO: check that we are running from vhost_worker? */ 571 struct socket *sock;
574 struct socket *sock = rcu_dereference_check(vq->private_data, 1);
575
576 if (!sock)
577 return;
578 572
579 mutex_lock(&vq->mutex); 573 mutex_lock(&vq->mutex);
574 sock = vq->private_data;
575 if (!sock)
576 goto out;
580 vhost_disable_notify(&net->dev, vq); 577 vhost_disable_notify(&net->dev, vq);
578
581 vhost_hlen = nvq->vhost_hlen; 579 vhost_hlen = nvq->vhost_hlen;
582 sock_hlen = nvq->sock_hlen; 580 sock_hlen = nvq->sock_hlen;
583 581
@@ -652,7 +650,7 @@ static void handle_rx(struct vhost_net *net)
652 break; 650 break;
653 } 651 }
654 } 652 }
655 653out:
656 mutex_unlock(&vq->mutex); 654 mutex_unlock(&vq->mutex);
657} 655}
658 656
@@ -750,8 +748,7 @@ static int vhost_net_enable_vq(struct vhost_net *n,
750 struct vhost_poll *poll = n->poll + (nvq - n->vqs); 748 struct vhost_poll *poll = n->poll + (nvq - n->vqs);
751 struct socket *sock; 749 struct socket *sock;
752 750
753 sock = rcu_dereference_protected(vq->private_data, 751 sock = vq->private_data;
754 lockdep_is_held(&vq->mutex));
755 if (!sock) 752 if (!sock)
756 return 0; 753 return 0;
757 754
@@ -764,10 +761,9 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
764 struct socket *sock; 761 struct socket *sock;
765 762
766 mutex_lock(&vq->mutex); 763 mutex_lock(&vq->mutex);
767 sock = rcu_dereference_protected(vq->private_data, 764 sock = vq->private_data;
768 lockdep_is_held(&vq->mutex));
769 vhost_net_disable_vq(n, vq); 765 vhost_net_disable_vq(n, vq);
770 rcu_assign_pointer(vq->private_data, NULL); 766 vq->private_data = NULL;
771 mutex_unlock(&vq->mutex); 767 mutex_unlock(&vq->mutex);
772 return sock; 768 return sock;
773} 769}
@@ -923,8 +919,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
923 } 919 }
924 920
925 /* start polling new socket */ 921 /* start polling new socket */
926 oldsock = rcu_dereference_protected(vq->private_data, 922 oldsock = vq->private_data;
927 lockdep_is_held(&vq->mutex));
928 if (sock != oldsock) { 923 if (sock != oldsock) {
929 ubufs = vhost_net_ubuf_alloc(vq, 924 ubufs = vhost_net_ubuf_alloc(vq,
930 sock && vhost_sock_zcopy(sock)); 925 sock && vhost_sock_zcopy(sock));
@@ -934,7 +929,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
934 } 929 }
935 930
936 vhost_net_disable_vq(n, vq); 931 vhost_net_disable_vq(n, vq);
937 rcu_assign_pointer(vq->private_data, sock); 932 vq->private_data = sock;
938 r = vhost_init_used(vq); 933 r = vhost_init_used(vq);
939 if (r) 934 if (r)
940 goto err_used; 935 goto err_used;
@@ -968,7 +963,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
968 return 0; 963 return 0;
969 964
970err_used: 965err_used:
971 rcu_assign_pointer(vq->private_data, oldsock); 966 vq->private_data = oldsock;
972 vhost_net_enable_vq(n, vq); 967 vhost_net_enable_vq(n, vq);
973 if (ubufs) 968 if (ubufs)
974 vhost_net_ubuf_put_wait_and_free(ubufs); 969 vhost_net_ubuf_put_wait_and_free(ubufs);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 06adf31a9248..0c27c7df1b09 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -902,19 +902,15 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
902 int head, ret; 902 int head, ret;
903 u8 target; 903 u8 target;
904 904
905 mutex_lock(&vq->mutex);
905 /* 906 /*
906 * We can handle the vq only after the endpoint is setup by calling the 907 * We can handle the vq only after the endpoint is setup by calling the
907 * VHOST_SCSI_SET_ENDPOINT ioctl. 908 * VHOST_SCSI_SET_ENDPOINT ioctl.
908 *
909 * TODO: Check that we are running from vhost_worker which acts
910 * as read-side critical section for vhost kind of RCU.
911 * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
912 */ 909 */
913 vs_tpg = rcu_dereference_check(vq->private_data, 1); 910 vs_tpg = vq->private_data;
914 if (!vs_tpg) 911 if (!vs_tpg)
915 return; 912 goto out;
916 913
917 mutex_lock(&vq->mutex);
918 vhost_disable_notify(&vs->dev, vq); 914 vhost_disable_notify(&vs->dev, vq);
919 915
920 for (;;) { 916 for (;;) {
@@ -1064,6 +1060,7 @@ err_free:
1064 vhost_scsi_free_cmd(cmd); 1060 vhost_scsi_free_cmd(cmd);
1065err_cmd: 1061err_cmd:
1066 vhost_scsi_send_bad_target(vs, vq, head, out); 1062 vhost_scsi_send_bad_target(vs, vq, head, out);
1063out:
1067 mutex_unlock(&vq->mutex); 1064 mutex_unlock(&vq->mutex);
1068} 1065}
1069 1066
@@ -1232,9 +1229,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1232 sizeof(vs->vs_vhost_wwpn)); 1229 sizeof(vs->vs_vhost_wwpn));
1233 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { 1230 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1234 vq = &vs->vqs[i].vq; 1231 vq = &vs->vqs[i].vq;
1235 /* Flushing the vhost_work acts as synchronize_rcu */
1236 mutex_lock(&vq->mutex); 1232 mutex_lock(&vq->mutex);
1237 rcu_assign_pointer(vq->private_data, vs_tpg); 1233 vq->private_data = vs_tpg;
1238 vhost_init_used(vq); 1234 vhost_init_used(vq);
1239 mutex_unlock(&vq->mutex); 1235 mutex_unlock(&vq->mutex);
1240 } 1236 }
@@ -1313,9 +1309,8 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1313 if (match) { 1309 if (match) {
1314 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { 1310 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1315 vq = &vs->vqs[i].vq; 1311 vq = &vs->vqs[i].vq;
1316 /* Flushing the vhost_work acts as synchronize_rcu */
1317 mutex_lock(&vq->mutex); 1312 mutex_lock(&vq->mutex);
1318 rcu_assign_pointer(vq->private_data, NULL); 1313 vq->private_data = NULL;
1319 mutex_unlock(&vq->mutex); 1314 mutex_unlock(&vq->mutex);
1320 } 1315 }
1321 } 1316 }
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index a73ea217f24d..339eae85859a 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -13,7 +13,6 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/mutex.h> 14#include <linux/mutex.h>
15#include <linux/workqueue.h> 15#include <linux/workqueue.h>
16#include <linux/rcupdate.h>
17#include <linux/file.h> 16#include <linux/file.h>
18#include <linux/slab.h> 17#include <linux/slab.h>
19 18
@@ -200,9 +199,8 @@ static long vhost_test_run(struct vhost_test *n, int test)
200 priv = test ? n : NULL; 199 priv = test ? n : NULL;
201 200
202 /* start polling new socket */ 201 /* start polling new socket */
203 oldpriv = rcu_dereference_protected(vq->private_data, 202 oldpriv = vq->private_data;
204 lockdep_is_held(&vq->mutex)); 203 vq->private_data = priv;
205 rcu_assign_pointer(vq->private_data, priv);
206 204
207 r = vhost_init_used(&n->vqs[index]); 205 r = vhost_init_used(&n->vqs[index]);
208 206
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 42298cd23c73..4465ed5f316d 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -103,14 +103,8 @@ struct vhost_virtqueue {
103 struct iovec iov[UIO_MAXIOV]; 103 struct iovec iov[UIO_MAXIOV];
104 struct iovec *indirect; 104 struct iovec *indirect;
105 struct vring_used_elem *heads; 105 struct vring_used_elem *heads;
106 /* We use a kind of RCU to access private pointer. 106 /* Protected by virtqueue mutex. */
107 * All readers access it from worker, which makes it possible to 107 void *private_data;
108 * flush the vhost_work instead of synchronize_rcu. Therefore readers do
109 * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
110 * vhost_work execution acts instead of rcu_read_lock() and the end of
111 * vhost_work execution acts instead of rcu_read_unlock().
112 * Writers use virtqueue mutex. */
113 void __rcu *private_data;
114 /* Log write descriptors */ 108 /* Log write descriptors */
115 void __user *log_base; 109 void __user *log_base;
116 struct vhost_log *log; 110 struct vhost_log *log;
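The vhost.h hunk retires the pseudo-RCU convention: private_data loses its __rcu annotation and becomes an ordinary pointer guarded by the virtqueue mutex, which is why all the rcu_dereference*/rcu_assign_pointer calls in net.c, scsi.c and test.c above collapse into plain loads and stores taken under vq->mutex. Schematically (the struct is a stand-in, not the real vhost_virtqueue):

#include <linux/mutex.h>

struct vq_like {
	struct mutex mutex;
	void *private_data;	/* protected by ->mutex */
};

/* writers swap the backend under the mutex ... */
static void *backend_swap(struct vq_like *vq, void *new)
{
	void *old;

	mutex_lock(&vq->mutex);
	old = vq->private_data;
	vq->private_data = new;
	mutex_unlock(&vq->mutex);
	return old;
}

/* ... and workers dereference it only while holding the same mutex,
 * which replaces the old flush-as-synchronize_rcu trick. */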
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index a89c15de9f45..9b0f12c5c284 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -435,8 +435,8 @@ static int correct_chipset(struct atyfb_par *par)
435 const char *name; 435 const char *name;
436 int i; 436 int i;
437 437
438 for (i = ARRAY_SIZE(aty_chips); i > 0; i--) 438 for (i = (int)ARRAY_SIZE(aty_chips) - 1; i >= 0; i--)
439 if (par->pci_id == aty_chips[i - 1].pci_id) 439 if (par->pci_id == aty_chips[i].pci_id)
440 break; 440 break;
441 441
442 if (i < 0) 442 if (i < 0)
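The atyfb change fixes an unreachable error check: the old loop counted i down from ARRAY_SIZE and exited with i == 0 on a miss, so the `if (i < 0)` test could never fire. Counting from ARRAY_SIZE - 1 down to 0, with an int index since ARRAY_SIZE() is unsigned, makes -1 the unambiguous not-found value. The corrected idiom, as a self-contained sketch (struct and names are placeholders):

struct chip_id {
	u16 pci_id;
};

static int find_chip(const struct chip_id *table, size_t n, u16 id)
{
	int i;

	/* cast: n is unsigned, and an unsigned i would make i >= 0
	 * always true */
	for (i = (int)n - 1; i >= 0; i--)
		if (id == table[i].pci_id)
			break;

	return i;	/* -1 means not found, and is now reachable */
}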
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 5ca11b066b7e..886e797f75f9 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -101,33 +101,37 @@ static const struct backlight_ops max8925_backlight_ops = {
101 .get_brightness = max8925_backlight_get_brightness, 101 .get_brightness = max8925_backlight_get_brightness,
102}; 102};
103 103
104#ifdef CONFIG_OF 104static void max8925_backlight_dt_init(struct platform_device *pdev)
105static int max8925_backlight_dt_init(struct platform_device *pdev,
106 struct max8925_backlight_pdata *pdata)
107{ 105{
108 struct device_node *nproot = pdev->dev.parent->of_node, *np; 106 struct device_node *nproot = pdev->dev.parent->of_node, *np;
109 int dual_string; 107 struct max8925_backlight_pdata *pdata;
108 u32 val;
109
110 if (!nproot || !IS_ENABLED(CONFIG_OF))
111 return;
112
113 pdata = devm_kzalloc(&pdev->dev,
114 sizeof(struct max8925_backlight_pdata),
115 GFP_KERNEL);
116 if (!pdata)
117 return;
110 118
111 if (!nproot)
112 return -ENODEV;
113 np = of_find_node_by_name(nproot, "backlight"); 119 np = of_find_node_by_name(nproot, "backlight");
114 if (!np) { 120 if (!np) {
115 dev_err(&pdev->dev, "failed to find backlight node\n"); 121 dev_err(&pdev->dev, "failed to find backlight node\n");
116 return -ENODEV; 122 return;
117 } 123 }
118 124
119 of_property_read_u32(np, "maxim,max8925-dual-string", &dual_string); 125 if (!of_property_read_u32(np, "maxim,max8925-dual-string", &val))
120 pdata->dual_string = dual_string; 126 pdata->dual_string = val;
121 return 0; 127
128 pdev->dev.platform_data = pdata;
122} 129}
123#else
124#define max8925_backlight_dt_init(x, y) (-1)
125#endif
126 130
127static int max8925_backlight_probe(struct platform_device *pdev) 131static int max8925_backlight_probe(struct platform_device *pdev)
128{ 132{
129 struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); 133 struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
130 struct max8925_backlight_pdata *pdata = pdev->dev.platform_data; 134 struct max8925_backlight_pdata *pdata;
131 struct max8925_backlight_data *data; 135 struct max8925_backlight_data *data;
132 struct backlight_device *bl; 136 struct backlight_device *bl;
133 struct backlight_properties props; 137 struct backlight_properties props;
@@ -170,13 +174,10 @@ static int max8925_backlight_probe(struct platform_device *pdev)
170 platform_set_drvdata(pdev, bl); 174 platform_set_drvdata(pdev, bl);
171 175
172 value = 0; 176 value = 0;
173 if (pdev->dev.parent->of_node && !pdata) { 177 if (!pdev->dev.platform_data)
174 pdata = devm_kzalloc(&pdev->dev, 178 max8925_backlight_dt_init(pdev);
175 sizeof(struct max8925_backlight_pdata),
176 GFP_KERNEL);
177 max8925_backlight_dt_init(pdev, pdata);
178 }
179 179
180 pdata = pdev->dev.platform_data;
180 if (pdata) { 181 if (pdata) {
181 if (pdata->lxw_scl) 182 if (pdata->lxw_scl)
182 value |= (1 << 7); 183 value |= (1 << 7);
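The max8925_bl rewrite folds the #ifdef CONFIG_OF variant away behind IS_ENABLED(), lets the helper allocate its own pdata with devm_kzalloc(), and — unlike the old code, which copied an uninitialized stack variable when the property was absent — only uses the value when of_property_read_u32() reports success. The resulting shape is a common one; a sketch with a placeholder binding name and pdata layout:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_pdata {
	u32 dual_string;
};

static void my_dt_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.parent->of_node;
	struct my_pdata *pdata;
	u32 val;

	if (!np || !IS_ENABLED(CONFIG_OF))
		return;	/* dead-code eliminated when !CONFIG_OF */

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return;

	/* returns 0 only on success; val is untouched otherwise */
	if (!of_property_read_u32(np, "vendor,prop", &val))
		pdata->dual_string = val;

	pdev->dev.platform_data = pdata;
}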
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 40178338b619..9e758a8f890d 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -22,6 +22,7 @@
22 */ 22 */
23 23
24#include <linux/bitops.h> 24#include <linux/bitops.h>
25#include <linux/bug.h>
25#include <linux/errno.h> 26#include <linux/errno.h>
26#include <linux/export.h> 27#include <linux/export.h>
27#include <linux/hdmi.h> 28#include <linux/hdmi.h>
@@ -52,7 +53,7 @@ int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
52 53
53 frame->type = HDMI_INFOFRAME_TYPE_AVI; 54 frame->type = HDMI_INFOFRAME_TYPE_AVI;
54 frame->version = 2; 55 frame->version = 2;
55 frame->length = 13; 56 frame->length = HDMI_AVI_INFOFRAME_SIZE;
56 57
57 return 0; 58 return 0;
58} 59}
@@ -83,7 +84,7 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
83 if (size < length) 84 if (size < length)
84 return -ENOSPC; 85 return -ENOSPC;
85 86
86 memset(buffer, 0, length); 87 memset(buffer, 0, size);
87 88
88 ptr[0] = frame->type; 89 ptr[0] = frame->type;
89 ptr[1] = frame->version; 90 ptr[1] = frame->version;
@@ -95,13 +96,18 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
95 96
96 ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3); 97 ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3);
97 98
98 if (frame->active_info_valid) 99 /*
100 * Data byte 1, bit 4 has to be set if we provide the active format
101 * aspect ratio
102 */
103 if (frame->active_aspect & 0xf)
99 ptr[0] |= BIT(4); 104 ptr[0] |= BIT(4);
100 105
101 if (frame->horizontal_bar_valid) 106 /* Bit 3 and 2 indicate if we transmit horizontal/vertical bar data */
107 if (frame->top_bar || frame->bottom_bar)
102 ptr[0] |= BIT(3); 108 ptr[0] |= BIT(3);
103 109
104 if (frame->vertical_bar_valid) 110 if (frame->left_bar || frame->right_bar)
105 ptr[0] |= BIT(2); 111 ptr[0] |= BIT(2);
106 112
107 ptr[1] = ((frame->colorimetry & 0x3) << 6) | 113 ptr[1] = ((frame->colorimetry & 0x3) << 6) |
@@ -151,7 +157,7 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
151 157
152 frame->type = HDMI_INFOFRAME_TYPE_SPD; 158 frame->type = HDMI_INFOFRAME_TYPE_SPD;
153 frame->version = 1; 159 frame->version = 1;
154 frame->length = 25; 160 frame->length = HDMI_SPD_INFOFRAME_SIZE;
155 161
156 strncpy(frame->vendor, vendor, sizeof(frame->vendor)); 162 strncpy(frame->vendor, vendor, sizeof(frame->vendor));
157 strncpy(frame->product, product, sizeof(frame->product)); 163 strncpy(frame->product, product, sizeof(frame->product));
@@ -185,7 +191,7 @@ ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
185 if (size < length) 191 if (size < length)
186 return -ENOSPC; 192 return -ENOSPC;
187 193
188 memset(buffer, 0, length); 194 memset(buffer, 0, size);
189 195
190 ptr[0] = frame->type; 196 ptr[0] = frame->type;
191 ptr[1] = frame->version; 197 ptr[1] = frame->version;
@@ -218,7 +224,7 @@ int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
218 224
219 frame->type = HDMI_INFOFRAME_TYPE_AUDIO; 225 frame->type = HDMI_INFOFRAME_TYPE_AUDIO;
220 frame->version = 1; 226 frame->version = 1;
221 frame->length = 10; 227 frame->length = HDMI_AUDIO_INFOFRAME_SIZE;
222 228
223 return 0; 229 return 0;
224} 230}
@@ -250,7 +256,7 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
250 if (size < length) 256 if (size < length)
251 return -ENOSPC; 257 return -ENOSPC;
252 258
253 memset(buffer, 0, length); 259 memset(buffer, 0, size);
254 260
255 if (frame->channels >= 2) 261 if (frame->channels >= 2)
256 channels = frame->channels - 1; 262 channels = frame->channels - 1;
@@ -282,9 +288,33 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
282EXPORT_SYMBOL(hdmi_audio_infoframe_pack); 288EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
283 289
284/** 290/**
285 * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary 291 * hdmi_vendor_infoframe_init() - initialize an HDMI vendor infoframe
286 * buffer
287 * @frame: HDMI vendor infoframe 292 * @frame: HDMI vendor infoframe
293 *
294 * Returns 0 on success or a negative error code on failure.
295 */
296int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame)
297{
298 memset(frame, 0, sizeof(*frame));
299
300 frame->type = HDMI_INFOFRAME_TYPE_VENDOR;
301 frame->version = 1;
302
303 frame->oui = HDMI_IEEE_OUI;
304
305 /*
306 * 0 is a valid value for s3d_struct, so we use a special "not set"
307 * value
308 */
309 frame->s3d_struct = HDMI_3D_STRUCTURE_INVALID;
310
311 return 0;
312}
313EXPORT_SYMBOL(hdmi_vendor_infoframe_init);
314
315/**
316 * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer
317 * @frame: HDMI infoframe
288 * @buffer: destination buffer 318 * @buffer: destination buffer
289 * @size: size of buffer 319 * @size: size of buffer
290 * 320 *
@@ -297,27 +327,110 @@ EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
297 * error code on failure. 327 * error code on failure.
298 */ 328 */
299ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, 329ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
300 void *buffer, size_t size) 330 void *buffer, size_t size)
301{ 331{
302 u8 *ptr = buffer; 332 u8 *ptr = buffer;
303 size_t length; 333 size_t length;
304 334
335 /* empty info frame */
336 if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID)
337 return -EINVAL;
338
339 /* only one of those can be supplied */
340 if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
341 return -EINVAL;
342
343 /* for side by side (half) we also need to provide 3D_Ext_Data */
344 if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
345 frame->length = 6;
346 else
347 frame->length = 5;
348
305 length = HDMI_INFOFRAME_HEADER_SIZE + frame->length; 349 length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
306 350
307 if (size < length) 351 if (size < length)
308 return -ENOSPC; 352 return -ENOSPC;
309 353
310 memset(buffer, 0, length); 354 memset(buffer, 0, size);
311 355
312 ptr[0] = frame->type; 356 ptr[0] = frame->type;
313 ptr[1] = frame->version; 357 ptr[1] = frame->version;
314 ptr[2] = frame->length; 358 ptr[2] = frame->length;
315 ptr[3] = 0; /* checksum */ 359 ptr[3] = 0; /* checksum */
316 360
317 memcpy(&ptr[HDMI_INFOFRAME_HEADER_SIZE], frame->data, frame->length); 361 /* HDMI OUI */
362 ptr[4] = 0x03;
363 ptr[5] = 0x0c;
364 ptr[6] = 0x00;
365
366 if (frame->vic) {
367 ptr[7] = 0x1 << 5; /* video format */
368 ptr[8] = frame->vic;
369 } else {
370 ptr[7] = 0x2 << 5; /* video format */
371 ptr[8] = (frame->s3d_struct & 0xf) << 4;
372 if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
373 ptr[9] = (frame->s3d_ext_data & 0xf) << 4;
374 }
318 375
319 hdmi_infoframe_checksum(buffer, length); 376 hdmi_infoframe_checksum(buffer, length);
320 377
321 return length; 378 return length;
322} 379}
323EXPORT_SYMBOL(hdmi_vendor_infoframe_pack); 380EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
381
382/*
383 * hdmi_vendor_any_infoframe_pack() - write a vendor infoframe to binary buffer
384 */
385static ssize_t
386hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
387 void *buffer, size_t size)
388{
389 /* we only know about HDMI vendor infoframes */
390 if (frame->any.oui != HDMI_IEEE_OUI)
391 return -EINVAL;
392
393 return hdmi_vendor_infoframe_pack(&frame->hdmi, buffer, size);
394}
395
396/**
397 * hdmi_infoframe_pack() - write a HDMI infoframe to binary buffer
398 * @frame: HDMI infoframe
399 * @buffer: destination buffer
400 * @size: size of buffer
401 *
402 * Packs the information contained in the @frame structure into a binary
403 * representation that can be written into the corresponding controller
404 * registers. Also computes the checksum as required by section 5.3.5 of
405 * the HDMI 1.4 specification.
406 *
407 * Returns the number of bytes packed into the binary buffer or a negative
408 * error code on failure.
409 */
410ssize_t
411hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size)
412{
413 ssize_t length;
414
415 switch (frame->any.type) {
416 case HDMI_INFOFRAME_TYPE_AVI:
417 length = hdmi_avi_infoframe_pack(&frame->avi, buffer, size);
418 break;
419 case HDMI_INFOFRAME_TYPE_SPD:
420 length = hdmi_spd_infoframe_pack(&frame->spd, buffer, size);
421 break;
422 case HDMI_INFOFRAME_TYPE_AUDIO:
423 length = hdmi_audio_infoframe_pack(&frame->audio, buffer, size);
424 break;
425 case HDMI_INFOFRAME_TYPE_VENDOR:
426 length = hdmi_vendor_any_infoframe_pack(&frame->vendor,
427 buffer, size);
428 break;
429 default:
430 WARN(1, "Bad infoframe type %d\n", frame->any.type);
431 length = -EINVAL;
432 }
433
434 return length;
435}
436EXPORT_SYMBOL(hdmi_infoframe_pack);
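With the new init helper and the type-dispatching hdmi_infoframe_pack(), a caller no longer fills in the length or the IEEE OUI by hand. A hypothetical caller packing a 3D vendor infoframe might look like the following (the buffer handling is illustrative; side-by-side (half) is the case that additionally requires 3D_Ext_Data):

#include <linux/hdmi.h>

static ssize_t pack_3d_frame(u8 *buf, size_t size)
{
	struct hdmi_vendor_infoframe frame;

	hdmi_vendor_infoframe_init(&frame);
	/* vic and s3d_struct are mutually exclusive; set exactly one */
	frame.s3d_struct = HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
	frame.s3d_ext_data = 0;	/* required for side-by-side (half) */

	/* returns bytes written, -EINVAL for a bad frame,
	 * -ENOSPC if buf is too small */
	return hdmi_vendor_infoframe_pack(&frame, buf, size);
}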
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 3ba37713b1f9..dc09ebe4aba5 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -239,24 +239,6 @@ static const struct fb_bitfield def_rgb565[] = {
239 } 239 }
240}; 240};
241 241
242static const struct fb_bitfield def_rgb666[] = {
243 [RED] = {
244 .offset = 16,
245 .length = 6,
246 },
247 [GREEN] = {
248 .offset = 8,
249 .length = 6,
250 },
251 [BLUE] = {
252 .offset = 0,
253 .length = 6,
254 },
255 [TRANSP] = { /* no support for transparency */
256 .length = 0,
257 }
258};
259
260static const struct fb_bitfield def_rgb888[] = { 242static const struct fb_bitfield def_rgb888[] = {
261 [RED] = { 243 [RED] = {
262 .offset = 16, 244 .offset = 16,
@@ -309,9 +291,6 @@ static int mxsfb_check_var(struct fb_var_screeninfo *var,
309 break; 291 break;
310 case STMLCDIF_16BIT: 292 case STMLCDIF_16BIT:
311 case STMLCDIF_18BIT: 293 case STMLCDIF_18BIT:
312 /* 24 bit to 18 bit mapping */
313 rgb = def_rgb666;
314 break;
315 case STMLCDIF_24BIT: 294 case STMLCDIF_24BIT:
316 /* real 24 bit */ 295 /* real 24 bit */
317 rgb = def_rgb888; 296 rgb = def_rgb888;
@@ -453,11 +432,6 @@ static int mxsfb_set_par(struct fb_info *fb_info)
453 return -EINVAL; 432 return -EINVAL;
454 case STMLCDIF_16BIT: 433 case STMLCDIF_16BIT:
455 case STMLCDIF_18BIT: 434 case STMLCDIF_18BIT:
456 /* 24 bit to 18 bit mapping */
457 ctrl |= CTRL_DF24; /* ignore the upper 2 bits in
458 * each colour component
459 */
460 break;
461 case STMLCDIF_24BIT: 435 case STMLCDIF_24BIT:
462 /* real 24 bit */ 436 /* real 24 bit */
463 break; 437 break;
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index 8c527e5b293c..796e5112ceee 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -587,8 +587,7 @@ static int nuc900fb_probe(struct platform_device *pdev)
587 fbinfo->flags = FBINFO_FLAG_DEFAULT; 587 fbinfo->flags = FBINFO_FLAG_DEFAULT;
588 fbinfo->pseudo_palette = &fbi->pseudo_pal; 588 fbinfo->pseudo_palette = &fbi->pseudo_pal;
589 589
590 ret = request_irq(irq, nuc900fb_irqhandler, 0, 590 ret = request_irq(irq, nuc900fb_irqhandler, 0, pdev->name, fbi);
591 pdev->name, fbinfo);
592 if (ret) { 591 if (ret) {
593 dev_err(&pdev->dev, "cannot register irq handler %d -err %d\n", 592 dev_err(&pdev->dev, "cannot register irq handler %d -err %d\n",
594 irq, ret); 593 irq, ret);
diff --git a/drivers/video/omap2/displays-new/connector-analog-tv.c b/drivers/video/omap2/displays-new/connector-analog-tv.c
index 5338f362293b..1b60698f141e 100644
--- a/drivers/video/omap2/displays-new/connector-analog-tv.c
+++ b/drivers/video/omap2/displays-new/connector-analog-tv.c
@@ -28,6 +28,20 @@ struct panel_drv_data {
28 bool invert_polarity; 28 bool invert_polarity;
29}; 29};
30 30
31static const struct omap_video_timings tvc_pal_timings = {
32 .x_res = 720,
33 .y_res = 574,
34 .pixel_clock = 13500,
35 .hsw = 64,
36 .hfp = 12,
37 .hbp = 68,
38 .vsw = 5,
39 .vfp = 5,
40 .vbp = 41,
41
42 .interlace = true,
43};
44
31#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) 45#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
32 46
33static int tvc_connect(struct omap_dss_device *dssdev) 47static int tvc_connect(struct omap_dss_device *dssdev)
@@ -212,14 +226,14 @@ static int tvc_probe(struct platform_device *pdev)
212 return -ENODEV; 226 return -ENODEV;
213 } 227 }
214 228
215 ddata->timings = omap_dss_pal_timings; 229 ddata->timings = tvc_pal_timings;
216 230
217 dssdev = &ddata->dssdev; 231 dssdev = &ddata->dssdev;
218 dssdev->driver = &tvc_driver; 232 dssdev->driver = &tvc_driver;
219 dssdev->dev = &pdev->dev; 233 dssdev->dev = &pdev->dev;
220 dssdev->type = OMAP_DISPLAY_TYPE_VENC; 234 dssdev->type = OMAP_DISPLAY_TYPE_VENC;
221 dssdev->owner = THIS_MODULE; 235 dssdev->owner = THIS_MODULE;
222 dssdev->panel.timings = omap_dss_pal_timings; 236 dssdev->panel.timings = tvc_pal_timings;
223 237
224 r = omapdss_register_display(dssdev); 238 r = omapdss_register_display(dssdev);
225 if (r) { 239 if (r) {
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
index b2a8912f6435..a9ac3ce2d0e9 100644
--- a/drivers/video/sgivwfb.c
+++ b/drivers/video/sgivwfb.c
@@ -713,7 +713,7 @@ static int sgivwfb_mmap(struct fb_info *info,
713 r = vm_iomap_memory(vma, sgivwfb_mem_phys, sgivwfb_mem_size); 713 r = vm_iomap_memory(vma, sgivwfb_mem_phys, sgivwfb_mem_size);
714 714
715 printk(KERN_DEBUG "sgivwfb: mmap framebuffer P(%lx)->V(%lx)\n", 715 printk(KERN_DEBUG "sgivwfb: mmap framebuffer P(%lx)->V(%lx)\n",
716 offset, vma->vm_start); 716 sgivwfb_mem_phys + (vma->vm_pgoff << PAGE_SHIFT), vma->vm_start);
717 717
718 return r; 718 return r;
719} 719}
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c
index a8c6c43a4658..1265b25f9f99 100644
--- a/drivers/video/sh7760fb.c
+++ b/drivers/video/sh7760fb.c
@@ -567,7 +567,7 @@ static int sh7760fb_remove(struct platform_device *dev)
567 fb_dealloc_cmap(&info->cmap); 567 fb_dealloc_cmap(&info->cmap);
568 sh7760fb_free_mem(info); 568 sh7760fb_free_mem(info);
569 if (par->irq >= 0) 569 if (par->irq >= 0)
570 free_irq(par->irq, par); 570 free_irq(par->irq, &par->vsync);
571 iounmap(par->base); 571 iounmap(par->base);
572 release_mem_region(par->ioarea->start, resource_size(par->ioarea)); 572 release_mem_region(par->ioarea->start, resource_size(par->ioarea));
573 framebuffer_release(info); 573 framebuffer_release(info);
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 830ded45fd47..2827333703d9 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -1265,7 +1265,6 @@ static void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image
1265 1265
1266static void vga16fb_destroy(struct fb_info *info) 1266static void vga16fb_destroy(struct fb_info *info)
1267{ 1267{
1268 struct platform_device *dev = container_of(info->device, struct platform_device, dev);
1269 iounmap(info->screen_base); 1268 iounmap(info->screen_base);
1270 fb_dealloc_cmap(&info->cmap); 1269 fb_dealloc_cmap(&info->cmap);
1271 /* XXX unshare VGA regions */ 1270 /* XXX unshare VGA regions */
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index f3d4a69e1e4e..6629b29a8202 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -341,8 +341,8 @@ static int xilinxfb_assign(struct platform_device *pdev,
341 341
342 if (drvdata->flags & BUS_ACCESS_FLAG) { 342 if (drvdata->flags & BUS_ACCESS_FLAG) {
343 /* Put a banner in the log (for DEBUG) */ 343 /* Put a banner in the log (for DEBUG) */
344 dev_dbg(dev, "regs: phys=%x, virt=%p\n", drvdata->regs_phys, 344 dev_dbg(dev, "regs: phys=%pa, virt=%p\n",
345 drvdata->regs); 345 &drvdata->regs_phys, drvdata->regs);
346 } 346 }
347 /* Put a banner in the log (for DEBUG) */ 347 /* Put a banner in the log (for DEBUG) */
348 dev_dbg(dev, "fb: phys=%llx, virt=%p, size=%x\n", 348 dev_dbg(dev, "fb: phys=%llx, virt=%p, size=%x\n",
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 9e02d60a364b..23eae5cb69c2 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -145,7 +145,7 @@ config SWIOTLB_XEN
145 145
146config XEN_TMEM 146config XEN_TMEM
147 tristate 147 tristate
148 depends on !ARM 148 depends on !ARM && !ARM64
149 default m if (CLEANCACHE || FRONTSWAP) 149 default m if (CLEANCACHE || FRONTSWAP)
150 help 150 help
151 Shim to interface in-kernel Transcendent Memory hooks 151 Shim to interface in-kernel Transcendent Memory hooks
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index eabd0ee1c2bc..14fe79d8634a 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,9 +1,8 @@
1ifneq ($(CONFIG_ARM),y) 1ifeq ($(filter y, $(CONFIG_ARM) $(CONFIG_ARM64)),)
2obj-y += manage.o
3obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o 2obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
4endif 3endif
5obj-$(CONFIG_X86) += fallback.o 4obj-$(CONFIG_X86) += fallback.o
6obj-y += grant-table.o features.o events.o balloon.o 5obj-y += grant-table.o features.o events.o balloon.o manage.o
7obj-y += xenbus/ 6obj-y += xenbus/
8 7
9nostackp := $(call cc-option, -fno-stack-protector) 8nostackp := $(call cc-option, -fno-stack-protector)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index a58ac435a9a4..5e8be462aed5 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -348,7 +348,7 @@ static void init_evtchn_cpu_bindings(void)
348 348
349 for_each_possible_cpu(i) 349 for_each_possible_cpu(i)
350 memset(per_cpu(cpu_evtchn_mask, i), 350 memset(per_cpu(cpu_evtchn_mask, i),
351 (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i))); 351 (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
352} 352}
353 353
354static inline void clear_evtchn(int port) 354static inline void clear_evtchn(int port)
@@ -1493,8 +1493,10 @@ void rebind_evtchn_irq(int evtchn, int irq)
1493/* Rebind an evtchn so that it gets delivered to a specific cpu */ 1493/* Rebind an evtchn so that it gets delivered to a specific cpu */
1494static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) 1494static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
1495{ 1495{
1496 struct shared_info *s = HYPERVISOR_shared_info;
1496 struct evtchn_bind_vcpu bind_vcpu; 1497 struct evtchn_bind_vcpu bind_vcpu;
1497 int evtchn = evtchn_from_irq(irq); 1498 int evtchn = evtchn_from_irq(irq);
1499 int masked;
1498 1500
1499 if (!VALID_EVTCHN(evtchn)) 1501 if (!VALID_EVTCHN(evtchn))
1500 return -1; 1502 return -1;
@@ -1511,6 +1513,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
1511 bind_vcpu.vcpu = tcpu; 1513 bind_vcpu.vcpu = tcpu;
1512 1514
1513 /* 1515 /*
1516 * Mask the event while changing the VCPU binding to prevent
1517 * it being delivered on an unexpected VCPU.
1518 */
1519 masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
1520
1521 /*
1514 * If this fails, it usually just indicates that we're dealing with a 1522 * If this fails, it usually just indicates that we're dealing with a
1515 * virq or IPI channel, which don't actually need to be rebound. Ignore 1523 * virq or IPI channel, which don't actually need to be rebound. Ignore
1516 * it, but don't do the xenlinux-level rebind in that case. 1524 * it, but don't do the xenlinux-level rebind in that case.
@@ -1518,6 +1526,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
1518 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) 1526 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
1519 bind_evtchn_to_cpu(evtchn, tcpu); 1527 bind_evtchn_to_cpu(evtchn, tcpu);
1520 1528
1529 if (!masked)
1530 unmask_evtchn(evtchn);
1531
1521 return 0; 1532 return 0;
1522} 1533}
1523 1534
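The events.c change masks the channel across the VCPU rebind so a pending event cannot be delivered on the wrong CPU mid-switch, and it records whether the channel was already masked so it only unmasks what it itself masked. That last detail is the reusable part of the idiom; in generic bitops form (a sketch, not the Xen API):

#include <linux/bitops.h>
#include <linux/types.h>

static void mutate_under_mask(unsigned long *mask, int bit,
			      void (*mutate)(void))
{
	/* remember the previous state ... */
	bool was_masked = test_and_set_bit(bit, mask);

	mutate();	/* runs with delivery suppressed */

	/* ... so we never clear a mask bit someone else set */
	if (!was_masked)
		clear_bit(bit, mask);
}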
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 8feecf01d55c..b6165e047f48 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -379,18 +379,12 @@ static long evtchn_ioctl(struct file *file,
379 if (unbind.port >= NR_EVENT_CHANNELS) 379 if (unbind.port >= NR_EVENT_CHANNELS)
380 break; 380 break;
381 381
382 spin_lock_irq(&port_user_lock);
383
384 rc = -ENOTCONN; 382 rc = -ENOTCONN;
385 if (get_port_user(unbind.port) != u) { 383 if (get_port_user(unbind.port) != u)
386 spin_unlock_irq(&port_user_lock);
387 break; 384 break;
388 }
389 385
390 disable_irq(irq_from_evtchn(unbind.port)); 386 disable_irq(irq_from_evtchn(unbind.port));
391 387
392 spin_unlock_irq(&port_user_lock);
393
394 evtchn_unbind_from_user(u, unbind.port); 388 evtchn_unbind_from_user(u, unbind.port);
395 389
396 rc = 0; 390 rc = 0;
@@ -490,26 +484,15 @@ static int evtchn_release(struct inode *inode, struct file *filp)
490 int i; 484 int i;
491 struct per_user_data *u = filp->private_data; 485 struct per_user_data *u = filp->private_data;
492 486
493 spin_lock_irq(&port_user_lock);
494
495 free_page((unsigned long)u->ring);
496
497 for (i = 0; i < NR_EVENT_CHANNELS; i++) { 487 for (i = 0; i < NR_EVENT_CHANNELS; i++) {
498 if (get_port_user(i) != u) 488 if (get_port_user(i) != u)
499 continue; 489 continue;
500 490
501 disable_irq(irq_from_evtchn(i)); 491 disable_irq(irq_from_evtchn(i));
502 }
503
504 spin_unlock_irq(&port_user_lock);
505
506 for (i = 0; i < NR_EVENT_CHANNELS; i++) {
507 if (get_port_user(i) != u)
508 continue;
509
510 evtchn_unbind_from_user(get_port_user(i), i); 492 evtchn_unbind_from_user(get_port_user(i), i);
511 } 493 }
512 494
495 free_page((unsigned long)u->ring);
513 kfree(u->name); 496 kfree(u->name);
514 kfree(u); 497 kfree(u);
515 498
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 6ed8a9df4472..34b20bfa4e8c 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -115,7 +115,6 @@ static int xenbus_frontend_dev_resume(struct device *dev)
115 return -EFAULT; 115 return -EFAULT;
116 } 116 }
117 117
118 INIT_WORK(&xdev->work, xenbus_frontend_delayed_resume);
119 queue_work(xenbus_frontend_wq, &xdev->work); 118 queue_work(xenbus_frontend_wq, &xdev->work);
120 119
121 return 0; 120 return 0;
@@ -124,6 +123,16 @@ static int xenbus_frontend_dev_resume(struct device *dev)
124 return xenbus_dev_resume(dev); 123 return xenbus_dev_resume(dev);
125} 124}
126 125
126static int xenbus_frontend_dev_probe(struct device *dev)
127{
128 if (xen_store_domain_type == XS_LOCAL) {
129 struct xenbus_device *xdev = to_xenbus_device(dev);
130 INIT_WORK(&xdev->work, xenbus_frontend_delayed_resume);
131 }
132
133 return xenbus_dev_probe(dev);
134}
135
127static const struct dev_pm_ops xenbus_pm_ops = { 136static const struct dev_pm_ops xenbus_pm_ops = {
128 .suspend = xenbus_dev_suspend, 137 .suspend = xenbus_dev_suspend,
129 .resume = xenbus_frontend_dev_resume, 138 .resume = xenbus_frontend_dev_resume,
@@ -142,7 +151,7 @@ static struct xen_bus_type xenbus_frontend = {
142 .name = "xen", 151 .name = "xen",
143 .match = xenbus_match, 152 .match = xenbus_match,
144 .uevent = xenbus_uevent_frontend, 153 .uevent = xenbus_uevent_frontend,
145 .probe = xenbus_dev_probe, 154 .probe = xenbus_frontend_dev_probe,
146 .remove = xenbus_dev_remove, 155 .remove = xenbus_dev_remove,
147 .shutdown = xenbus_dev_shutdown, 156 .shutdown = xenbus_dev_shutdown,
148 .dev_attrs = xenbus_dev_attrs, 157 .dev_attrs = xenbus_dev_attrs,
@@ -474,7 +483,11 @@ static int __init xenbus_probe_frontend_init(void)
474 483
475 register_xenstore_notifier(&xenstore_notifier); 484 register_xenstore_notifier(&xenstore_notifier);
476 485
477 xenbus_frontend_wq = create_workqueue("xenbus_frontend"); 486 if (xen_store_domain_type == XS_LOCAL) {
487 xenbus_frontend_wq = create_workqueue("xenbus_frontend");
488 if (!xenbus_frontend_wq)
489 pr_warn("create xenbus frontend workqueue failed, S3 resume is likely to fail\n");
490 }
478 491
479 return 0; 492 return 0;
480} 493}
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 5e376bb93419..8defc6b3f9a2 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -40,7 +40,7 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
40 int block, off; 40 int block, off;
41 41
42 inode = iget_locked(sb, ino); 42 inode = iget_locked(sb, ino);
43 if (IS_ERR(inode)) 43 if (!inode)
44 return ERR_PTR(-ENOMEM); 44 return ERR_PTR(-ENOMEM);
45 if (!(inode->i_state & I_NEW)) 45 if (!(inode->i_state & I_NEW))
46 return inode; 46 return inode;
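The bfs fix corrects a misused return convention: iget_locked() signals allocation failure with NULL, never with an ERR_PTR, so testing its result with IS_ERR() silently accepted the failure. The correct calling shape, as a minimal sketch:

#include <linux/err.h>
#include <linux/fs.h>

static struct inode *my_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)			/* NULL, not ERR_PTR */
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* found in cache, ready to use */

	/* ... read the on-disk inode and fill things in ... */

	unlock_new_inode(inode);
	return inode;
}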
diff --git a/fs/bio.c b/fs/bio.c
index 94bbc04dba77..c5eae7251490 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1045,12 +1045,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
1045int bio_uncopy_user(struct bio *bio) 1045int bio_uncopy_user(struct bio *bio)
1046{ 1046{
1047 struct bio_map_data *bmd = bio->bi_private; 1047 struct bio_map_data *bmd = bio->bi_private;
1048 int ret = 0; 1048 struct bio_vec *bvec;
1049 int ret = 0, i;
1049 1050
1050 if (!bio_flagged(bio, BIO_NULL_MAPPED)) 1051 if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1051 ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, 1052 /*
1052 bmd->nr_sgvecs, bio_data_dir(bio) == READ, 1053 * if we're in a workqueue, the request is orphaned, so
1053 0, bmd->is_our_pages); 1054 * don't copy into a random user address space, just free.
1055 */
1056 if (current->mm)
1057 ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
1058 bmd->nr_sgvecs, bio_data_dir(bio) == READ,
1059 0, bmd->is_our_pages);
1060 else if (bmd->is_our_pages)
1061 bio_for_each_segment_all(bvec, bio, i)
1062 __free_page(bvec->bv_page);
1063 }
1054 bio_free_map_data(bmd); 1064 bio_free_map_data(bmd);
1055 bio_put(bio); 1065 bio_put(bio);
1056 return ret; 1066 return ret;
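The bio_uncopy_user() change hinges on a property worth spelling out: workqueue workers are kernel threads, and kernel threads run with current->mm == NULL, so when the completion runs there the submitter's user address space is gone and the bounce pages must simply be freed rather than copied back. The guard reduces to a one-liner (a sketch of the test, not a general-purpose helper in the kernel):

#include <linux/sched.h>

/* true only in process context that owns a user address space;
 * false in any kthread, including all workqueue callbacks */
static bool may_copy_to_user_buffer(void)
{
	return current->mm != NULL;
}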
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index eaf133384a8f..8bc5e8ccb091 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -36,16 +36,23 @@ static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
36 u64 extent_item_pos, 36 u64 extent_item_pos,
37 struct extent_inode_elem **eie) 37 struct extent_inode_elem **eie)
38{ 38{
39 u64 data_offset; 39 u64 offset = 0;
40 u64 data_len;
41 struct extent_inode_elem *e; 40 struct extent_inode_elem *e;
42 41
43 data_offset = btrfs_file_extent_offset(eb, fi); 42 if (!btrfs_file_extent_compression(eb, fi) &&
44 data_len = btrfs_file_extent_num_bytes(eb, fi); 43 !btrfs_file_extent_encryption(eb, fi) &&
44 !btrfs_file_extent_other_encoding(eb, fi)) {
45 u64 data_offset;
46 u64 data_len;
45 47
46 if (extent_item_pos < data_offset || 48 data_offset = btrfs_file_extent_offset(eb, fi);
47 extent_item_pos >= data_offset + data_len) 49 data_len = btrfs_file_extent_num_bytes(eb, fi);
48 return 1; 50
51 if (extent_item_pos < data_offset ||
52 extent_item_pos >= data_offset + data_len)
53 return 1;
54 offset = extent_item_pos - data_offset;
55 }
49 56
50 e = kmalloc(sizeof(*e), GFP_NOFS); 57 e = kmalloc(sizeof(*e), GFP_NOFS);
51 if (!e) 58 if (!e)
@@ -53,7 +60,7 @@ static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
53 60
54 e->next = *eie; 61 e->next = *eie;
55 e->inum = key->objectid; 62 e->inum = key->objectid;
56 e->offset = key->offset + (extent_item_pos - data_offset); 63 e->offset = key->offset + offset;
57 *eie = e; 64 *eie = e;
58 65
59 return 0; 66 return 0;
@@ -189,7 +196,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
189 struct extent_buffer *eb; 196 struct extent_buffer *eb;
190 struct btrfs_key key; 197 struct btrfs_key key;
191 struct btrfs_file_extent_item *fi; 198 struct btrfs_file_extent_item *fi;
192 struct extent_inode_elem *eie = NULL; 199 struct extent_inode_elem *eie = NULL, *old = NULL;
193 u64 disk_byte; 200 u64 disk_byte;
194 201
195 if (level != 0) { 202 if (level != 0) {
@@ -223,6 +230,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
223 230
224 if (disk_byte == wanted_disk_byte) { 231 if (disk_byte == wanted_disk_byte) {
225 eie = NULL; 232 eie = NULL;
233 old = NULL;
226 if (extent_item_pos) { 234 if (extent_item_pos) {
227 ret = check_extent_in_eb(&key, eb, fi, 235 ret = check_extent_in_eb(&key, eb, fi,
228 *extent_item_pos, 236 *extent_item_pos,
@@ -230,18 +238,20 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
230 if (ret < 0) 238 if (ret < 0)
231 break; 239 break;
232 } 240 }
233 if (!ret) { 241 if (ret > 0)
234 ret = ulist_add(parents, eb->start, 242 goto next;
235 (uintptr_t)eie, GFP_NOFS); 243 ret = ulist_add_merge(parents, eb->start,
236 if (ret < 0) 244 (uintptr_t)eie,
237 break; 245 (u64 *)&old, GFP_NOFS);
238 if (!extent_item_pos) { 246 if (ret < 0)
239 ret = btrfs_next_old_leaf(root, path, 247 break;
240 time_seq); 248 if (!ret && extent_item_pos) {
241 continue; 249 while (old->next)
242 } 250 old = old->next;
251 old->next = eie;
243 } 252 }
244 } 253 }
254next:
245 ret = btrfs_next_old_item(root, path, time_seq); 255 ret = btrfs_next_old_item(root, path, time_seq);
246 } 256 }
247 257
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 5bf4c39e2ad6..ed504607d8ec 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1271,7 +1271,6 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1271 BUG_ON(!eb_rewin); 1271 BUG_ON(!eb_rewin);
1272 } 1272 }
1273 1273
1274 extent_buffer_get(eb_rewin);
1275 btrfs_tree_read_unlock(eb); 1274 btrfs_tree_read_unlock(eb);
1276 free_extent_buffer(eb); 1275 free_extent_buffer(eb);
1277 1276
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 583d98bd065e..fe443fece851 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4048,7 +4048,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4048 } 4048 }
4049 4049
4050 while (!end) { 4050 while (!end) {
4051 u64 offset_in_extent; 4051 u64 offset_in_extent = 0;
4052 4052
4053 /* break if the extent we found is outside the range */ 4053 /* break if the extent we found is outside the range */
4054 if (em->start >= max || extent_map_end(em) < off) 4054 if (em->start >= max || extent_map_end(em) < off)
@@ -4064,9 +4064,12 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4064 4064
4065 /* 4065 /*
4066 * record the offset from the start of the extent 4066 * record the offset from the start of the extent
4067 * for adjusting the disk offset below 4067 * for adjusting the disk offset below. Only do this if the
4068 * extent isn't compressed since our in ram offset may be past
4069 * what we have actually allocated on disk.
4068 */ 4070 */
4069 offset_in_extent = em_start - em->start; 4071 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4072 offset_in_extent = em_start - em->start;
4070 em_end = extent_map_end(em); 4073 em_end = extent_map_end(em);
4071 em_len = em_end - em_start; 4074 em_len = em_end - em_start;
4072 emflags = em->flags; 4075 emflags = em->flags;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a005fe2c072a..8e686a427ce2 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -596,20 +596,29 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
596 if (no_splits) 596 if (no_splits)
597 goto next; 597 goto next;
598 598
599 if (em->block_start < EXTENT_MAP_LAST_BYTE && 599 if (em->start < start) {
600 em->start < start) {
601 split->start = em->start; 600 split->start = em->start;
602 split->len = start - em->start; 601 split->len = start - em->start;
603 split->orig_start = em->orig_start;
604 split->block_start = em->block_start;
605 602
606 if (compressed) 603 if (em->block_start < EXTENT_MAP_LAST_BYTE) {
607 split->block_len = em->block_len; 604 split->orig_start = em->orig_start;
608 else 605 split->block_start = em->block_start;
609 split->block_len = split->len; 606
610 split->ram_bytes = em->ram_bytes; 607 if (compressed)
611 split->orig_block_len = max(split->block_len, 608 split->block_len = em->block_len;
612 em->orig_block_len); 609 else
610 split->block_len = split->len;
611 split->orig_block_len = max(split->block_len,
612 em->orig_block_len);
613 split->ram_bytes = em->ram_bytes;
614 } else {
615 split->orig_start = split->start;
616 split->block_len = 0;
617 split->block_start = em->block_start;
618 split->orig_block_len = 0;
619 split->ram_bytes = split->len;
620 }
621
613 split->generation = gen; 622 split->generation = gen;
614 split->bdev = em->bdev; 623 split->bdev = em->bdev;
615 split->flags = flags; 624 split->flags = flags;
@@ -620,8 +629,7 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
620 split = split2; 629 split = split2;
621 split2 = NULL; 630 split2 = NULL;
622 } 631 }
623 if (em->block_start < EXTENT_MAP_LAST_BYTE && 632 if (testend && em->start + em->len > start + len) {
624 testend && em->start + em->len > start + len) {
625 u64 diff = start + len - em->start; 633 u64 diff = start + len - em->start;
626 634
627 split->start = start + len; 635 split->start = start + len;
@@ -630,18 +638,28 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
630 split->flags = flags; 638 split->flags = flags;
631 split->compress_type = em->compress_type; 639 split->compress_type = em->compress_type;
632 split->generation = gen; 640 split->generation = gen;
633 split->orig_block_len = max(em->block_len, 641
642 if (em->block_start < EXTENT_MAP_LAST_BYTE) {
643 split->orig_block_len = max(em->block_len,
634 em->orig_block_len); 644 em->orig_block_len);
635 split->ram_bytes = em->ram_bytes;
636 645
637 if (compressed) { 646 split->ram_bytes = em->ram_bytes;
638 split->block_len = em->block_len; 647 if (compressed) {
639 split->block_start = em->block_start; 648 split->block_len = em->block_len;
640 split->orig_start = em->orig_start; 649 split->block_start = em->block_start;
650 split->orig_start = em->orig_start;
651 } else {
652 split->block_len = split->len;
653 split->block_start = em->block_start
654 + diff;
655 split->orig_start = em->orig_start;
656 }
641 } else { 657 } else {
642 split->block_len = split->len; 658 split->ram_bytes = split->len;
643 split->block_start = em->block_start + diff; 659 split->orig_start = split->start;
644 split->orig_start = em->orig_start; 660 split->block_len = 0;
661 split->block_start = em->block_start;
662 split->orig_block_len = 0;
645 } 663 }
646 664
647 ret = add_extent_mapping(em_tree, split, modified); 665 ret = add_extent_mapping(em_tree, split, modified);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 6d1b93c8aafb..021694c08181 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2166,16 +2166,23 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2166 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr) 2166 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2167 continue; 2167 continue;
2168 2168
2169 extent_offset = btrfs_file_extent_offset(leaf, extent); 2169 /*
2170 if (key.offset - extent_offset != offset) 2170 * 'offset' refers to the exact key.offset,
2171 * NOT the 'offset' field in btrfs_extent_data_ref, ie.
2172 * (key.offset - extent_offset).
2173 */
2174 if (key.offset != offset)
2171 continue; 2175 continue;
2172 2176
2177 extent_offset = btrfs_file_extent_offset(leaf, extent);
2173 num_bytes = btrfs_file_extent_num_bytes(leaf, extent); 2178 num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2179
2174 if (extent_offset >= old->extent_offset + old->offset + 2180 if (extent_offset >= old->extent_offset + old->offset +
2175 old->len || extent_offset + num_bytes <= 2181 old->len || extent_offset + num_bytes <=
2176 old->extent_offset + old->offset) 2182 old->extent_offset + old->offset)
2177 continue; 2183 continue;
2178 2184
2185 ret = 0;
2179 break; 2186 break;
2180 } 2187 }
2181 2188
@@ -2187,7 +2194,7 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,

     backref->root_id = root_id;
     backref->inum = inum;
-    backref->file_pos = offset + extent_offset;
+    backref->file_pos = offset;
     backref->num_bytes = num_bytes;
     backref->extent_offset = extent_offset;
     backref->generation = btrfs_file_extent_generation(leaf, extent);
@@ -2210,7 +2217,8 @@ static noinline bool record_extent_backrefs(struct btrfs_path *path,
     new->path = path;

     list_for_each_entry_safe(old, tmp, &new->head, list) {
-        ret = iterate_inodes_from_logical(old->bytenr, fs_info,
+        ret = iterate_inodes_from_logical(old->bytenr +
+                                          old->extent_offset, fs_info,
                                           path, record_one_backref,
                                           old);
         BUG_ON(ret < 0 && ret != -ENOENT);
@@ -4391,9 +4399,6 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
     int mask = attr->ia_valid;
     int ret;

-    if (newsize == oldsize)
-        return 0;
-
     /*
      * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
      * special case where we need to update the times despite not having
@@ -5165,14 +5170,31 @@ next:
     }

     /* Reached end of directory/root. Bump pos past the last item. */
-    if (key_type == BTRFS_DIR_INDEX_KEY)
-        /*
-         * 32-bit glibc will use getdents64, but then strtol -
-         * so the last number we can serve is this.
-         */
-        ctx->pos = 0x7fffffff;
-    else
-        ctx->pos++;
+    ctx->pos++;
+
+    /*
+     * Stop new entries from being returned after we return the last
+     * entry.
+     *
+     * New directory entries are assigned a strictly increasing
+     * offset. This means that new entries created during readdir
+     * are *guaranteed* to be seen in the future by that readdir.
+     * This has broken buggy programs which operate on names as
+     * they're returned by readdir. Until we re-use freed offsets
+     * we have this hack to stop new entries from being returned
+     * under the assumption that they'll never reach this huge
+     * offset.
+     *
+     * This is being careful not to overflow 32bit loff_t unless the
+     * last entry requires it because doing so has broken 32bit apps
+     * in the past.
+     */
+    if (key_type == BTRFS_DIR_INDEX_KEY) {
+        if (ctx->pos >= INT_MAX)
+            ctx->pos = LLONG_MAX;
+        else
+            ctx->pos = INT_MAX;
+    }
nopos:
     ret = 0;
err:
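(Editorial aside, not part of the patch: the readdir hunk above caps ctx->pos because 32-bit consumers historically truncate d_off. A minimal userspace sketch of the failure mode it guards against follows; the struct layout matches the getdents64 ABI, and error handling is deliberately thin.)

/* Hypothetical demo: flags directory offsets that a 32-bit reader
 * would truncate. Mirrors why btrfs clamps ctx->pos at INT_MAX above. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

struct linux_dirent64 {
    unsigned long long d_ino;
    long long          d_off;      /* offset of the next entry */
    unsigned short     d_reclen;
    unsigned char      d_type;
    char               d_name[];
};

int main(void)
{
    char buf[4096];
    int fd = open(".", O_RDONLY | O_DIRECTORY);
    if (fd < 0)
        return 1;
    long n;
    while ((n = syscall(SYS_getdents64, fd, buf, sizeof(buf))) > 0) {
        for (long off = 0; off < n; ) {
            struct linux_dirent64 *d = (void *)(buf + off);
            if ((int)d->d_off != d->d_off)   /* lossy if held in 32 bits */
                printf("%s: d_off %lld overflows a 32-bit offset\n",
                       d->d_name, d->d_off);
            off += d->d_reclen;
        }
    }
    close(fd);
    return 0;
}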
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index d58cce77fc6c..af1931a5960d 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -983,12 +983,12 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
  * a dirty root struct and adds it into the list of dead roots that need to
  * be deleted
  */
-int btrfs_add_dead_root(struct btrfs_root *root)
+void btrfs_add_dead_root(struct btrfs_root *root)
 {
     spin_lock(&root->fs_info->trans_lock);
-    list_add_tail(&root->root_list, &root->fs_info->dead_roots);
+    if (list_empty(&root->root_list))
+        list_add_tail(&root->root_list, &root->fs_info->dead_roots);
     spin_unlock(&root->fs_info->trans_lock);
-    return 0;
 }

 /*
@@ -1925,7 +1925,7 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
     }
     root = list_first_entry(&fs_info->dead_roots,
                             struct btrfs_root, root_list);
-    list_del(&root->root_list);
+    list_del_init(&root->root_list);
     spin_unlock(&fs_info->trans_lock);

     pr_debug("btrfs: cleaner removing %llu\n",
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 005b0375d18c..defbc4269897 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -143,7 +143,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root);

-int btrfs_add_dead_root(struct btrfs_root *root);
+void btrfs_add_dead_root(struct btrfs_root *root);
 int btrfs_defrag_root(struct btrfs_root *root);
 int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 2c6791493637..ff60d8978ae2 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3746,8 +3746,9 @@ next_slot:
     }

log_extents:
+    btrfs_release_path(path);
+    btrfs_release_path(dst_path);
     if (fast_search) {
-        btrfs_release_path(dst_path);
         ret = btrfs_log_changed_extents(trans, root, inode, dst_path);
         if (ret) {
             err = ret;
@@ -3764,8 +3765,6 @@ log_extents:
     }

     if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
-        btrfs_release_path(path);
-        btrfs_release_path(dst_path);
         ret = log_directory_changes(trans, root, inode, path, dst_path);
         if (ret) {
             err = ret;
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 45e57cc38200..fc6f4f3a1a9d 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -43,17 +43,18 @@ cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server)
     server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
     if (IS_ERR(server->secmech.md5)) {
         cifs_dbg(VFS, "could not allocate crypto md5\n");
-        return PTR_ERR(server->secmech.md5);
+        rc = PTR_ERR(server->secmech.md5);
+        server->secmech.md5 = NULL;
+        return rc;
     }

     size = sizeof(struct shash_desc) +
            crypto_shash_descsize(server->secmech.md5);
     server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL);
     if (!server->secmech.sdescmd5) {
-        rc = -ENOMEM;
         crypto_free_shash(server->secmech.md5);
         server->secmech.md5 = NULL;
-        return rc;
+        return -ENOMEM;
     }
     server->secmech.sdescmd5->shash.tfm = server->secmech.md5;
     server->secmech.sdescmd5->shash.flags = 0x0;
@@ -421,7 +422,7 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
             if (blobptr + attrsize > blobend)
                 break;
             if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
-                if (!attrsize)
+                if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN)
                     break;
                 if (!ses->domainName) {
                     ses->domainName =
@@ -591,6 +592,7 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)

 static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
 {
+    int rc;
     unsigned int size;

     /* check if already allocated */
@@ -600,7 +602,9 @@ static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
     server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
     if (IS_ERR(server->secmech.hmacmd5)) {
         cifs_dbg(VFS, "could not allocate crypto hmacmd5\n");
-        return PTR_ERR(server->secmech.hmacmd5);
+        rc = PTR_ERR(server->secmech.hmacmd5);
+        server->secmech.hmacmd5 = NULL;
+        return rc;
     }

     size = sizeof(struct shash_desc) +
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 4bdd547dbf6f..85ea98d139fc 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -147,18 +147,17 @@ cifs_read_super(struct super_block *sb)
         goto out_no_root;
     }

+    if (cifs_sb_master_tcon(cifs_sb)->nocase)
+        sb->s_d_op = &cifs_ci_dentry_ops;
+    else
+        sb->s_d_op = &cifs_dentry_ops;
+
     sb->s_root = d_make_root(inode);
     if (!sb->s_root) {
         rc = -ENOMEM;
         goto out_no_root;
     }

-    /* do that *after* d_make_root() - we want NULL ->d_op for root here */
-    if (cifs_sb_master_tcon(cifs_sb)->nocase)
-        sb->s_d_op = &cifs_ci_dentry_ops;
-    else
-        sb->s_d_op = &cifs_dentry_ops;
-
 #ifdef CONFIG_CIFS_NFSD_EXPORT
     if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
         cifs_dbg(FYI, "export ops supported\n");
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 1fdc37041057..52ca861ed35e 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -44,6 +44,7 @@
 #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
 #define MAX_SERVER_SIZE 15
 #define MAX_SHARE_SIZE 80
+#define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */
 #define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */
 #define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */

@@ -369,6 +370,9 @@ struct smb_version_operations {
     void (*generate_signingkey)(struct TCP_Server_Info *server);
     int (*calc_signature)(struct smb_rqst *rqst,
                           struct TCP_Server_Info *server);
+    int (*query_mf_symlink)(const unsigned char *path, char *pbuf,
+                            unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+                            unsigned int xid);
 };

 struct smb_version_values {
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index f7e584d047e2..b29a012bed33 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -497,5 +497,7 @@ void cifs_writev_complete(struct work_struct *work);
 struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
                                             work_func_t complete);
 void cifs_writedata_release(struct kref *refcount);
-
+int open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
+                                  unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+                                  unsigned int xid);
 #endif /* _CIFSPROTO_H */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index fa68813396b5..d67c550c4980 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1675,7 +1675,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
             if (string == NULL)
                 goto out_nomem;

-            if (strnlen(string, 256) == 256) {
+            if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN)
+                == CIFS_MAX_DOMAINNAME_LEN) {
                 printk(KERN_WARNING "CIFS: domain name too"
                        " long\n");
                 goto cifs_parse_mount_err;
@@ -2276,8 +2277,8 @@ cifs_put_smb_ses(struct cifs_ses *ses)

 #ifdef CONFIG_KEYS

-/* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */
-#define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1)
+/* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
+#define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)

 /* Populate username and pw fields from keyring if possible */
 static int
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 1e57f36ea1b2..7e36ae34e947 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -647,6 +647,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
                      oflags, &oplock, &cfile->fid.netfid, xid);
         if (rc == 0) {
             cifs_dbg(FYI, "posix reopen succeeded\n");
+            oparms.reconnect = true;
             goto reopen_success;
         }
         /*
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index b83c3f5646bd..562044f700e5 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -305,67 +305,89 @@ CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr)
 }

 int
-CIFSCheckMFSymlink(struct cifs_fattr *fattr,
-                   const unsigned char *path,
-                   struct cifs_sb_info *cifs_sb, unsigned int xid)
+open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
+                              unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+                              unsigned int xid)
 {
     int rc;
     int oplock = 0;
     __u16 netfid = 0;
     struct tcon_link *tlink;
-    struct cifs_tcon *pTcon;
+    struct cifs_tcon *ptcon;
     struct cifs_io_parms io_parms;
-    u8 *buf;
-    char *pbuf;
-    unsigned int bytes_read = 0;
     int buf_type = CIFS_NO_BUFFER;
-    unsigned int link_len = 0;
     FILE_ALL_INFO file_info;

-    if (!CIFSCouldBeMFSymlink(fattr))
-        /* it's not a symlink */
-        return 0;
-
     tlink = cifs_sb_tlink(cifs_sb);
     if (IS_ERR(tlink))
         return PTR_ERR(tlink);
-    pTcon = tlink_tcon(tlink);
+    ptcon = tlink_tcon(tlink);

-    rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ,
+    rc = CIFSSMBOpen(xid, ptcon, path, FILE_OPEN, GENERIC_READ,
                      CREATE_NOT_DIR, &netfid, &oplock, &file_info,
                      cifs_sb->local_nls,
                      cifs_sb->mnt_cifs_flags &
                      CIFS_MOUNT_MAP_SPECIAL_CHR);
-    if (rc != 0)
-        goto out;
+    if (rc != 0) {
+        cifs_put_tlink(tlink);
+        return rc;
+    }

     if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
-        CIFSSMBClose(xid, pTcon, netfid);
+        CIFSSMBClose(xid, ptcon, netfid);
+        cifs_put_tlink(tlink);
         /* it's not a symlink */
-        goto out;
+        return rc;
     }

-    buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
-    if (!buf) {
-        rc = -ENOMEM;
-        goto out;
-    }
-    pbuf = buf;
     io_parms.netfid = netfid;
     io_parms.pid = current->tgid;
-    io_parms.tcon = pTcon;
+    io_parms.tcon = ptcon;
     io_parms.offset = 0;
     io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;

-    rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type);
-    CIFSSMBClose(xid, pTcon, netfid);
-    if (rc != 0) {
-        kfree(buf);
+    rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type);
+    CIFSSMBClose(xid, ptcon, netfid);
+    cifs_put_tlink(tlink);
+    return rc;
+}
+
+
+int
+CIFSCheckMFSymlink(struct cifs_fattr *fattr,
+                   const unsigned char *path,
+                   struct cifs_sb_info *cifs_sb, unsigned int xid)
+{
+    int rc = 0;
+    u8 *buf = NULL;
+    unsigned int link_len = 0;
+    unsigned int bytes_read = 0;
+    struct cifs_tcon *ptcon;
+
+    if (!CIFSCouldBeMFSymlink(fattr))
+        /* it's not a symlink */
+        return 0;
+
+    buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
+    if (!buf) {
+        rc = -ENOMEM;
         goto out;
     }

+    ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
+    if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink))
+        rc = ptcon->ses->server->ops->query_mf_symlink(path, buf,
+                                                       &bytes_read, cifs_sb, xid);
+    else
+        goto out;
+
+    if (rc != 0)
+        goto out;
+
+    if (bytes_read == 0) /* not a symlink */
+        goto out;
+
     rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL);
-    kfree(buf);
     if (rc == -EINVAL) {
         /* it's not a symlink */
         rc = 0;
@@ -381,7 +403,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
         fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO;
         fattr->cf_dtype = DT_LNK;
 out:
-    cifs_put_tlink(tlink);
+    kfree(buf);
     return rc;
 }

diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index ab8778469394..69d2c826a23b 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -111,6 +111,14 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
         return;
     }

+    /*
+     * If we know that the inode will need to be revalidated immediately,
+     * then don't create a new dentry for it. We'll end up doing an on
+     * the wire call either way and this spares us an invalidation.
+     */
+    if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
+        return;
+
     dentry = d_alloc(parent, name);
     if (!dentry)
         return;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 79358e341fd2..08dd37bb23aa 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -197,7 +197,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
         bytes_ret = 0;
     } else
         bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName,
-                                    256, nls_cp);
+                                    CIFS_MAX_DOMAINNAME_LEN, nls_cp);
     bcc_ptr += 2 * bytes_ret;
     bcc_ptr += 2; /* account for null terminator */

@@ -255,8 +255,8 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,

     /* copy domain */
     if (ses->domainName != NULL) {
-        strncpy(bcc_ptr, ses->domainName, 256);
-        bcc_ptr += strnlen(ses->domainName, 256);
+        strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+        bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
     } /* else we will send a null domain name
          so the server will default to its own domain */
     *bcc_ptr = 0;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 6457690731a2..60943978aec3 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -944,6 +944,7 @@ struct smb_version_operations smb1_operations = {
     .mand_lock = cifs_mand_lock,
     .mand_unlock_range = cifs_unlock_range,
     .push_mand_locks = cifs_push_mandatory_locks,
+    .query_mf_symlink = open_query_close_cifs_symlink,
 };

 struct smb_version_values smb1_values = {
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 301b191270b9..4f2300d020c7 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -42,6 +42,7 @@
 static int
 smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
 {
+    int rc;
     unsigned int size;

     if (server->secmech.sdeschmacsha256 != NULL)
@@ -50,7 +51,9 @@ smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
     server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0);
     if (IS_ERR(server->secmech.hmacsha256)) {
         cifs_dbg(VFS, "could not allocate crypto hmacsha256\n");
-        return PTR_ERR(server->secmech.hmacsha256);
+        rc = PTR_ERR(server->secmech.hmacsha256);
+        server->secmech.hmacsha256 = NULL;
+        return rc;
     }

     size = sizeof(struct shash_desc) +
@@ -87,7 +90,9 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
         server->secmech.sdeschmacsha256 = NULL;
         crypto_free_shash(server->secmech.hmacsha256);
         server->secmech.hmacsha256 = NULL;
-        return PTR_ERR(server->secmech.cmacaes);
+        rc = PTR_ERR(server->secmech.cmacaes);
+        server->secmech.cmacaes = NULL;
+        return rc;
     }

     size = sizeof(struct shash_desc) +
diff --git a/fs/dcache.c b/fs/dcache.c
index 87bdb5329c3c..83cfb834db03 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2724,6 +2724,17 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
     return memcpy(buffer, temp, sz);
 }

+char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+    char *end = buffer + buflen;
+    /* these dentries are never renamed, so d_lock is not needed */
+    if (prepend(&end, &buflen, " (deleted)", 11) ||
+        prepend_name(&end, &buflen, &dentry->d_name) ||
+        prepend(&end, &buflen, "/", 1))
+        end = ERR_PTR(-ENAMETOOLONG);
+    return end;
+}
+
 /*
  * Write full pathname from the root of the filesystem into the buffer.
  */
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 4888cb3fdef7..c7c83ff0f752 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -533,8 +533,7 @@ EXPORT_SYMBOL_GPL(debugfs_remove);
  */
 void debugfs_remove_recursive(struct dentry *dentry)
 {
-    struct dentry *child;
-    struct dentry *parent;
+    struct dentry *child, *next, *parent;

     if (IS_ERR_OR_NULL(dentry))
         return;
@@ -544,61 +543,37 @@ void debugfs_remove_recursive(struct dentry *dentry)
         return;

     parent = dentry;
+down:
     mutex_lock(&parent->d_inode->i_mutex);
+    list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
+        if (!debugfs_positive(child))
+            continue;

-    while (1) {
-        /*
-         * When all dentries under "parent" has been removed,
-         * walk up the tree until we reach our starting point.
-         */
-        if (list_empty(&parent->d_subdirs)) {
-            mutex_unlock(&parent->d_inode->i_mutex);
-            if (parent == dentry)
-                break;
-            parent = parent->d_parent;
-            mutex_lock(&parent->d_inode->i_mutex);
-        }
-        child = list_entry(parent->d_subdirs.next, struct dentry,
-                           d_u.d_child);
-next_sibling:
-
-        /*
-         * If "child" isn't empty, walk down the tree and
-         * remove all its descendants first.
-         */
+        /* perhaps simple_empty(child) makes more sense */
         if (!list_empty(&child->d_subdirs)) {
             mutex_unlock(&parent->d_inode->i_mutex);
             parent = child;
-            mutex_lock(&parent->d_inode->i_mutex);
-            continue;
+            goto down;
         }
-        __debugfs_remove(child, parent);
-        if (parent->d_subdirs.next == &child->d_u.d_child) {
-            /*
-             * Try the next sibling.
-             */
-            if (child->d_u.d_child.next != &parent->d_subdirs) {
-                child = list_entry(child->d_u.d_child.next,
-                                   struct dentry,
-                                   d_u.d_child);
-                goto next_sibling;
-            }
-
-            /*
-             * Avoid infinite loop if we fail to remove
-             * one dentry.
-             */
-            mutex_unlock(&parent->d_inode->i_mutex);
-            break;
-        }
-        simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+up:
+        if (!__debugfs_remove(child, parent))
+            simple_release_fs(&debugfs_mount, &debugfs_mount_count);
     }

-    parent = dentry->d_parent;
+    mutex_unlock(&parent->d_inode->i_mutex);
+    child = parent;
+    parent = parent->d_parent;
     mutex_lock(&parent->d_inode->i_mutex);
-    __debugfs_remove(dentry, parent);
+
+    if (child != dentry) {
+        next = list_entry(child->d_u.d_child.next, struct dentry,
+                          d_u.d_child);
+        goto up;
+    }
+
+    if (!__debugfs_remove(child, parent))
+        simple_release_fs(&debugfs_mount, &debugfs_mount_count);
     mutex_unlock(&parent->d_inode->i_mutex);
-    simple_release_fs(&debugfs_mount, &debugfs_mount_count);
 }
 EXPORT_SYMBOL_GPL(debugfs_remove_recursive);

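(Editorial aside, not part of the patch: the rewrite above replaces a fragile sibling-tracking loop with a down:/up: walk. Here is a compact sketch of the same idea on a toy n-ary tree, using parent pointers instead of dentry lists and ignoring the locking a real filesystem needs; all names are hypothetical.)

/* Hypothetical toy model of the down/up traversal: visit every node in
 * post-order without recursion, computing "next" before visiting so a
 * visit may free the node, as __debugfs_remove() effectively does. */
#include <stddef.h>

struct node {
    struct node *parent;
    struct node *first_child;
    struct node *next_sibling;
};

static void visit(struct node *n)
{
    (void)n;    /* removal/cleanup action would go here */
}

static void remove_recursive(struct node *root)
{
    struct node *n = root;

    while (n) {
        if (n->first_child) {        /* go down while children remain */
            n = n->first_child;
            continue;
        }
        for (;;) {                   /* go up, visiting finished nodes */
            struct node *next = n->next_sibling;
            struct node *parent = n->parent;

            visit(n);
            if (n == root)
                return;
            if (next) {              /* resume with the next sibling */
                n = next;
                break;
            }
            n = parent;              /* all siblings done: parent is next */
        }
    }
}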
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 911649a47dd5..812149119fa3 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -686,7 +686,6 @@ static int device_close(struct inode *inode, struct file *file)
        device_remove_lockspace() */

     sigprocmask(SIG_SETMASK, &tmpsig, NULL);
-    recalc_sigpending();

     return 0;
 }
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index f3913eb2c474..d15ccf20f1b3 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -57,7 +57,7 @@ struct inode *efs_iget(struct super_block *super, unsigned long ino)
     struct inode *inode;

     inode = iget_locked(super, ino);
-    if (IS_ERR(inode))
+    if (!inode)
         return ERR_PTR(-ENOMEM);
     if (!(inode->i_state & I_NEW))
         return inode;
diff --git a/fs/exec.c b/fs/exec.c
index 9c73def87642..fd774c7cb483 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -608,7 +608,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
         return -ENOMEM;

     lru_add_drain();
-    tlb_gather_mmu(&tlb, mm, 0);
+    tlb_gather_mmu(&tlb, mm, old_start, old_end);
     if (new_end > old_start) {
         /*
          * when the old and new regions overlap clear from new_end.
@@ -625,7 +625,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
         free_pgd_range(&tlb, old_start, old_end, new_end,
             vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
     }
-    tlb_finish_mmu(&tlb, new_end, old_end);
+    tlb_finish_mmu(&tlb, old_start, old_end);

     /*
      * Shrink the vma to just the new range. Always succeeds.
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index b577e45425b0..0ab26fbf3380 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2086,6 +2086,7 @@ extern int ext4_sync_inode(handle_t *, struct inode *);
 extern void ext4_dirty_inode(struct inode *, int);
 extern int ext4_change_inode_journal_flag(struct inode *, int);
 extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
+extern int ext4_inode_attach_jinode(struct inode *inode);
 extern int ext4_can_truncate(struct inode *inode);
 extern void ext4_truncate(struct inode *);
 extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 72a3600aedbd..17ac112ab101 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -255,10 +255,10 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
         set_buffer_prio(bh);
     if (ext4_handle_valid(handle)) {
         err = jbd2_journal_dirty_metadata(handle, bh);
-        if (err) {
-            /* Errors can only happen if there is a bug */
-            handle->h_err = err;
-            __ext4_journal_stop(where, line, handle);
+        /* Errors can only happen if there is a bug */
+        if (WARN_ON_ONCE(err)) {
+            ext4_journal_abort_handle(where, line, __func__, bh,
+                                      handle, err);
         }
     } else {
         if (inode)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index a61873808f76..72ba4705d4fa 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4412,7 +4412,7 @@ void ext4_ext_truncate(handle_t *handle, struct inode *inode)
retry:
     err = ext4_es_remove_extent(inode, last_block,
                                 EXT_MAX_BLOCKS - last_block);
-    if (err == ENOMEM) {
+    if (err == -ENOMEM) {
         cond_resched();
         congestion_wait(BLK_RW_ASYNC, HZ/50);
         goto retry;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 6f4cc567c382..319c9d26279a 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -219,7 +219,6 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
 {
     struct super_block *sb = inode->i_sb;
     struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-    struct ext4_inode_info *ei = EXT4_I(inode);
     struct vfsmount *mnt = filp->f_path.mnt;
     struct path path;
     char buf[64], *cp;
@@ -259,22 +258,10 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
      * Set up the jbd2_inode if we are opening the inode for
      * writing and the journal is present
      */
-    if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
-        struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);
-
-        spin_lock(&inode->i_lock);
-        if (!ei->jinode) {
-            if (!jinode) {
-                spin_unlock(&inode->i_lock);
-                return -ENOMEM;
-            }
-            ei->jinode = jinode;
-            jbd2_journal_init_jbd_inode(ei->jinode, inode);
-            jinode = NULL;
-        }
-        spin_unlock(&inode->i_lock);
-        if (unlikely(jinode != NULL))
-            jbd2_free_inode(jinode);
+    if (filp->f_mode & FMODE_WRITE) {
+        int ret = ext4_inode_attach_jinode(inode);
+        if (ret < 0)
+            return ret;
     }
     return dquot_file_open(inode, filp);
 }
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index f03598c6ffd3..8bf5999875ee 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -734,11 +734,8 @@ repeat_in_this_group:
         ino = ext4_find_next_zero_bit((unsigned long *)
                                       inode_bitmap_bh->b_data,
                                       EXT4_INODES_PER_GROUP(sb), ino);
-        if (ino >= EXT4_INODES_PER_GROUP(sb)) {
-            if (++group == ngroups)
-                group = 0;
-            continue;
-        }
+        if (ino >= EXT4_INODES_PER_GROUP(sb))
+            goto next_group;
         if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
             ext4_error(sb, "reserved inode found cleared - "
                        "inode=%lu", ino + 1);
@@ -769,6 +766,9 @@ repeat_in_this_group:
             goto got; /* we grabbed the inode! */
         if (ino < EXT4_INODES_PER_GROUP(sb))
             goto repeat_in_this_group;
+next_group:
+        if (++group == ngroups)
+            group = 0;
     }
     err = -ENOSPC;
     goto out;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index ba33c67d6e48..c2ca04e67a4f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -555,14 +555,13 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
         int ret;
         unsigned long long status;

-#ifdef ES_AGGRESSIVE_TEST
-        if (retval != map->m_len) {
-            printk("ES len assertion failed for inode: %lu "
-                   "retval %d != map->m_len %d "
-                   "in %s (lookup)\n", inode->i_ino, retval,
-                   map->m_len, __func__);
+        if (unlikely(retval != map->m_len)) {
+            ext4_warning(inode->i_sb,
+                         "ES len assertion failed for inode "
+                         "%lu: retval %d != map->m_len %d",
+                         inode->i_ino, retval, map->m_len);
+            WARN_ON(1);
         }
-#endif

         status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                  EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
@@ -656,14 +655,13 @@ found:
         int ret;
         unsigned long long status;

-#ifdef ES_AGGRESSIVE_TEST
-        if (retval != map->m_len) {
-            printk("ES len assertion failed for inode: %lu "
-                   "retval %d != map->m_len %d "
-                   "in %s (allocation)\n", inode->i_ino, retval,
-                   map->m_len, __func__);
+        if (unlikely(retval != map->m_len)) {
+            ext4_warning(inode->i_sb,
+                         "ES len assertion failed for inode "
+                         "%lu: retval %d != map->m_len %d",
+                         inode->i_ino, retval, map->m_len);
+            WARN_ON(1);
         }
-#endif

         /*
          * If the extent has been zeroed out, we don't need to update
@@ -1637,14 +1635,13 @@ add_delayed:
         int ret;
         unsigned long long status;

-#ifdef ES_AGGRESSIVE_TEST
-        if (retval != map->m_len) {
-            printk("ES len assertion failed for inode: %lu "
-                   "retval %d != map->m_len %d "
-                   "in %s (lookup)\n", inode->i_ino, retval,
-                   map->m_len, __func__);
+        if (unlikely(retval != map->m_len)) {
+            ext4_warning(inode->i_sb,
+                         "ES len assertion failed for inode "
+                         "%lu: retval %d != map->m_len %d",
+                         inode->i_ino, retval, map->m_len);
+            WARN_ON(1);
         }
-#endif

         status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                  EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
@@ -3536,6 +3533,18 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
                  offset;
     }

+    if (offset & (sb->s_blocksize - 1) ||
+        (offset + length) & (sb->s_blocksize - 1)) {
+        /*
+         * Attach jinode to inode for jbd2 if we do any zeroing of
+         * partial block
+         */
+        ret = ext4_inode_attach_jinode(inode);
+        if (ret < 0)
+            goto out_mutex;
+
+    }
+
     first_block_offset = round_up(offset, sb->s_blocksize);
     last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;

@@ -3604,6 +3613,31 @@ out_mutex:
     return ret;
 }

+int ext4_inode_attach_jinode(struct inode *inode)
+{
+    struct ext4_inode_info *ei = EXT4_I(inode);
+    struct jbd2_inode *jinode;
+
+    if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
+        return 0;
+
+    jinode = jbd2_alloc_inode(GFP_KERNEL);
+    spin_lock(&inode->i_lock);
+    if (!ei->jinode) {
+        if (!jinode) {
+            spin_unlock(&inode->i_lock);
+            return -ENOMEM;
+        }
+        ei->jinode = jinode;
+        jbd2_journal_init_jbd_inode(ei->jinode, inode);
+        jinode = NULL;
+    }
+    spin_unlock(&inode->i_lock);
+    if (unlikely(jinode != NULL))
+        jbd2_free_inode(jinode);
+    return 0;
+}
+
 /*
  * ext4_truncate()
  *
@@ -3664,6 +3698,12 @@ void ext4_truncate(struct inode *inode)
         return;
     }

+    /* If we zero-out tail of the page, we have to create jinode for jbd2 */
+    if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
+        if (ext4_inode_attach_jinode(inode) < 0)
+            return;
+    }
+
     if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
         credits = ext4_writepage_trans_blocks(inode);
     else
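(Editorial aside, not part of the patch: ext4_inode_attach_jinode() above is the classic lazy-attach idiom — allocate before taking the lock, re-check under it, and free the allocation if another thread won the race. A userspace sketch of the same pattern follows; the types and names, struct owner/attach_item, are hypothetical.)

/* Hypothetical sketch of lock-protected lazy attach. The spinlock in
 * struct owner is assumed to have been set up with pthread_spin_init(). */
#include <pthread.h>
#include <stdlib.h>

struct item { int payload; };

struct owner {
    pthread_spinlock_t lock;
    struct item *item;          /* lazily attached, at most once */
};

static int attach_item(struct owner *o)
{
    struct item *it;

    if (o->item)                /* fast path, no lock taken */
        return 0;

    it = malloc(sizeof(*it));   /* may fail or sleep: do it unlocked */
    pthread_spin_lock(&o->lock);
    if (!o->item) {
        if (!it) {
            pthread_spin_unlock(&o->lock);
            return -1;          /* allocation failed */
        }
        o->item = it;           /* we won the race: publish */
        it = NULL;
    }
    pthread_spin_unlock(&o->lock);
    free(it);                   /* non-NULL only if another thread won */
    return 0;
}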
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 9491ac0590f7..c0427e2f6648 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -77,8 +77,10 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
     memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
     memswap(&ei1->i_flags, &ei2->i_flags, sizeof(ei1->i_flags));
     memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize));
-    memswap(&ei1->i_es_tree, &ei2->i_es_tree, sizeof(ei1->i_es_tree));
-    memswap(&ei1->i_es_lru_nr, &ei2->i_es_lru_nr, sizeof(ei1->i_es_lru_nr));
+    ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
+    ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
+    ext4_es_lru_del(inode1);
+    ext4_es_lru_del(inode2);

     isize = i_size_read(inode1);
     i_size_write(inode1, i_size_read(inode2));
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index bca26f34edf4..b59373b625e9 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1359,7 +1359,7 @@ static const struct mount_opts {
     {Opt_delalloc, EXT4_MOUNT_DELALLOC,
      MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
     {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
-     MOPT_EXT4_ONLY | MOPT_CLEAR | MOPT_EXPLICIT},
+     MOPT_EXT4_ONLY | MOPT_CLEAR},
     {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
      MOPT_EXT4_ONLY | MOPT_SET},
     {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
@@ -3483,7 +3483,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
         }
         if (test_opt(sb, DIOREAD_NOLOCK)) {
             ext4_msg(sb, KERN_ERR, "can't mount with "
-                     "both data=journal and delalloc");
+                     "both data=journal and dioread_nolock");
             goto failed_mount;
         }
         if (test_opt(sb, DELALLOC))
@@ -4727,6 +4727,21 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
         goto restore_opts;
     }

+    if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
+        if (test_opt2(sb, EXPLICIT_DELALLOC)) {
+            ext4_msg(sb, KERN_ERR, "can't mount with "
+                     "both data=journal and delalloc");
+            err = -EINVAL;
+            goto restore_opts;
+        }
+        if (test_opt(sb, DIOREAD_NOLOCK)) {
+            ext4_msg(sb, KERN_ERR, "can't mount with "
+                     "both data=journal and dioread_nolock");
+            err = -EINVAL;
+            goto restore_opts;
+        }
+    }
+
     if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
         ext4_abort(sb, "Abort forced by user");

@@ -5481,6 +5496,7 @@ static void __exit ext4_exit_fs(void)
     kset_unregister(ext4_kset);
     ext4_exit_system_zone();
     ext4_exit_pageio();
+    ext4_exit_es();
 }

 MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 6599222536eb..65343c3741ff 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -730,14 +730,14 @@ static int __init fcntl_init(void)
      * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
      * is defined as O_NONBLOCK on some platforms and not on others.
      */
-    BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+    BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
         O_RDONLY | O_WRONLY | O_RDWR |
         O_CREAT | O_EXCL | O_NOCTTY |
         O_TRUNC | O_APPEND | /* O_NONBLOCK | */
         __O_SYNC | O_DSYNC | FASYNC |
         O_DIRECT | O_LARGEFILE | O_DIRECTORY |
         O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
-        __FMODE_EXEC | O_PATH
+        __FMODE_EXEC | O_PATH | __O_TMPFILE
         ));

     fasync_cache = kmem_cache_create("fasync_cache",
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 0eda52738ec4..72a5d5b04494 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1223,30 +1223,46 @@ static int fuse_direntplus_link(struct file *file,
         if (name.name[1] == '.' && name.len == 2)
             return 0;
     }
+
+    if (invalid_nodeid(o->nodeid))
+        return -EIO;
+    if (!fuse_valid_type(o->attr.mode))
+        return -EIO;
+
     fc = get_fuse_conn(dir);

     name.hash = full_name_hash(name.name, name.len);
     dentry = d_lookup(parent, &name);
-    if (dentry && dentry->d_inode) {
+    if (dentry) {
         inode = dentry->d_inode;
-        if (get_node_id(inode) == o->nodeid) {
+        if (!inode) {
+            d_drop(dentry);
+        } else if (get_node_id(inode) != o->nodeid ||
+                   ((o->attr.mode ^ inode->i_mode) & S_IFMT)) {
+            err = d_invalidate(dentry);
+            if (err)
+                goto out;
+        } else if (is_bad_inode(inode)) {
+            err = -EIO;
+            goto out;
+        } else {
             struct fuse_inode *fi;
             fi = get_fuse_inode(inode);
             spin_lock(&fc->lock);
             fi->nlookup++;
             spin_unlock(&fc->lock);

+            fuse_change_attributes(inode, &o->attr,
+                                   entry_attr_timeout(o),
+                                   attr_version);
+
             /*
              * The other branch to 'found' comes via fuse_iget()
              * which bumps nlookup inside
              */
             goto found;
         }
-        err = d_invalidate(dentry);
-        if (err)
-            goto out;
         dput(dentry);
-        dentry = NULL;
     }

     dentry = d_alloc(parent, &name);
@@ -1259,25 +1275,30 @@ static int fuse_direntplus_link(struct file *file,
     if (!inode)
         goto out;

-    alias = d_materialise_unique(dentry, inode);
-    err = PTR_ERR(alias);
-    if (IS_ERR(alias))
-        goto out;
+    if (S_ISDIR(inode->i_mode)) {
+        mutex_lock(&fc->inst_mutex);
+        alias = fuse_d_add_directory(dentry, inode);
+        mutex_unlock(&fc->inst_mutex);
+        err = PTR_ERR(alias);
+        if (IS_ERR(alias)) {
+            iput(inode);
+            goto out;
+        }
+    } else {
+        alias = d_splice_alias(inode, dentry);
+    }
+
     if (alias) {
         dput(dentry);
         dentry = alias;
     }

found:
-    fuse_change_attributes(inode, &o->attr, entry_attr_timeout(o),
-                           attr_version);
-
     fuse_change_entry_timeout(dentry, o);

     err = 0;
 out:
-    if (dentry)
-        dput(dentry);
+    dput(dentry);
     return err;
 }

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 9435384562a2..544a809819c3 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1838,14 +1838,14 @@ int __init gfs2_glock_init(void)

     glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
                                       WQ_HIGHPRI | WQ_FREEZABLE, 0);
-    if (IS_ERR(glock_workqueue))
-        return PTR_ERR(glock_workqueue);
+    if (!glock_workqueue)
+        return -ENOMEM;
     gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
                                             WQ_MEM_RECLAIM | WQ_FREEZABLE,
                                             0);
-    if (IS_ERR(gfs2_delete_workqueue)) {
+    if (!gfs2_delete_workqueue) {
         destroy_workqueue(glock_workqueue);
-        return PTR_ERR(gfs2_delete_workqueue);
+        return -ENOMEM;
     }

     register_shrinker(&glock_shrinker);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 5f2e5224c51c..e2e0a90396e7 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -47,7 +47,8 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
  * None of the buffers should be dirty, locked, or pinned.
  */

-static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
+static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
+                             unsigned int nr_revokes)
 {
     struct gfs2_sbd *sdp = gl->gl_sbd;
     struct list_head *head = &gl->gl_ail_list;
@@ -57,7 +58,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)

     gfs2_log_lock(sdp);
     spin_lock(&sdp->sd_ail_lock);
-    list_for_each_entry_safe(bd, tmp, head, bd_ail_gl_list) {
+    list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
+        if (nr_revokes == 0)
+            break;
         bh = bd->bd_bh;
         if (bh->b_state & b_state) {
             if (fsync)
@@ -65,6 +68,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
                 gfs2_ail_error(gl, bh);
         }
         gfs2_trans_add_revoke(sdp, bd);
+        nr_revokes--;
     }
     GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
     spin_unlock(&sdp->sd_ail_lock);
@@ -91,7 +95,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
     WARN_ON_ONCE(current->journal_info);
     current->journal_info = &tr;

-    __gfs2_ail_flush(gl, 0);
+    __gfs2_ail_flush(gl, 0, tr.tr_revokes);

     gfs2_trans_end(sdp);
     gfs2_log_flush(sdp, NULL);
@@ -101,15 +105,19 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 {
     struct gfs2_sbd *sdp = gl->gl_sbd;
     unsigned int revokes = atomic_read(&gl->gl_ail_count);
+    unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
     int ret;

     if (!revokes)
         return;

-    ret = gfs2_trans_begin(sdp, 0, revokes);
+    while (revokes > max_revokes)
+        max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
+
+    ret = gfs2_trans_begin(sdp, 0, max_revokes);
     if (ret)
         return;
-    __gfs2_ail_flush(gl, fsync);
+    __gfs2_ail_flush(gl, fsync, max_revokes);
     gfs2_trans_end(sdp);
     gfs2_log_flush(sdp, NULL);
 }
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index bbb2715171cd..64915eeae5a7 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -594,7 +594,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
         }
         gfs2_glock_dq_uninit(ghs);
         if (IS_ERR(d))
-            return PTR_RET(d);
+            return PTR_ERR(d);
         return error;
     } else if (error != -ENOENT) {
         goto fail_gunlock;
@@ -1750,6 +1750,10 @@ static ssize_t gfs2_getxattr(struct dentry *dentry, const char *name,
     struct gfs2_holder gh;
     int ret;

+    /* For selinux during lookup */
+    if (gfs2_glock_is_locked_by_me(ip->i_gl))
+        return generic_getxattr(dentry, name, data, size);
+
     gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
     ret = gfs2_glock_nq(&gh);
     if (ret == 0) {
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index e04d0e09ee7b..7b0f5043cf24 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -155,7 +155,7 @@ static int __init init_gfs2_fs(void)
         goto fail_wq;

     gfs2_control_wq = alloc_workqueue("gfs2_control",
-                                      WQ_NON_REENTRANT | WQ_UNBOUND | WQ_FREEZABLE, 0);
+                                      WQ_UNBOUND | WQ_FREEZABLE, 0);
     if (!gfs2_control_wq)
         goto fail_recovery;

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a3f868ae3fd4..d19b30ababf1 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -463,6 +463,14 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
     return inode;
 }

+/*
+ * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never
+ * be taken from reclaim -- unlike regular filesystems. This needs an
+ * annotation because huge_pmd_share() does an allocation under
+ * i_mmap_mutex.
+ */
+struct lock_class_key hugetlbfs_i_mmap_mutex_key;
+
 static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                                          struct inode *dir,
                                          umode_t mode, dev_t dev)
@@ -474,6 +482,8 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
         struct hugetlbfs_inode_info *info;
         inode->i_ino = get_next_ino();
         inode_init_owner(inode, dir, mode);
+        lockdep_set_class(&inode->i_mapping->i_mmap_mutex,
+                          &hugetlbfs_i_mmap_mutex_key);
         inode->i_mapping->a_ops = &hugetlbfs_aops;
         inode->i_mapping->backing_dev_info = &hugetlbfs_backing_dev_info;
         inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -916,14 +926,8 @@ static int get_hstate_idx(int page_size_log)
     return h - hstates;
 }

-static char *hugetlb_dname(struct dentry *dentry, char *buffer, int buflen)
-{
-    return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
-                         dentry->d_name.name);
-}
-
 static struct dentry_operations anon_ops = {
-    .d_dname = hugetlb_dname
+    .d_dname = simple_dname
 };

 /*
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 01bfe7662751..41e491b8e5d7 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -64,12 +64,17 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
                           nlm_init->protocol, nlm_version,
                           nlm_init->hostname, nlm_init->noresvport,
                           nlm_init->net);
-    if (host == NULL) {
-        lockd_down(nlm_init->net);
-        return ERR_PTR(-ENOLCK);
-    }
+    if (host == NULL)
+        goto out_nohost;
+    if (host->h_rpcclnt == NULL && nlm_bind_host(host) == NULL)
+        goto out_nobind;

     return host;
+out_nobind:
+    nlmclnt_release_host(host);
+out_nohost:
+    lockd_down(nlm_init->net);
+    return ERR_PTR(-ENOLCK);
 }
 EXPORT_SYMBOL_GPL(nlmclnt_init);

diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 9760ecb9b60f..acd394716349 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -125,14 +125,15 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
 {
     struct nlm_args *argp = &req->a_args;
     struct nlm_lock *lock = &argp->lock;
+    char *nodename = req->a_host->h_rpcclnt->cl_nodename;

     nlmclnt_next_cookie(&argp->cookie);
     memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh));
-    lock->caller = utsname()->nodename;
+    lock->caller = nodename;
     lock->oh.data = req->a_owner;
     lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
                             (unsigned int)fl->fl_u.nfs_fl.owner->pid,
-                            utsname()->nodename);
+                            nodename);
     lock->svid = fl->fl_u.nfs_fl.owner->pid;
     lock->fl.fl_start = fl->fl_start;
     lock->fl.fl_end = fl->fl_end;
diff --git a/fs/namei.c b/fs/namei.c
index 8b61d103a8a7..89a612e392eb 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -3671,15 +3671,11 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
 	if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
 		return -EINVAL;
 	/*
-	 * To use null names we require CAP_DAC_READ_SEARCH
-	 * This ensures that not everyone will be able to create
-	 * handlink using the passed filedescriptor.
+	 * Using empty names is equivalent to using AT_SYMLINK_FOLLOW
+	 * on /proc/self/fd/<fd>.
 	 */
-	if (flags & AT_EMPTY_PATH) {
-		if (!capable(CAP_DAC_READ_SEARCH))
-			return -ENOENT;
+	if (flags & AT_EMPTY_PATH)
 		how = LOOKUP_EMPTY;
-	}
 
 	if (flags & AT_SYMLINK_FOLLOW)
 		how |= LOOKUP_FOLLOW;
diff --git a/fs/namespace.c b/fs/namespace.c
index 7b1ca9ba0b0a..a45ba4f267fe 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1429,7 +1429,7 @@ struct vfsmount *collect_mounts(struct path *path)
 					CL_COPY_ALL | CL_PRIVATE);
 	namespace_unlock();
 	if (IS_ERR(tree))
-		return NULL;
+		return ERR_CAST(tree);
 	return &tree->mnt;
 }
 
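
collect_mounts() now propagates the real errno instead of flattening every failure to NULL; ERR_CAST() re-types an error pointer (here from struct mount * to struct vfsmount *) without disturbing the encoded value. A simplified userspace model of the linux/err.h encoding -- an illustrative re-implementation, not the kernel header:

    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Errors live in the top 4095 values of the address space, so one
     * pointer can carry either a valid address or a negative errno. */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }
    /* ERR_CAST: pass an error through while changing pointer type. */
    static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

    int main(void)
    {
            void *p = ERR_PTR(-12);         /* -ENOMEM stand-in */

            if (IS_ERR(p))                  /* caller sees -12, not NULL */
                    printf("propagated error: %ld\n", PTR_ERR(p));
            return 0;
    }
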
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index af6e806044d7..941246f2b43d 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -463,7 +463,6 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
 		unlock_new_inode(inode);
 	} else
 		nfs_refresh_inode(inode, fattr);
-	nfs_setsecurity(inode, fattr, label);
 	dprintk("NFS: nfs_fhget(%s/%Ld fh_crc=0x%08x ct=%d)\n",
 		inode->i_sb->s_id,
 		(long long)NFS_FILEID(inode),
@@ -963,9 +962,15 @@ EXPORT_SYMBOL_GPL(nfs_revalidate_inode);
 static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
+	int ret;
 
 	if (mapping->nrpages != 0) {
-		int ret = invalidate_inode_pages2(mapping);
+		if (S_ISREG(inode->i_mode)) {
+			ret = nfs_sync_mapping(mapping);
+			if (ret < 0)
+				return ret;
+		}
+		ret = invalidate_inode_pages2(mapping);
 		if (ret < 0)
 			return ret;
 	}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index cf11799297c4..108a774095f7 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3071,15 +3071,13 @@ struct rpc_clnt *
 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
 			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
 {
+	struct rpc_clnt *client = NFS_CLIENT(dir);
 	int status;
-	struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
 
 	status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
-	if (status < 0) {
-		rpc_shutdown_client(client);
+	if (status < 0)
 		return ERR_PTR(status);
-	}
-	return client;
+	return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
 }
 
 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index c74d6168db99..3850b018815f 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1118,11 +1118,11 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
 				len, ((char *)p - (char *)q) + 4);
 		BUG();
 	}
-	len = (char *)p - (char *)q - (bmval_len << 2);
 	*q++ = htonl(bmval0);
 	*q++ = htonl(bmval1);
 	if (bmval_len == 3)
 		*q++ = htonl(bmval2);
+	len = (char *)p - (char *)(q + 1);
 	*q = htonl(len);
 
 /* out: */
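
The encode_attrs() fix recomputes len only after q has been advanced past the bitmap words, so the backfilled length covers just the attribute payload and no longer includes the bitmap itself. Reserve-then-backfill is the usual idiom for XDR-style length prefixes; a self-contained sketch with simplified framing (not the NFS encoder):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t buf[8];
            uint32_t *p = buf;
            uint32_t *lenp;

            *p++ = htonl(0x42);     /* some fixed header word */
            lenp = p++;             /* reserve the length slot */
            *p++ = htonl(7);        /* payload word 1 */
            *p++ = htonl(9);        /* payload word 2 */

            /* Backfill with the bytes written after the slot only. */
            *lenp = htonl((uint32_t)((char *)p - (char *)(lenp + 1)));

            printf("payload length: %u bytes\n", ntohl(*lenp));
            return 0;
    }
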
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 71fdc0dfa0d2..f6db66d8f647 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2478,6 +2478,10 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
 	if (server->flags & NFS_MOUNT_NOAC)
 		sb_mntdata.mntflags |= MS_SYNCHRONOUS;
 
+	if (mount_info->cloned != NULL && mount_info->cloned->sb != NULL)
+		if (mount_info->cloned->sb->s_flags & MS_SYNCHRONOUS)
+			sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
 	/* Get a superblock - note that we may end up sharing one that already exists */
 	s = sget(nfs_mod->nfs_fs, compare_super, nfs_set_super, flags, &sb_mntdata);
 	if (IS_ERR(s)) {
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 0d4c410e4589..419572f33b72 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1524,7 +1524,7 @@ static inline u32 nfsd4_write_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
 static inline u32 nfsd4_exchange_id_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
 {
 	return (op_encode_hdr_size + 2 + 1 + /* eir_clientid, eir_sequenceid */\
-		1 + 1 + 0 + /* eir_flags, spr_how, SP4_NONE (for now) */\
+		1 + 1 + 2 + /* eir_flags, spr_how, spo_must_enforce & _allow */\
 		2 + /*eir_server_owner.so_minor_id */\
 		/* eir_server_owner.so_major_id<> */\
 		XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 280acef6f0dc..43f42290e5df 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1264,6 +1264,8 @@ static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
 	struct svc_cred *cr = &rqstp->rq_cred;
 	u32 service;
 
+	if (!cr->cr_gss_mech)
+		return false;
 	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
 	return service == RPC_GSS_SVC_INTEGRITY ||
 		service == RPC_GSS_SVC_PRIVACY;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 0c0f3ea90de5..c2a4701d7286 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -3360,7 +3360,8 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
 		8 /* eir_clientid */ +
 		4 /* eir_sequenceid */ +
 		4 /* eir_flags */ +
-		4 /* spr_how (SP4_NONE) */ +
+		4 /* spr_how */ +
+		8 /* spo_must_enforce, spo_must_allow */ +
 		8 /* so_minor_id */ +
 		4 /* so_major_id.len */ +
 		(XDR_QUADLEN(major_id_sz) * 4) +
@@ -3372,8 +3373,6 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
 	WRITE32(exid->seqid);
 	WRITE32(exid->flags);
 
-	/* state_protect4_r. Currently only support SP4_NONE */
-	BUG_ON(exid->spa_how != SP4_NONE);
 	WRITE32(exid->spa_how);
 	switch (exid->spa_how) {
 	case SP4_NONE:
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 8ff6a0019b0b..c827acb0e943 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -830,9 +830,10 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
 		flags = O_WRONLY|O_LARGEFILE;
 	}
 	*filp = dentry_open(&path, flags, current_cred());
-	if (IS_ERR(*filp))
+	if (IS_ERR(*filp)) {
 		host_err = PTR_ERR(*filp);
-	else {
+		*filp = NULL;
+	} else {
 		host_err = ima_file_check(*filp, may_flags);
 
 		if (may_flags & NFSD_MAY_64BIT_COOKIE)
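
nfsd_open() now also clears *filp when dentry_open() fails, so the ERR_PTR-encoded value can never leak out through the out parameter and be mistaken for a usable file later. The defensive shape for out parameters, sketched in userspace C with FILE * standing in for struct file *:

    #include <errno.h>
    #include <stdio.h>

    /* On failure: return -errno and leave *out NULL, never a stale
     * or error-encoded pointer the caller might dereference. */
    static int open_checked(const char *path, FILE **out)
    {
            FILE *f = fopen(path, "r");

            if (!f) {
                    *out = NULL;
                    return -errno;
            }
            *out = f;
            return 0;
    }

    int main(void)
    {
            FILE *f;
            int err = open_checked("/nonexistent", &f);

            if (err) {
                    printf("open failed: %d, f=%p\n", err, (void *)f);
                    return 0;
            }
            fclose(f);
            return 0;
    }
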
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index dc9a913784ab..2d8be51f90dc 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -345,8 +345,7 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
 
 	if (err == -EOPNOTSUPP) {
 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
-		bio_put(bio);
-		/* to be detected by submit_seg_bio() */
+		/* to be detected by nilfs_segbuf_submit_bio() */
 	}
 
 	if (!uptodate)
@@ -377,12 +376,12 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
 	bio->bi_private = segbuf;
 	bio_get(bio);
 	submit_bio(mode, bio);
+	segbuf->sb_nbio++;
 	if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
 		bio_put(bio);
 		err = -EOPNOTSUPP;
 		goto failed;
 	}
-	segbuf->sb_nbio++;
 	bio_put(bio);
 
 	wi->bio = NULL;
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 79736a28d84f..2abf97b2a592 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1757,7 +1757,7 @@ try_again:
 		goto out;
 	} else if (ret == 1) {
 		clusters_need = wc->w_clen;
-		ret = ocfs2_refcount_cow(inode, filp, di_bh,
+		ret = ocfs2_refcount_cow(inode, di_bh,
 					 wc->w_cpos, wc->w_clen, UINT_MAX);
 		if (ret) {
 			mlog_errno(ret);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index eb760d8acd50..30544ce8e9f7 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -2153,11 +2153,9 @@ int ocfs2_empty_dir(struct inode *inode)
 {
 	int ret;
 	struct ocfs2_empty_dir_priv priv = {
-		.ctx.actor = ocfs2_empty_dir_filldir
+		.ctx.actor = ocfs2_empty_dir_filldir,
 	};
 
-	memset(&priv, 0, sizeof(priv));
-
 	if (ocfs2_dir_indexed(inode)) {
 		ret = ocfs2_empty_dir_dx(inode, &priv);
 		if (ret)
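
The ocfs2_empty_dir() hunk fixes a subtle initialisation bug: a designated initializer already zero-fills every member it does not name, and the memset() that followed it wiped the freshly assigned .ctx.actor back to NULL. Both halves demonstrated in plain C:

    #include <stdio.h>
    #include <string.h>

    struct ctx {
            int (*actor)(void);
            long cookie;
    };

    static int my_actor(void) { return 42; }

    int main(void)
    {
            /* Unnamed members (.cookie) are zero-initialised for free. */
            struct ctx c = { .actor = my_actor };

            printf("cookie=%ld actor()=%d\n", c.cookie, c.actor());

            /* The removed bug pattern: a memset after the initializer
             * destroys the named member as well. */
            memset(&c, 0, sizeof(c));
            printf("actor is now %p\n", (void *)c.actor);   /* (nil) */
            return 0;
    }
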
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 41000f223ca4..3261d71319ee 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -370,7 +370,7 @@ static int ocfs2_cow_file_pos(struct inode *inode,
 	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
 		goto out;
 
-	return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1);
+	return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
 
 out:
 	return status;
@@ -899,7 +899,7 @@ static int ocfs2_zero_extend_get_range(struct inode *inode,
 	zero_clusters = last_cpos - zero_cpos;
 
 	if (needs_cow) {
-		rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos,
+		rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
 					zero_clusters, UINT_MAX);
 		if (rc) {
 			mlog_errno(rc);
@@ -2078,7 +2078,7 @@ static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
 
 	*meta_level = 1;
 
-	ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX);
+	ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
 	if (ret)
 		mlog_errno(ret);
 out:
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 96f9ac237e86..0a992737dcaf 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -537,7 +537,7 @@ static inline int ocfs2_calc_extend_credits(struct super_block *sb,
 	extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth);
 
 	return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks +
-	       ocfs2_quota_trans_credits(sb) + bits_wanted;
+	       ocfs2_quota_trans_credits(sb);
 }
 
 static inline int ocfs2_calc_symlink_credits(struct super_block *sb)
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index f1fc172175b6..452068b45749 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -69,7 +69,7 @@ static int __ocfs2_move_extent(handle_t *handle,
 	u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
 	u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
 
-	ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos,
+	ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
 					       p_cpos, new_p_cpos, len);
 	if (ret) {
 		mlog_errno(ret);
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 998b17eda09d..a70d604593b6 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -49,7 +49,6 @@
 
 struct ocfs2_cow_context {
 	struct inode *inode;
-	struct file *file;
 	u32 cow_start;
 	u32 cow_len;
 	struct ocfs2_extent_tree data_et;
@@ -66,7 +65,7 @@ struct ocfs2_cow_context {
 			    u32 *num_clusters,
 			    unsigned int *extent_flags);
 	int (*cow_duplicate_clusters)(handle_t *handle,
-				      struct file *file,
+				      struct inode *inode,
 				      u32 cpos, u32 old_cluster,
 				      u32 new_cluster, u32 new_len);
 };
@@ -2922,14 +2921,12 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
 }
 
 int ocfs2_duplicate_clusters_by_page(handle_t *handle,
-				     struct file *file,
+				     struct inode *inode,
 				     u32 cpos, u32 old_cluster,
 				     u32 new_cluster, u32 new_len)
 {
 	int ret = 0, partial;
-	struct inode *inode = file_inode(file);
-	struct ocfs2_caching_info *ci = INODE_CACHE(inode);
-	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+	struct super_block *sb = inode->i_sb;
 	u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
 	struct page *page;
 	pgoff_t page_index;
@@ -2965,6 +2962,11 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 		to = map_end & (PAGE_CACHE_SIZE - 1);
 
 		page = find_or_create_page(mapping, page_index, GFP_NOFS);
+		if (!page) {
+			ret = -ENOMEM;
+			mlog_errno(ret);
+			break;
+		}
 
 		/*
 		 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
@@ -2973,13 +2975,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 		if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
 			BUG_ON(PageDirty(page));
 
-		if (PageReadahead(page)) {
-			page_cache_async_readahead(mapping,
-						   &file->f_ra, file,
-						   page, page_index,
-						   readahead_pages);
-		}
-
 		if (!PageUptodate(page)) {
 			ret = block_read_full_page(page, ocfs2_get_block);
 			if (ret) {
@@ -2999,7 +2994,8 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 			}
 		}
 
-		ocfs2_map_and_dirty_page(inode, handle, from, to,
+		ocfs2_map_and_dirty_page(inode,
+					 handle, from, to,
 					 page, 0, &new_block);
 		mark_page_accessed(page);
 unlock:
@@ -3015,12 +3011,11 @@ unlock:
 }
 
 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
-				    struct file *file,
+				    struct inode *inode,
 				    u32 cpos, u32 old_cluster,
 				    u32 new_cluster, u32 new_len)
 {
 	int ret = 0;
-	struct inode *inode = file_inode(file);
 	struct super_block *sb = inode->i_sb;
 	struct ocfs2_caching_info *ci = INODE_CACHE(inode);
 	int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
@@ -3145,7 +3140,7 @@ static int ocfs2_replace_clusters(handle_t *handle,
 
 	/*If the old clusters is unwritten, no need to duplicate. */
 	if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
-		ret = context->cow_duplicate_clusters(handle, context->file,
+		ret = context->cow_duplicate_clusters(handle, context->inode,
 						      cpos, old, new, len);
 		if (ret) {
 			mlog_errno(ret);
@@ -3423,35 +3418,12 @@ static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
 	return ret;
 }
 
-static void ocfs2_readahead_for_cow(struct inode *inode,
-				    struct file *file,
-				    u32 start, u32 len)
-{
-	struct address_space *mapping;
-	pgoff_t index;
-	unsigned long num_pages;
-	int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
-
-	if (!file)
-		return;
-
-	mapping = file->f_mapping;
-	num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT;
-	if (!num_pages)
-		num_pages = 1;
-
-	index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT;
-	page_cache_sync_readahead(mapping, &file->f_ra, file,
-				  index, num_pages);
-}
-
 /*
  * Starting at cpos, try to CoW write_len clusters. Don't CoW
  * past max_cpos. This will stop when it runs into a hole or an
  * unrefcounted extent.
  */
 static int ocfs2_refcount_cow_hunk(struct inode *inode,
-				   struct file *file,
 				   struct buffer_head *di_bh,
 				   u32 cpos, u32 write_len, u32 max_cpos)
 {
@@ -3480,8 +3452,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
 
 	BUG_ON(cow_len == 0);
 
-	ocfs2_readahead_for_cow(inode, file, cow_start, cow_len);
-
 	context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
 	if (!context) {
 		ret = -ENOMEM;
@@ -3503,7 +3473,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
 	context->ref_root_bh = ref_root_bh;
 	context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
 	context->get_clusters = ocfs2_di_get_clusters;
-	context->file = file;
 
 	ocfs2_init_dinode_extent_tree(&context->data_et,
 				      INODE_CACHE(inode), di_bh);
@@ -3532,7 +3501,6 @@ out:
 * clusters between cpos and cpos+write_len are safe to modify.
 */
 int ocfs2_refcount_cow(struct inode *inode,
-		       struct file *file,
 		       struct buffer_head *di_bh,
 		       u32 cpos, u32 write_len, u32 max_cpos)
 {
@@ -3552,7 +3520,7 @@ int ocfs2_refcount_cow(struct inode *inode,
 			num_clusters = write_len;
 
 		if (ext_flags & OCFS2_EXT_REFCOUNTED) {
-			ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos,
+			ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
 						      num_clusters, max_cpos);
 			if (ret) {
 				mlog_errno(ret);
diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h
index 7754608c83a4..6422bbcdb525 100644
--- a/fs/ocfs2/refcounttree.h
+++ b/fs/ocfs2/refcounttree.h
@@ -53,7 +53,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
 					  int *credits,
 					  int *ref_blocks);
 int ocfs2_refcount_cow(struct inode *inode,
-		       struct file *filep, struct buffer_head *di_bh,
+		       struct buffer_head *di_bh,
 		       u32 cpos, u32 write_len, u32 max_cpos);
 
 typedef int (ocfs2_post_refcount_func)(struct inode *inode,
@@ -85,11 +85,11 @@ int ocfs2_refcount_cow_xattr(struct inode *inode,
 			     u32 cpos, u32 write_len,
 			     struct ocfs2_post_refcount *post);
 int ocfs2_duplicate_clusters_by_page(handle_t *handle,
-				     struct file *file,
+				     struct inode *inode,
 				     u32 cpos, u32 old_cluster,
 				     u32 new_cluster, u32 new_len);
 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
-				    struct file *file,
+				    struct inode *inode,
 				    u32 cpos, u32 old_cluster,
 				    u32 new_cluster, u32 new_len);
 int ocfs2_cow_sync_writeback(struct super_block *sb,
diff --git a/fs/open.c b/fs/open.c
index d53e29895082..7931f76acc2b 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -823,7 +823,7 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
 	int lookup_flags = 0;
 	int acc_mode;
 
-	if (flags & O_CREAT)
+	if (flags & (O_CREAT | __O_TMPFILE))
 		op->mode = (mode & S_IALLUGO) | S_IFREG;
 	else
 		op->mode = 0;
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 75f2890abbd8..0ff80f9b930f 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -230,8 +230,6 @@ static int proc_readfd_common(struct file *file, struct dir_context *ctx,
 
 	if (!dir_emit_dots(file, ctx))
 		goto out;
-	if (!dir_emit_dots(file, ctx))
-		goto out;
 	files = get_files_struct(p);
 	if (!files)
 		goto out;
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 94441a407337..737e15615b04 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -271,7 +271,7 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *file,
 		de = next;
 	} while (de);
 	spin_unlock(&proc_subdir_lock);
-	return 0;
+	return 1;
 }
 
 int proc_readdir(struct file *file, struct dir_context *ctx)
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 229e366598da..e0a790da726d 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -205,7 +205,9 @@ static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentr
 static int proc_root_readdir(struct file *file, struct dir_context *ctx)
 {
 	if (ctx->pos < FIRST_PROCESS_ENTRY) {
-		proc_readdir(file, ctx);
+		int error = proc_readdir(file, ctx);
+		if (unlikely(error <= 0))
+			return error;
 		ctx->pos = FIRST_PROCESS_ENTRY;
 	}
 
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index dbf61f6174f0..107d026f5d6e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -730,8 +730,16 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
 	 * of how soft-dirty works.
 	 */
 	pte_t ptent = *pte;
-	ptent = pte_wrprotect(ptent);
-	ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+
+	if (pte_present(ptent)) {
+		ptent = pte_wrprotect(ptent);
+		ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+	} else if (is_swap_pte(ptent)) {
+		ptent = pte_swp_clear_soft_dirty(ptent);
+	} else if (pte_file(ptent)) {
+		ptent = pte_file_clear_soft_dirty(ptent);
+	}
+
 	set_pte_at(vma->vm_mm, addr, pte, ptent);
 #endif
 }
@@ -752,14 +760,15 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		ptent = *pte;
-		if (!pte_present(ptent))
-			continue;
 
 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 			clear_soft_dirty(vma, addr, pte);
 			continue;
 		}
 
+		if (!pte_present(ptent))
+			continue;
+
 		page = vm_normal_page(vma, addr, ptent);
 		if (!page)
 			continue;
@@ -859,7 +868,7 @@ typedef struct {
 } pagemap_entry_t;
 
 struct pagemapread {
-	int pos, len;
+	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
 	pagemap_entry_t *buffer;
 	bool v2;
 };
@@ -867,7 +876,7 @@ struct pagemapread {
 #define PAGEMAP_WALK_SIZE	(PMD_SIZE)
 #define PAGEMAP_WALK_MASK	(PMD_MASK)
 
-#define PM_ENTRY_BYTES      sizeof(u64)
+#define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
 #define PM_STATUS_BITS      3
 #define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
 #define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
@@ -930,8 +939,10 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
 		flags = PM_PRESENT;
 		page = vm_normal_page(vma, addr, pte);
 	} else if (is_swap_pte(pte)) {
-		swp_entry_t entry = pte_to_swp_entry(pte);
-
+		swp_entry_t entry;
+		if (pte_swp_soft_dirty(pte))
+			flags2 |= __PM_SOFT_DIRTY;
+		entry = pte_to_swp_entry(pte);
 		frame = swp_type(entry) |
 			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
 		flags = PM_SWAP;
@@ -1116,8 +1127,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 		goto out_task;
 
 	pm.v2 = soft_dirty_cleared;
-	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
-	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
+	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
 	ret = -ENOMEM;
 	if (!pm.buffer)
 		goto out_task;
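
The pagemap_read() hunks cure a count-versus-bytes confusion: pm.len was computed in bytes yet indexed as entries elsewhere, and PM_ENTRY_BYTES now tracks sizeof(pagemap_entry_t) so the two can never drift apart. Keeping the field in entries and multiplying by the element size only at allocation keeps both uses consistent; a compact illustration in plain C:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { unsigned long long val; } entry_t;

    #define ENTRY_BYTES sizeof(entry_t)     /* track the real element size */

    int main(void)
    {
            int len = 512;                  /* length in ENTRIES, not bytes */
            entry_t *buf = malloc(len * ENTRY_BYTES);

            if (!buf)
                    return 1;
            for (int i = 0; i < len; i++)   /* indexing matches the unit */
                    buf[i].val = i;
            printf("allocated %zu bytes for %d entries\n",
                   len * ENTRY_BYTES, len);
            free(buf);
            return 0;
    }
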
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index 33532f79b4f7..a958444a75fc 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -19,12 +19,13 @@
 /*
  * LOCKING:
  *
- * We rely on new Alexander Viro's super-block locking.
+ * These guys are evicted from procfs as the very first step in ->kill_sb().
  *
  */
 
-static int show_version(struct seq_file *m, struct super_block *sb)
+static int show_version(struct seq_file *m, void *unused)
 {
+	struct super_block *sb = m->private;
 	char *format;
 
 	if (REISERFS_SB(sb)->s_properties & (1 << REISERFS_3_6)) {
@@ -66,8 +67,9 @@ static int show_version(struct seq_file *m, struct super_block *sb)
 #define DJP( x ) le32_to_cpu( jp -> x )
 #define JF( x ) ( r -> s_journal -> x )
 
-static int show_super(struct seq_file *m, struct super_block *sb)
+static int show_super(struct seq_file *m, void *unused)
 {
+	struct super_block *sb = m->private;
 	struct reiserfs_sb_info *r = REISERFS_SB(sb);
 
 	seq_printf(m, "state: \t%s\n"
@@ -128,8 +130,9 @@ static int show_super(struct seq_file *m, struct super_block *sb)
 	return 0;
 }
 
-static int show_per_level(struct seq_file *m, struct super_block *sb)
+static int show_per_level(struct seq_file *m, void *unused)
 {
+	struct super_block *sb = m->private;
 	struct reiserfs_sb_info *r = REISERFS_SB(sb);
 	int level;
 
@@ -186,8 +189,9 @@ static int show_per_level(struct seq_file *m, struct super_block *sb)
 	return 0;
 }
 
-static int show_bitmap(struct seq_file *m, struct super_block *sb)
+static int show_bitmap(struct seq_file *m, void *unused)
 {
+	struct super_block *sb = m->private;
 	struct reiserfs_sb_info *r = REISERFS_SB(sb);
 
 	seq_printf(m, "free_block: %lu\n"
@@ -218,8 +222,9 @@ static int show_bitmap(struct seq_file *m, struct super_block *sb)
 	return 0;
 }
 
-static int show_on_disk_super(struct seq_file *m, struct super_block *sb)
+static int show_on_disk_super(struct seq_file *m, void *unused)
 {
+	struct super_block *sb = m->private;
 	struct reiserfs_sb_info *sb_info = REISERFS_SB(sb);
 	struct reiserfs_super_block *rs = sb_info->s_rs;
 	int hash_code = DFL(s_hash_function_code);
@@ -261,8 +266,9 @@ static int show_on_disk_super(struct seq_file *m, struct super_block *sb)
 	return 0;
 }
 
-static int show_oidmap(struct seq_file *m, struct super_block *sb)
+static int show_oidmap(struct seq_file *m, void *unused)
 {
+	struct super_block *sb = m->private;
 	struct reiserfs_sb_info *sb_info = REISERFS_SB(sb);
 	struct reiserfs_super_block *rs = sb_info->s_rs;
 	unsigned int mapsize = le16_to_cpu(rs->s_v1.s_oid_cursize);
@@ -291,8 +297,9 @@ static int show_oidmap(struct seq_file *m, struct super_block *sb)
 	return 0;
 }
 
-static int show_journal(struct seq_file *m, struct super_block *sb)
+static int show_journal(struct seq_file *m, void *unused)
 {
+	struct super_block *sb = m->private;
 	struct reiserfs_sb_info *r = REISERFS_SB(sb);
 	struct reiserfs_super_block *rs = r->s_rs;
 	struct journal_params *jp = &rs->s_v1.s_journal;
@@ -383,92 +390,24 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
 	return 0;
 }
 
-/* iterator */
-static int test_sb(struct super_block *sb, void *data)
-{
-	return data == sb;
-}
-
-static int set_sb(struct super_block *sb, void *data)
-{
-	return -ENOENT;
-}
-
-struct reiserfs_seq_private {
-	struct super_block *sb;
-	int (*show) (struct seq_file *, struct super_block *);
-};
-
-static void *r_start(struct seq_file *m, loff_t * pos)
-{
-	struct reiserfs_seq_private *priv = m->private;
-	loff_t l = *pos;
-
-	if (l)
-		return NULL;
-
-	if (IS_ERR(sget(&reiserfs_fs_type, test_sb, set_sb, 0, priv->sb)))
-		return NULL;
-
-	up_write(&priv->sb->s_umount);
-	return priv->sb;
-}
-
-static void *r_next(struct seq_file *m, void *v, loff_t * pos)
-{
-	++*pos;
-	if (v)
-		deactivate_super(v);
-	return NULL;
-}
-
-static void r_stop(struct seq_file *m, void *v)
-{
-	if (v)
-		deactivate_super(v);
-}
-
-static int r_show(struct seq_file *m, void *v)
-{
-	struct reiserfs_seq_private *priv = m->private;
-	return priv->show(m, v);
-}
-
-static const struct seq_operations r_ops = {
-	.start = r_start,
-	.next = r_next,
-	.stop = r_stop,
-	.show = r_show,
-};
-
 static int r_open(struct inode *inode, struct file *file)
 {
-	struct reiserfs_seq_private *priv;
-	int ret = seq_open_private(file, &r_ops,
-				   sizeof(struct reiserfs_seq_private));
-
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		priv = m->private;
-		priv->sb = proc_get_parent_data(inode);
-		priv->show = PDE_DATA(inode);
-	}
-	return ret;
+	return single_open(file, PDE_DATA(inode),
+			   proc_get_parent_data(inode));
 }
 
 static const struct file_operations r_file_operations = {
 	.open = r_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
-	.release = seq_release_private,
-	.owner = THIS_MODULE,
+	.release = single_release,
 };
 
 static struct proc_dir_entry *proc_info_root = NULL;
 static const char proc_info_root_name[] = "fs/reiserfs";
 
 static void add_file(struct super_block *sb, char *name,
-		     int (*func) (struct seq_file *, struct super_block *))
+		     int (*func) (struct seq_file *, void *))
 {
 	proc_create_data(name, 0, REISERFS_SB(sb)->procdir,
 			 &r_file_operations, func);
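
The reiserfs procfs rewrite drops a hand-rolled seq_file iterator -- one that re-grabbed the superblock via sget() on every read -- in favour of single_open(): for a one-record virtual file the show callback runs once and m->private carries the object to print, while the files themselves are now torn down first thing in ->kill_sb(), per the new LOCKING comment. A kernel-module-style sketch of the single_open() shape; the my_* names are placeholders, and the fragment follows the 3.x-era API used here:

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    struct my_stats { unsigned long hits; };

    static int my_show(struct seq_file *m, void *unused)
    {
            struct my_stats *stats = m->private;  /* from single_open() */

            seq_printf(m, "hits: %lu\n", stats->hits);
            return 0;
    }

    static int my_open(struct inode *inode, struct file *file)
    {
            /* The third argument becomes m->private in my_show(). */
            return single_open(file, my_show, PDE_DATA(inode));
    }

    static const struct file_operations my_fops = {
            .open    = my_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };
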
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index f8a23c3078f8..e2e202a07b31 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -499,6 +499,7 @@ int remove_save_link(struct inode *inode, int truncate)
 static void reiserfs_kill_sb(struct super_block *s)
 {
 	if (REISERFS_SB(s)) {
+		reiserfs_proc_info_done(s);
 		/*
 		 * Force any pending inode evictions to occur now. Any
 		 * inodes to be removed that have extended attributes
@@ -554,8 +555,6 @@ static void reiserfs_put_super(struct super_block *s)
 			REISERFS_SB(s)->reserved_blocks);
 	}
 
-	reiserfs_proc_info_done(s);
-
 	reiserfs_write_unlock(s);
 	mutex_destroy(&REISERFS_SB(s)->lock);
 	kfree(s->s_fs_info);
diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h
index 07d735a80a0f..e5869b50dc41 100644
--- a/fs/xfs/xfs_dinode.h
+++ b/fs/xfs/xfs_dinode.h
@@ -39,6 +39,9 @@ typedef struct xfs_timestamp {
  * There is a very similar struct icdinode in xfs_inode which matches the
  * layout of the first 96 bytes of this structure, but is kept in native
  * format instead of big endian.
+ *
+ * Note: di_flushiter is only used by v1/2 inodes - it's effectively a zeroed
+ * padding field for v3 inodes.
  */
 typedef struct xfs_dinode {
 	__be16		di_magic;	/* inode magic # = XFS_DINODE_MAGIC */
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index b78481f99d9d..bb262c25c8de 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -896,7 +896,6 @@ xfs_dinode_to_disk(
 	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
 	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
 	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
-	to->di_flushiter = cpu_to_be16(from->di_flushiter);
 	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
 	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
 	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
@@ -924,6 +923,9 @@ xfs_dinode_to_disk(
 		to->di_lsn = cpu_to_be64(from->di_lsn);
 		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
 		uuid_copy(&to->di_uuid, &from->di_uuid);
+		to->di_flushiter = 0;
+	} else {
+		to->di_flushiter = cpu_to_be16(from->di_flushiter);
 	}
 }
 
@@ -1029,10 +1031,14 @@ xfs_dinode_calc_crc(
 /*
  * Read the disk inode attributes into the in-core inode structure.
  *
- * If we are initialising a new inode and we are not utilising the
- * XFS_MOUNT_IKEEP inode cluster mode, we can simple build the new inode core
- * with a random generation number. If we are keeping inodes around, we need to
- * read the inode cluster to get the existing generation number off disk.
+ * For version 5 superblocks, if we are initialising a new inode and we are not
+ * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simple build the new
+ * inode core with a random generation number. If we are keeping inodes around,
+ * we need to read the inode cluster to get the existing generation number off
+ * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
+ * format) then log recovery is dependent on the di_flushiter field being
+ * initialised from the current on-disk value and hence we must also read the
+ * inode off disk.
 */
 int
 xfs_iread(
@@ -1054,6 +1060,7 @@ xfs_iread(
 
 	/* shortcut IO on inode allocation if possible */
 	if ((iget_flags & XFS_IGET_CREATE) &&
+	    xfs_sb_version_hascrc(&mp->m_sb) &&
 	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
 		/* initialise the on-disk inode core */
 		memset(&ip->i_d, 0, sizeof(ip->i_d));
@@ -2882,12 +2889,18 @@ xfs_iflush_int(
 			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
 		goto corrupt_out;
 	}
+
 	/*
-	 * bump the flush iteration count, used to detect flushes which
-	 * postdate a log record during recovery. This is redundant as we now
-	 * log every change and hence this can't happen. Still, it doesn't hurt.
+	 * Inode item log recovery for v1/v2 inodes are dependent on the
+	 * di_flushiter count for correct sequencing. We bump the flush
+	 * iteration count so we can detect flushes which postdate a log record
+	 * during recovery. This is redundant as we now log every change and
+	 * hence this can't happen but we need to still do it to ensure
+	 * backwards compatibility with old kernels that predate logging all
+	 * inode changes.
 	 */
-	ip->i_d.di_flushiter++;
+	if (ip->i_d.di_version < 3)
+		ip->i_d.di_flushiter++;
 
 	/*
 	 * Copy the dirty parts of the inode into the on-disk
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 6fcc910a50b9..7681b19aa5dc 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -2592,8 +2592,16 @@ xlog_recover_inode_pass2(
 		goto error;
 	}
 
-	/* Skip replay when the on disk inode is newer than the log one */
-	if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
+	/*
+	 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
+	 * are transactional and if ordering is necessary we can determine that
+	 * more accurately by the LSN field in the V3 inode core. Don't trust
+	 * the inode versions we might be changing them here - use the
+	 * superblock flag to determine whether we need to look at di_flushiter
+	 * to skip replay when the on disk inode is newer than the log one
+	 */
+	if (!xfs_sb_version_hascrc(&mp->m_sb) &&
+	    dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
 		/*
 		 * Deal with the wrap case, DI_MAX_FLUSH is less
 		 * than smaller numbers
@@ -2608,6 +2616,7 @@ xlog_recover_inode_pass2(
 			goto error;
 		}
 	}
+
 	/* Take the opportunity to reset the flush iteration count */
 	dicp->di_flushiter = 0;
 
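
The recovery change keys off the superblock feature bit rather than the per-inode version, since the inode version fields are themselves being rewritten during replay; v3 (CRC-enabled) filesystems order strictly by the logged LSN instead of di_flushiter. The "wrap case" the surrounding code worries about is the 16-bit counter rolling over at DI_MAX_FLUSH, where a raw less-than comparison misorders values. Generic serial-number comparison showing the failure and a wrap-aware form -- illustrative arithmetic, not the XFS code itself:

    #include <stdint.h>
    #include <stdio.h>

    /* Wrap-aware "a is newer than b" for 16-bit sequence counters:
     * the signed difference stays small and positive just past a
     * rollover, where a raw comparison would invert the order. */
    static int seq16_after(uint16_t a, uint16_t b)
    {
            return (int16_t)(a - b) > 0;
    }

    int main(void)
    {
            printf("%d\n", seq16_after(5, 0xfffe)); /* 1: newer post-wrap */
            printf("%d\n", seq16_after(0xfffe, 5)); /* 0 */
            printf("%d\n", 5 > 0xfffe);             /* 0: raw compare lies */
            return 0;
    }
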
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 56e6b68c8d2f..94383a70c1a3 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -274,15 +274,12 @@ struct acpi_device_wakeup {
 };
 
 struct acpi_device_physical_node {
-	u8 node_id;
+	unsigned int node_id;
 	struct list_head node;
 	struct device *dev;
 	bool put_online:1;
 };
 
-/* set maximum of physical nodes to 32 for expansibility */
-#define ACPI_MAX_PHYSICAL_NODE	32
-
 /* Device */
 struct acpi_device {
 	int device_type;
@@ -302,10 +299,9 @@ struct acpi_device {
 	struct acpi_driver *driver;
 	void *driver_data;
 	struct device dev;
-	u8 physical_node_count;
+	unsigned int physical_node_count;
 	struct list_head physical_node_list;
 	struct mutex physical_node_lock;
-	DECLARE_BITMAP(physical_node_id_bitmap, ACPI_MAX_PHYSICAL_NODE);
 	struct list_head power_dependent;
 	void (*remove)(struct acpi_device *);
 };
@@ -445,7 +441,11 @@ struct acpi_pci_root {
 };
 
 /* helper */
-acpi_handle acpi_get_child(acpi_handle, u64);
+acpi_handle acpi_find_child(acpi_handle, u64, bool);
+static inline acpi_handle acpi_get_child(acpi_handle handle, u64 addr)
+{
+	return acpi_find_child(handle, addr, false);
+}
 int acpi_is_root_bridge(acpi_handle);
 struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
 #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev))
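
acpi_get_child() survives here as a static inline shim over the more general acpi_find_child(), so every existing caller keeps compiling unchanged while new callers can pass the extra bool. Extending a helper behind a compatibility wrapper is the standard way to avoid tree-wide churn; the shape in self-contained C (stand-in names):

    #include <stdbool.h>
    #include <stdio.h>

    /* New entry point grows an extra parameter... */
    static int find_item(int key, bool check_children)
    {
            return check_children ? key * 2 : key;
    }

    /* ...while the old signature lives on as a zero-cost wrapper. */
    static inline int get_item(int key)
    {
            return find_item(key, false);
    }

    int main(void)
    {
            printf("old API: %d new API: %d\n",
                   get_item(21), find_item(21, true));
            return 0;
    }
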
diff --git a/include/acpi/video.h b/include/acpi/video.h
index b26dc4fb7ba8..61109f2609fc 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -17,21 +17,12 @@ struct acpi_device;
 #define ACPI_VIDEO_DISPLAY_LEGACY_TV      0x0200
 
 #if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
-extern int __acpi_video_register(bool backlight_quirks);
-static inline int acpi_video_register(void)
-{
-	return __acpi_video_register(false);
-}
-static inline int acpi_video_register_with_quirks(void)
-{
-	return __acpi_video_register(true);
-}
+extern int acpi_video_register(void);
 extern void acpi_video_unregister(void);
 extern int acpi_video_get_edid(struct acpi_device *device, int type,
 			       int device_id, void **edid);
 #else
 static inline int acpi_video_register(void) { return 0; }
-static inline int acpi_video_register_with_quirks(void) { return 0; }
 static inline void acpi_video_unregister(void) { return; }
 static inline int acpi_video_get_edid(struct acpi_device *device, int type,
 				      int device_id, void **edid)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 2f47ade1b567..0807ddf97b05 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -417,6 +417,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
 {
 	return pmd;
 }
+
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+	return 0;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+	return 0;
+}
 #endif
 
 #ifndef __HAVE_PFNMAP_TRACKING
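
These asm-generic fallbacks complete the soft-dirty plumbing used by the fs/proc/task_mmu.c hunks earlier: architectures that do not implement soft-dirty tracking for swap and file PTEs get inline no-ops, so the common code can call pte_swp_clear_soft_dirty() and friends without any #ifdef at the call site. The stub pattern in self-contained C (HAVE_WIDGET is a stand-in for a real config symbol):

    #include <stdio.h>

    /* #define HAVE_WIDGET 1    -- the arch/config would define this */

    #ifdef HAVE_WIDGET
    static inline int widget_state(int v) { return v & 1; }
    static inline int widget_clear(int v) { return v & ~1; }
    #else
    /* No-op fallbacks: callers need no #ifdef of their own. */
    static inline int widget_state(int v) { (void)v; return 0; }
    static inline int widget_clear(int v) { return v; }
    #endif

    int main(void)
    {
            int v = 3;

            if (widget_state(v))    /* constant-folds away when stubbed */
                    v = widget_clear(v);
            printf("v=%d\n", v);
            return 0;
    }
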
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 13821c339a41..5672d7ea1fa0 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -112,7 +112,7 @@ struct mmu_gather {
 
 #define HAVE_GENERIC_MMU_GATHER
 
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
 			unsigned long end);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 12083dc862a9..290734191f72 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -45,7 +45,6 @@
45#include <linux/kernel.h> 45#include <linux/kernel.h>
46#include <linux/miscdevice.h> 46#include <linux/miscdevice.h>
47#include <linux/fs.h> 47#include <linux/fs.h>
48#include <linux/proc_fs.h>
49#include <linux/init.h> 48#include <linux/init.h>
50#include <linux/file.h> 49#include <linux/file.h>
51#include <linux/platform_device.h> 50#include <linux/platform_device.h>
@@ -62,20 +61,18 @@
62#endif 61#endif
63#include <asm/mman.h> 62#include <asm/mman.h>
64#include <asm/uaccess.h> 63#include <asm/uaccess.h>
65#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
66#include <linux/types.h> 64#include <linux/types.h>
67#include <linux/agp_backend.h> 65#include <linux/agp_backend.h>
68#endif
69#include <linux/workqueue.h> 66#include <linux/workqueue.h>
70#include <linux/poll.h> 67#include <linux/poll.h>
71#include <asm/pgalloc.h> 68#include <asm/pgalloc.h>
72#include <drm/drm.h> 69#include <drm/drm.h>
73#include <drm/drm_sarea.h> 70#include <drm/drm_sarea.h>
71#include <drm/drm_vma_manager.h>
74 72
75#include <linux/idr.h> 73#include <linux/idr.h>
76 74
77#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) 75#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
78#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
79 76
80struct module; 77struct module;
81 78
@@ -140,19 +137,15 @@ int drm_err(const char *func, const char *format, ...);
140/* driver capabilities and requirements mask */ 137/* driver capabilities and requirements mask */
141#define DRIVER_USE_AGP 0x1 138#define DRIVER_USE_AGP 0x1
142#define DRIVER_REQUIRE_AGP 0x2 139#define DRIVER_REQUIRE_AGP 0x2
143#define DRIVER_USE_MTRR 0x4
144#define DRIVER_PCI_DMA 0x8 140#define DRIVER_PCI_DMA 0x8
145#define DRIVER_SG 0x10 141#define DRIVER_SG 0x10
146#define DRIVER_HAVE_DMA 0x20 142#define DRIVER_HAVE_DMA 0x20
147#define DRIVER_HAVE_IRQ 0x40 143#define DRIVER_HAVE_IRQ 0x40
148#define DRIVER_IRQ_SHARED 0x80 144#define DRIVER_IRQ_SHARED 0x80
149#define DRIVER_IRQ_VBL 0x100
150#define DRIVER_DMA_QUEUE 0x200
151#define DRIVER_FB_DMA 0x400
152#define DRIVER_IRQ_VBL2 0x800
153#define DRIVER_GEM 0x1000 145#define DRIVER_GEM 0x1000
154#define DRIVER_MODESET 0x2000 146#define DRIVER_MODESET 0x2000
155#define DRIVER_PRIME 0x4000 147#define DRIVER_PRIME 0x4000
148#define DRIVER_RENDER 0x8000
156 149
157#define DRIVER_BUS_PCI 0x1 150#define DRIVER_BUS_PCI 0x1
158#define DRIVER_BUS_PLATFORM 0x2 151#define DRIVER_BUS_PLATFORM 0x2
@@ -168,13 +161,7 @@ int drm_err(const char *func, const char *format, ...);
168#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */ 161#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
169#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */ 162#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */
170#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */ 163#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */
171#define DRM_LOOPING_LIMIT 5000000
172#define DRM_TIME_SLICE (HZ/20) /**< Time slice for GLXContexts */
173#define DRM_LOCK_SLICE 1 /**< Time slice for lock, in jiffies */
174
175#define DRM_FLAG_DEBUG 0x01
176 164
177#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
178#define DRM_MAP_HASH_OFFSET 0x10000000 165#define DRM_MAP_HASH_OFFSET 0x10000000
179 166
180/*@}*/ 167/*@}*/
@@ -263,9 +250,6 @@ int drm_err(const char *func, const char *format, ...);
263 250
264#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x) 251#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)
265 252
266#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
267#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
268
269#define DRM_IF_VERSION(maj, min) (maj << 16 | min) 253#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
270 254
271/** 255/**
@@ -307,6 +291,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
307#define DRM_ROOT_ONLY 0x4 291#define DRM_ROOT_ONLY 0x4
308#define DRM_CONTROL_ALLOW 0x8 292#define DRM_CONTROL_ALLOW 0x8
309#define DRM_UNLOCKED 0x10 293#define DRM_UNLOCKED 0x10
294#define DRM_RENDER_ALLOW 0x20
310 295
311struct drm_ioctl_desc { 296struct drm_ioctl_desc {
312 unsigned int cmd; 297 unsigned int cmd;
@@ -587,7 +572,6 @@ struct drm_map_list {
587 struct drm_local_map *map; /**< mapping */ 572 struct drm_local_map *map; /**< mapping */
588 uint64_t user_token; 573 uint64_t user_token;
589 struct drm_master *master; 574 struct drm_master *master;
590 struct drm_mm_node *file_offset_node; /**< fake offset */
591}; 575};
592 576
593/** 577/**
@@ -622,8 +606,7 @@ struct drm_ati_pcigart_info {
622 * GEM specific mm private for tracking GEM objects 606 * GEM specific mm private for tracking GEM objects
623 */ 607 */
624struct drm_gem_mm { 608struct drm_gem_mm {
625 struct drm_mm offset_manager; /**< Offset mgmt for buffer objects */ 609 struct drm_vma_offset_manager vma_manager;
626 struct drm_open_hash offset_hash; /**< User token hash table for maps */
627}; 610};
628 611
629/** 612/**
@@ -634,8 +617,16 @@ struct drm_gem_object {
634 /** Reference count of this object */ 617 /** Reference count of this object */
635 struct kref refcount; 618 struct kref refcount;
636 619
637 /** Handle count of this object. Each handle also holds a reference */ 620 /**
638 atomic_t handle_count; /* number of handles on this object */ 621 * handle_count - gem file_priv handle count of this object
622 *
623 * Each handle also holds a reference. Note that when the handle_count
 624 * drops to 0, any global names (e.g. the id in the flink namespace) will
625 * be cleared.
626 *
627 * Protected by dev->object_name_lock.
 628 */
629 unsigned handle_count;
639 630
640 /** Related drm device */ 631 /** Related drm device */
641 struct drm_device *dev; 632 struct drm_device *dev;
@@ -644,7 +635,7 @@ struct drm_gem_object {
644 struct file *filp; 635 struct file *filp;
645 636
646 /* Mapping info for this object */ 637 /* Mapping info for this object */
647 struct drm_map_list map_list; 638 struct drm_vma_offset_node vma_node;
648 639
649 /** 640 /**
650 * Size of the object, in bytes. Immutable over the object's 641 * Size of the object, in bytes. Immutable over the object's
@@ -678,10 +669,32 @@ struct drm_gem_object {
678 669
679 void *driver_private; 670 void *driver_private;
680 671
681 /* dma buf exported from this GEM object */ 672 /**
682 struct dma_buf *export_dma_buf; 673 * dma_buf - dma buf associated with this GEM object
674 *
675 * Pointer to the dma-buf associated with this gem object (either
676 * through importing or exporting). We break the resulting reference
677 * loop when the last gem handle for this object is released.
678 *
 679 * Protected by dev->object_name_lock
680 */
681 struct dma_buf *dma_buf;
683 682
684 /* dma buf attachment backing this object */ 683 /**
684 * import_attach - dma buf attachment backing this object
685 *
686 * Any foreign dma_buf imported as a gem object has this set to the
687 * attachment point for the device. This is invariant over the lifetime
688 * of a gem object.
689 *
690 * The driver's ->gem_free_object callback is responsible for cleaning
691 * up the dma_buf attachment and references acquired at import time.
692 *
693 * Note that the drm gem/prime core does not depend upon drivers setting
694 * this field any more. So for drivers where this doesn't make sense
 695 * (e.g. virtual devices or a DisplayLink device behind a USB bus) they can
696 * simply leave it as NULL.
697 */
685 struct dma_buf_attachment *import_attach; 698 struct dma_buf_attachment *import_attach;
686}; 699};
687 700
@@ -737,6 +750,7 @@ struct drm_bus {
737 int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p); 750 int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
738 /* hooks that are for PCI */ 751 /* hooks that are for PCI */
739 int (*agp_init)(struct drm_device *dev); 752 int (*agp_init)(struct drm_device *dev);
753 void (*agp_destroy)(struct drm_device *dev);
740 754
741}; 755};
742 756
@@ -885,8 +899,6 @@ struct drm_driver {
885 void (*irq_preinstall) (struct drm_device *dev); 899 void (*irq_preinstall) (struct drm_device *dev);
886 int (*irq_postinstall) (struct drm_device *dev); 900 int (*irq_postinstall) (struct drm_device *dev);
887 void (*irq_uninstall) (struct drm_device *dev); 901 void (*irq_uninstall) (struct drm_device *dev);
888 void (*set_version) (struct drm_device *dev,
889 struct drm_set_version *sv);
890 902
891 /* Master routines */ 903 /* Master routines */
892 int (*master_create)(struct drm_device *dev, struct drm_master *master); 904 int (*master_create)(struct drm_device *dev, struct drm_master *master);
@@ -966,7 +978,7 @@ struct drm_driver {
966 978
967 u32 driver_features; 979 u32 driver_features;
968 int dev_priv_size; 980 int dev_priv_size;
969 struct drm_ioctl_desc *ioctls; 981 const struct drm_ioctl_desc *ioctls;
970 int num_ioctls; 982 int num_ioctls;
971 const struct file_operations *fops; 983 const struct file_operations *fops;
972 union { 984 union {
@@ -1037,8 +1049,6 @@ struct drm_minor {
1037 struct device kdev; /**< Linux device */ 1049 struct device kdev; /**< Linux device */
1038 struct drm_device *dev; 1050 struct drm_device *dev;
1039 1051
1040 struct proc_dir_entry *proc_root; /**< proc directory entry */
1041 struct drm_info_node proc_nodes;
1042 struct dentry *debugfs_root; 1052 struct dentry *debugfs_root;
1043 1053
1044 struct list_head debugfs_list; 1054 struct list_head debugfs_list;
@@ -1131,12 +1141,7 @@ struct drm_device {
1131 /*@{ */ 1141 /*@{ */
1132 int irq_enabled; /**< True if irq handler is enabled */ 1142 int irq_enabled; /**< True if irq handler is enabled */
1133 __volatile__ long context_flag; /**< Context swapping flag */ 1143 __volatile__ long context_flag; /**< Context swapping flag */
1134 __volatile__ long interrupt_flag; /**< Interruption handler flag */
1135 __volatile__ long dma_flag; /**< DMA dispatch flag */
1136 wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
1137 int last_checked; /**< Last context checked for DMA */
1138 int last_context; /**< Last current context */ 1144 int last_context; /**< Last current context */
1139 unsigned long last_switch; /**< jiffies at last context switch */
1140 /*@} */ 1145 /*@} */
1141 1146
1142 struct work_struct work; 1147 struct work_struct work;
@@ -1174,12 +1179,6 @@ struct drm_device {
1174 spinlock_t event_lock; 1179 spinlock_t event_lock;
1175 1180
1176 /*@} */ 1181 /*@} */
1177 cycles_t ctx_start;
1178 cycles_t lck_start;
1179
1180 struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */
1181 wait_queue_head_t buf_readers; /**< Processes waiting to read */
1182 wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */
1183 1182
1184 struct drm_agp_head *agp; /**< AGP data */ 1183 struct drm_agp_head *agp; /**< AGP data */
1185 1184
@@ -1207,12 +1206,13 @@ struct drm_device {
1207 unsigned int agp_buffer_token; 1206 unsigned int agp_buffer_token;
1208 struct drm_minor *control; /**< Control node for card */ 1207 struct drm_minor *control; /**< Control node for card */
1209 struct drm_minor *primary; /**< render type primary screen head */ 1208 struct drm_minor *primary; /**< render type primary screen head */
1209 struct drm_minor *render; /**< render node for card */
1210 1210
1211 struct drm_mode_config mode_config; /**< Current mode config */ 1211 struct drm_mode_config mode_config; /**< Current mode config */
1212 1212
1213 /** \name GEM information */ 1213 /** \name GEM information */
1214 /*@{ */ 1214 /*@{ */
1215 spinlock_t object_name_lock; 1215 struct mutex object_name_lock;
1216 struct idr object_name_idr; 1216 struct idr object_name_idr;
1217 /*@} */ 1217 /*@} */
1218 int switch_power_state; 1218 int switch_power_state;
@@ -1223,6 +1223,7 @@ struct drm_device {
1223#define DRM_SWITCH_POWER_ON 0 1223#define DRM_SWITCH_POWER_ON 0
1224#define DRM_SWITCH_POWER_OFF 1 1224#define DRM_SWITCH_POWER_OFF 1
1225#define DRM_SWITCH_POWER_CHANGING 2 1225#define DRM_SWITCH_POWER_CHANGING 2
1226#define DRM_SWITCH_POWER_DYNAMIC_OFF 3
1226 1227
1227static __inline__ int drm_core_check_feature(struct drm_device *dev, 1228static __inline__ int drm_core_check_feature(struct drm_device *dev,
1228 int feature) 1229 int feature)
@@ -1235,25 +1236,6 @@ static inline int drm_dev_to_irq(struct drm_device *dev)
1235 return dev->driver->bus->get_irq(dev); 1236 return dev->driver->bus->get_irq(dev);
1236} 1237}
1237 1238
1238
1239#if __OS_HAS_AGP
1240static inline int drm_core_has_AGP(struct drm_device *dev)
1241{
1242 return drm_core_check_feature(dev, DRIVER_USE_AGP);
1243}
1244#else
1245#define drm_core_has_AGP(dev) (0)
1246#endif
1247
1248#if __OS_HAS_MTRR
1249static inline int drm_core_has_MTRR(struct drm_device *dev)
1250{
1251 return drm_core_check_feature(dev, DRIVER_USE_MTRR);
1252}
1253#else
1254#define drm_core_has_MTRR(dev) (0)
1255#endif
1256
1257static inline void drm_device_set_unplugged(struct drm_device *dev) 1239static inline void drm_device_set_unplugged(struct drm_device *dev)
1258{ 1240{
1259 smp_wmb(); 1241 smp_wmb();
@@ -1272,6 +1254,11 @@ static inline bool drm_modeset_is_locked(struct drm_device *dev)
1272 return mutex_is_locked(&dev->mode_config.mutex); 1254 return mutex_is_locked(&dev->mode_config.mutex);
1273} 1255}
1274 1256
1257static inline bool drm_is_render_client(struct drm_file *file_priv)
1258{
1259 return file_priv->minor->type == DRM_MINOR_RENDER;
1260}
1261
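drm_is_render_client() exists so the core can fence legacy paths off from render nodes; a minimal sketch of the intended check inside an ioctl dispatcher (ioctl here is a struct drm_ioctl_desc pointer):

	if (drm_is_render_client(file_priv) && !(ioctl->flags & DRM_RENDER_ALLOW))
		return -EACCES;	/* render nodes only get explicitly whitelisted ioctls */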
1275/******************************************************************/ 1262/******************************************************************/
1276/** \name Internal function definitions */ 1263/** \name Internal function definitions */
1277/*@{*/ 1264/*@{*/
@@ -1287,7 +1274,6 @@ extern int drm_lastclose(struct drm_device *dev);
1287extern struct mutex drm_global_mutex; 1274extern struct mutex drm_global_mutex;
1288extern int drm_open(struct inode *inode, struct file *filp); 1275extern int drm_open(struct inode *inode, struct file *filp);
1289extern int drm_stub_open(struct inode *inode, struct file *filp); 1276extern int drm_stub_open(struct inode *inode, struct file *filp);
1290extern int drm_fasync(int fd, struct file *filp, int on);
1291extern ssize_t drm_read(struct file *filp, char __user *buffer, 1277extern ssize_t drm_read(struct file *filp, char __user *buffer,
1292 size_t count, loff_t *offset); 1278 size_t count, loff_t *offset);
1293extern int drm_release(struct inode *inode, struct file *filp); 1279extern int drm_release(struct inode *inode, struct file *filp);
@@ -1301,14 +1287,6 @@ extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
1301 1287
1302 /* Memory management support (drm_memory.h) */ 1288 /* Memory management support (drm_memory.h) */
1303#include <drm/drm_memory.h> 1289#include <drm/drm_memory.h>
1304extern void drm_free_agp(DRM_AGP_MEM * handle, int pages);
1305extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
1306extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
1307 struct page **pages,
1308 unsigned long num_pages,
1309 uint32_t gtt_offset,
1310 uint32_t type);
1311extern int drm_unbind_agp(DRM_AGP_MEM * handle);
1312 1290
1313 /* Misc. IOCTL support (drm_ioctl.h) */ 1291 /* Misc. IOCTL support (drm_ioctl.h) */
1314extern int drm_irq_by_busid(struct drm_device *dev, void *data, 1292extern int drm_irq_by_busid(struct drm_device *dev, void *data,
@@ -1335,8 +1313,6 @@ extern int drm_resctx(struct drm_device *dev, void *data,
1335 struct drm_file *file_priv); 1313 struct drm_file *file_priv);
1336extern int drm_addctx(struct drm_device *dev, void *data, 1314extern int drm_addctx(struct drm_device *dev, void *data,
1337 struct drm_file *file_priv); 1315 struct drm_file *file_priv);
1338extern int drm_modctx(struct drm_device *dev, void *data,
1339 struct drm_file *file_priv);
1340extern int drm_getctx(struct drm_device *dev, void *data, 1316extern int drm_getctx(struct drm_device *dev, void *data,
1341 struct drm_file *file_priv); 1317 struct drm_file *file_priv);
1342extern int drm_switchctx(struct drm_device *dev, void *data, 1318extern int drm_switchctx(struct drm_device *dev, void *data,
@@ -1346,9 +1322,10 @@ extern int drm_newctx(struct drm_device *dev, void *data,
1346extern int drm_rmctx(struct drm_device *dev, void *data, 1322extern int drm_rmctx(struct drm_device *dev, void *data,
1347 struct drm_file *file_priv); 1323 struct drm_file *file_priv);
1348 1324
1349extern int drm_ctxbitmap_init(struct drm_device *dev); 1325extern void drm_legacy_ctxbitmap_init(struct drm_device *dev);
1350extern void drm_ctxbitmap_cleanup(struct drm_device *dev); 1326extern void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
1351extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); 1327extern void drm_legacy_ctxbitmap_release(struct drm_device *dev,
1328 struct drm_file *file_priv);
1352 1329
1353extern int drm_setsareactx(struct drm_device *dev, void *data, 1330extern int drm_setsareactx(struct drm_device *dev, void *data,
1354 struct drm_file *file_priv); 1331 struct drm_file *file_priv);
@@ -1405,11 +1382,12 @@ extern int drm_freebufs(struct drm_device *dev, void *data,
1405 struct drm_file *file_priv); 1382 struct drm_file *file_priv);
1406extern int drm_mapbufs(struct drm_device *dev, void *data, 1383extern int drm_mapbufs(struct drm_device *dev, void *data,
1407 struct drm_file *file_priv); 1384 struct drm_file *file_priv);
1408extern int drm_order(unsigned long size); 1385extern int drm_dma_ioctl(struct drm_device *dev, void *data,
1386 struct drm_file *file_priv);
1409 1387
1410 /* DMA support (drm_dma.h) */ 1388 /* DMA support (drm_dma.h) */
1411extern int drm_dma_setup(struct drm_device *dev); 1389extern int drm_legacy_dma_setup(struct drm_device *dev);
1412extern void drm_dma_takedown(struct drm_device *dev); 1390extern void drm_legacy_dma_takedown(struct drm_device *dev);
1413extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf); 1391extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
1414extern void drm_core_reclaim_buffers(struct drm_device *dev, 1392extern void drm_core_reclaim_buffers(struct drm_device *dev,
1415 struct drm_file *filp); 1393 struct drm_file *filp);
@@ -1423,7 +1401,6 @@ extern int drm_irq_uninstall(struct drm_device *dev);
1423extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); 1401extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
1424extern int drm_wait_vblank(struct drm_device *dev, void *data, 1402extern int drm_wait_vblank(struct drm_device *dev, void *data,
1425 struct drm_file *filp); 1403 struct drm_file *filp);
1426extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
1427extern u32 drm_vblank_count(struct drm_device *dev, int crtc); 1404extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
1428extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, 1405extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
1429 struct timeval *vblanktime); 1406 struct timeval *vblanktime);
@@ -1465,31 +1442,8 @@ extern int drm_modeset_ctl(struct drm_device *dev, void *data,
1465 struct drm_file *file_priv); 1442 struct drm_file *file_priv);
1466 1443
1467 /* AGP/GART support (drm_agpsupport.h) */ 1444 /* AGP/GART support (drm_agpsupport.h) */
1468extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); 1445
1469extern int drm_agp_acquire(struct drm_device *dev); 1446#include <drm/drm_agpsupport.h>
1470extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
1471 struct drm_file *file_priv);
1472extern int drm_agp_release(struct drm_device *dev);
1473extern int drm_agp_release_ioctl(struct drm_device *dev, void *data,
1474 struct drm_file *file_priv);
1475extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
1476extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
1477 struct drm_file *file_priv);
1478extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
1479extern int drm_agp_info_ioctl(struct drm_device *dev, void *data,
1480 struct drm_file *file_priv);
1481extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
1482extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
1483 struct drm_file *file_priv);
1484extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
1485extern int drm_agp_free_ioctl(struct drm_device *dev, void *data,
1486 struct drm_file *file_priv);
1487extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
1488extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
1489 struct drm_file *file_priv);
1490extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
1491extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
1492 struct drm_file *file_priv);
1493 1447
1494 /* Stub support (drm_stub.h) */ 1448 /* Stub support (drm_stub.h) */
1495extern int drm_setmaster_ioctl(struct drm_device *dev, void *data, 1449extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
@@ -1504,23 +1458,19 @@ extern void drm_put_dev(struct drm_device *dev);
1504extern int drm_put_minor(struct drm_minor **minor); 1458extern int drm_put_minor(struct drm_minor **minor);
1505extern void drm_unplug_dev(struct drm_device *dev); 1459extern void drm_unplug_dev(struct drm_device *dev);
1506extern unsigned int drm_debug; 1460extern unsigned int drm_debug;
1461extern unsigned int drm_rnodes;
1507 1462
1508extern unsigned int drm_vblank_offdelay; 1463extern unsigned int drm_vblank_offdelay;
1509extern unsigned int drm_timestamp_precision; 1464extern unsigned int drm_timestamp_precision;
1510extern unsigned int drm_timestamp_monotonic; 1465extern unsigned int drm_timestamp_monotonic;
1511 1466
1512extern struct class *drm_class; 1467extern struct class *drm_class;
1513extern struct proc_dir_entry *drm_proc_root;
1514extern struct dentry *drm_debugfs_root; 1468extern struct dentry *drm_debugfs_root;
1515 1469
1516extern struct idr drm_minors_idr; 1470extern struct idr drm_minors_idr;
1517 1471
1518extern struct drm_local_map *drm_getsarea(struct drm_device *dev); 1472extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
1519 1473
1520 /* Proc support (drm_proc.h) */
1521extern int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root);
1522extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
1523
1524 /* Debugfs support */ 1474 /* Debugfs support */
1525#if defined(CONFIG_DEBUG_FS) 1475#if defined(CONFIG_DEBUG_FS)
1526extern int drm_debugfs_init(struct drm_minor *minor, int minor_id, 1476extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
@@ -1550,6 +1500,7 @@ extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
1550 struct dma_buf *dma_buf); 1500 struct dma_buf *dma_buf);
1551extern int drm_gem_prime_fd_to_handle(struct drm_device *dev, 1501extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
1552 struct drm_file *file_priv, int prime_fd, uint32_t *handle); 1502 struct drm_file *file_priv, int prime_fd, uint32_t *handle);
1503extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
1553 1504
1554extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, 1505extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
1555 struct drm_file *file_priv); 1506 struct drm_file *file_priv);
@@ -1561,25 +1512,22 @@ extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **
1561extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages); 1512extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
1562extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); 1513extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
1563 1514
1515int drm_gem_dumb_destroy(struct drm_file *file,
1516 struct drm_device *dev,
1517 uint32_t handle);
1564 1518
1565void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); 1519void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
1566void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); 1520void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
1567int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle); 1521void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
1568void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
1569
1570int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj);
1571int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf,
1572 struct drm_gem_object **obj);
1573 1522
1574#if DRM_DEBUG_CODE 1523#if DRM_DEBUG_CODE
1575extern int drm_vma_info(struct seq_file *m, void *data); 1524extern int drm_vma_info(struct seq_file *m, void *data);
1576#endif 1525#endif
1577 1526
1578 /* Scatter Gather Support (drm_scatter.h) */ 1527 /* Scatter Gather Support (drm_scatter.h) */
1579extern void drm_sg_cleanup(struct drm_sg_mem * entry); 1528extern void drm_legacy_sg_cleanup(struct drm_device *dev);
1580extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, 1529extern int drm_sg_alloc(struct drm_device *dev, void *data,
1581 struct drm_file *file_priv); 1530 struct drm_file *file_priv);
1582extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
1583extern int drm_sg_free(struct drm_device *dev, void *data, 1531extern int drm_sg_free(struct drm_device *dev, void *data,
1584 struct drm_file *file_priv); 1532 struct drm_file *file_priv);
1585 1533
@@ -1613,9 +1561,8 @@ struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
1613 size_t size); 1561 size_t size);
1614int drm_gem_object_init(struct drm_device *dev, 1562int drm_gem_object_init(struct drm_device *dev,
1615 struct drm_gem_object *obj, size_t size); 1563 struct drm_gem_object *obj, size_t size);
1616int drm_gem_private_object_init(struct drm_device *dev, 1564void drm_gem_private_object_init(struct drm_device *dev,
1617 struct drm_gem_object *obj, size_t size); 1565 struct drm_gem_object *obj, size_t size);
1618void drm_gem_object_handle_free(struct drm_gem_object *obj);
1619void drm_gem_vm_open(struct vm_area_struct *vma); 1566void drm_gem_vm_open(struct vm_area_struct *vma);
1620void drm_gem_vm_close(struct vm_area_struct *vma); 1567void drm_gem_vm_close(struct vm_area_struct *vma);
1621int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, 1568int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
@@ -1640,66 +1587,32 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
1640static inline void 1587static inline void
1641drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) 1588drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
1642{ 1589{
1643 if (obj != NULL) { 1590 if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
1644 struct drm_device *dev = obj->dev; 1591 struct drm_device *dev = obj->dev;
1592
1645 mutex_lock(&dev->struct_mutex); 1593 mutex_lock(&dev->struct_mutex);
1646 kref_put(&obj->refcount, drm_gem_object_free); 1594 if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
1595 drm_gem_object_free(&obj->refcount);
1647 mutex_unlock(&dev->struct_mutex); 1596 mutex_unlock(&dev->struct_mutex);
1648 } 1597 }
1649} 1598}
1650 1599
1600int drm_gem_handle_create_tail(struct drm_file *file_priv,
1601 struct drm_gem_object *obj,
1602 u32 *handlep);
1651int drm_gem_handle_create(struct drm_file *file_priv, 1603int drm_gem_handle_create(struct drm_file *file_priv,
1652 struct drm_gem_object *obj, 1604 struct drm_gem_object *obj,
1653 u32 *handlep); 1605 u32 *handlep);
1654int drm_gem_handle_delete(struct drm_file *filp, u32 handle); 1606int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
1655 1607
1656static inline void
1657drm_gem_object_handle_reference(struct drm_gem_object *obj)
1658{
1659 drm_gem_object_reference(obj);
1660 atomic_inc(&obj->handle_count);
1661}
1662
1663static inline void
1664drm_gem_object_handle_unreference(struct drm_gem_object *obj)
1665{
1666 if (obj == NULL)
1667 return;
1668
1669 if (atomic_read(&obj->handle_count) == 0)
1670 return;
1671 /*
1672 * Must bump handle count first as this may be the last
1673 * ref, in which case the object would disappear before we
1674 * checked for a name
1675 */
1676 if (atomic_dec_and_test(&obj->handle_count))
1677 drm_gem_object_handle_free(obj);
1678 drm_gem_object_unreference(obj);
1679}
1680
1681static inline void
1682drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
1683{
1684 if (obj == NULL)
1685 return;
1686
1687 if (atomic_read(&obj->handle_count) == 0)
1688 return;
1689
1690 /*
1691 * Must bump handle count first as this may be the last
1692 * ref, in which case the object would disappear before we
1693 * checked for a name
1694 */
1695
1696 if (atomic_dec_and_test(&obj->handle_count))
1697 drm_gem_object_handle_free(obj);
1698 drm_gem_object_unreference_unlocked(obj);
1699}
1700 1608
1701void drm_gem_free_mmap_offset(struct drm_gem_object *obj); 1609void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
1702int drm_gem_create_mmap_offset(struct drm_gem_object *obj); 1610int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
1611int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
1612
1613struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
1614void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
1615 bool dirty, bool accessed);
1703 1616
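The new drm_gem_get_pages()/drm_gem_put_pages() helpers factor out the shmem page-array handling drivers used to open-code; a hedged usage sketch:

	struct page **pages;

	pages = drm_gem_get_pages(obj, GFP_KERNEL);	/* pin obj->size worth of shmem pages */
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ... bind the pages into the GPU's address space ... */

	drm_gem_put_pages(obj, pages, true, true);	/* drop them, marked dirty + accessed */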
1704struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, 1617struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
1705 struct drm_file *filp, 1618 struct drm_file *filp,
@@ -1769,9 +1682,6 @@ extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
1769extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device); 1682extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
1770extern void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device); 1683extern void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device);
1771 1684
1772extern int drm_get_platform_dev(struct platform_device *pdev,
1773 struct drm_driver *driver);
1774
1775/* returns true if currently okay to sleep */ 1685/* returns true if currently okay to sleep */
1776static __inline__ bool drm_can_sleep(void) 1686static __inline__ bool drm_can_sleep(void)
1777{ 1687{
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
new file mode 100644
index 000000000000..a184eeee9c96
--- /dev/null
+++ b/include/drm/drm_agpsupport.h
@@ -0,0 +1,194 @@
1#ifndef _DRM_AGPSUPPORT_H_
2#define _DRM_AGPSUPPORT_H_
3
4#include <linux/kernel.h>
5#include <linux/mm.h>
6#include <linux/mutex.h>
7#include <linux/types.h>
8#include <linux/agp_backend.h>
9#include <drm/drmP.h>
10
11#if __OS_HAS_AGP
12
13void drm_free_agp(DRM_AGP_MEM * handle, int pages);
14int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
15int drm_unbind_agp(DRM_AGP_MEM * handle);
16DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
17 struct page **pages,
18 unsigned long num_pages,
19 uint32_t gtt_offset,
20 uint32_t type);
21
22struct drm_agp_head *drm_agp_init(struct drm_device *dev);
23void drm_agp_destroy(struct drm_agp_head *agp);
24void drm_agp_clear(struct drm_device *dev);
25int drm_agp_acquire(struct drm_device *dev);
26int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
27 struct drm_file *file_priv);
28int drm_agp_release(struct drm_device *dev);
29int drm_agp_release_ioctl(struct drm_device *dev, void *data,
30 struct drm_file *file_priv);
31int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
32int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
33 struct drm_file *file_priv);
34int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
35int drm_agp_info_ioctl(struct drm_device *dev, void *data,
36 struct drm_file *file_priv);
37int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
38int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
39 struct drm_file *file_priv);
40int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
41int drm_agp_free_ioctl(struct drm_device *dev, void *data,
42 struct drm_file *file_priv);
43int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
44int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
45 struct drm_file *file_priv);
46int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
47int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
48 struct drm_file *file_priv);
49
50static inline int drm_core_has_AGP(struct drm_device *dev)
51{
52 return drm_core_check_feature(dev, DRIVER_USE_AGP);
53}
54
55#else /* __OS_HAS_AGP */
56
57static inline void drm_free_agp(DRM_AGP_MEM * handle, int pages)
58{
59}
60
61static inline int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
62{
63 return -ENODEV;
64}
65
66static inline int drm_unbind_agp(DRM_AGP_MEM * handle)
67{
68 return -ENODEV;
69}
70
71static inline DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
72 struct page **pages,
73 unsigned long num_pages,
74 uint32_t gtt_offset,
75 uint32_t type)
76{
77 return NULL;
78}
79
80static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev)
81{
82 return NULL;
83}
84
85static inline void drm_agp_destroy(struct drm_agp_head *agp)
86{
87}
88
89static inline void drm_agp_clear(struct drm_device *dev)
90{
91}
92
93static inline int drm_agp_acquire(struct drm_device *dev)
94{
95 return -ENODEV;
96}
97
98static inline int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
99 struct drm_file *file_priv)
100{
101 return -ENODEV;
102}
103
104static inline int drm_agp_release(struct drm_device *dev)
105{
106 return -ENODEV;
107}
108
109static inline int drm_agp_release_ioctl(struct drm_device *dev, void *data,
110 struct drm_file *file_priv)
111{
112 return -ENODEV;
113}
114
115static inline int drm_agp_enable(struct drm_device *dev,
116 struct drm_agp_mode mode)
117{
118 return -ENODEV;
119}
120
121static inline int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
122 struct drm_file *file_priv)
123{
124 return -ENODEV;
125}
126
127static inline int drm_agp_info(struct drm_device *dev,
128 struct drm_agp_info *info)
129{
130 return -ENODEV;
131}
132
133static inline int drm_agp_info_ioctl(struct drm_device *dev, void *data,
134 struct drm_file *file_priv)
135{
136 return -ENODEV;
137}
138
139static inline int drm_agp_alloc(struct drm_device *dev,
140 struct drm_agp_buffer *request)
141{
142 return -ENODEV;
143}
144
145static inline int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
146 struct drm_file *file_priv)
147{
148 return -ENODEV;
149}
150
151static inline int drm_agp_free(struct drm_device *dev,
152 struct drm_agp_buffer *request)
153{
154 return -ENODEV;
155}
156
157static inline int drm_agp_free_ioctl(struct drm_device *dev, void *data,
158 struct drm_file *file_priv)
159{
160 return -ENODEV;
161}
162
163static inline int drm_agp_unbind(struct drm_device *dev,
164 struct drm_agp_binding *request)
165{
166 return -ENODEV;
167}
168
169static inline int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
170 struct drm_file *file_priv)
171{
172 return -ENODEV;
173}
174
175static inline int drm_agp_bind(struct drm_device *dev,
176 struct drm_agp_binding *request)
177{
178 return -ENODEV;
179}
180
181static inline int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
182 struct drm_file *file_priv)
183{
184 return -ENODEV;
185}
186
187static inline int drm_core_has_AGP(struct drm_device *dev)
188{
189 return 0;
190}
191
192#endif /* __OS_HAS_AGP */
193
194#endif /* _DRM_AGPSUPPORT_H_ */
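The value of the !__OS_HAS_AGP stubs above is that callers need no #ifdef of their own; the same code builds either way and the stubs degrade to -ENODEV/NULL. A sketch of the intended pattern:

	if (drm_core_has_AGP(dev) && dev->agp) {
		ret = drm_agp_acquire(dev);
		if (ret == 0)
			ret = drm_agp_enable(dev, mode);	/* mode: struct drm_agp_mode */
	}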
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index fa12a2fa4293..24f499569a2f 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -49,6 +49,7 @@ struct drm_clip_rect;
49#define DRM_MODE_OBJECT_FB 0xfbfbfbfb 49#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
50#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb 50#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
51#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee 51#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
52#define DRM_MODE_OBJECT_BRIDGE 0xbdbdbdbd
52 53
53struct drm_mode_object { 54struct drm_mode_object {
54 uint32_t id; 55 uint32_t id;
@@ -305,6 +306,7 @@ struct drm_connector;
305struct drm_encoder; 306struct drm_encoder;
306struct drm_pending_vblank_event; 307struct drm_pending_vblank_event;
307struct drm_plane; 308struct drm_plane;
309struct drm_bridge;
308 310
309/** 311/**
310 * drm_crtc_funcs - control CRTCs for a given device 312 * drm_crtc_funcs - control CRTCs for a given device
@@ -363,7 +365,8 @@ struct drm_crtc_funcs {
363 */ 365 */
364 int (*page_flip)(struct drm_crtc *crtc, 366 int (*page_flip)(struct drm_crtc *crtc,
365 struct drm_framebuffer *fb, 367 struct drm_framebuffer *fb,
366 struct drm_pending_vblank_event *event); 368 struct drm_pending_vblank_event *event,
369 uint32_t flags);
367 370
368 int (*set_property)(struct drm_crtc *crtc, 371 int (*set_property)(struct drm_crtc *crtc,
369 struct drm_property *property, uint64_t val); 372 struct drm_property *property, uint64_t val);
@@ -494,8 +497,6 @@ struct drm_encoder_funcs {
494 void (*destroy)(struct drm_encoder *encoder); 497 void (*destroy)(struct drm_encoder *encoder);
495}; 498};
496 499
497#define DRM_CONNECTOR_MAX_UMODES 16
498#define DRM_CONNECTOR_LEN 32
499#define DRM_CONNECTOR_MAX_ENCODER 3 500#define DRM_CONNECTOR_MAX_ENCODER 3
500 501
501/** 502/**
@@ -507,6 +508,7 @@ struct drm_encoder_funcs {
507 * @possible_crtcs: bitmask of potential CRTC bindings 508 * @possible_crtcs: bitmask of potential CRTC bindings
508 * @possible_clones: bitmask of potential sibling encoders for cloning 509 * @possible_clones: bitmask of potential sibling encoders for cloning
509 * @crtc: currently bound CRTC 510 * @crtc: currently bound CRTC
511 * @bridge: bridge associated to the encoder
510 * @funcs: control functions 512 * @funcs: control functions
511 * @helper_private: mid-layer private data 513 * @helper_private: mid-layer private data
512 * 514 *
@@ -523,6 +525,7 @@ struct drm_encoder {
523 uint32_t possible_clones; 525 uint32_t possible_clones;
524 526
525 struct drm_crtc *crtc; 527 struct drm_crtc *crtc;
528 struct drm_bridge *bridge;
526 const struct drm_encoder_funcs *funcs; 529 const struct drm_encoder_funcs *funcs;
527 void *helper_private; 530 void *helper_private;
528}; 531};
@@ -683,6 +686,48 @@ struct drm_plane {
683}; 686};
684 687
685/** 688/**
689 * drm_bridge_funcs - drm_bridge control functions
 690 * @mode_fixup: Try to fix up (or reject entirely) the proposed mode for this bridge
691 * @disable: Called right before encoder prepare, disables the bridge
692 * @post_disable: Called right after encoder prepare, for lockstepped disable
 693 * @mode_set: Set the given mode on the bridge
 694 * @pre_enable: Called right before encoder commit, for lockstepped commit
 695 * @enable: Called right after encoder commit, enables the bridge
 696 * @destroy: clean up and release the bridge object
697 */
698struct drm_bridge_funcs {
699 bool (*mode_fixup)(struct drm_bridge *bridge,
700 const struct drm_display_mode *mode,
701 struct drm_display_mode *adjusted_mode);
702 void (*disable)(struct drm_bridge *bridge);
703 void (*post_disable)(struct drm_bridge *bridge);
704 void (*mode_set)(struct drm_bridge *bridge,
705 struct drm_display_mode *mode,
706 struct drm_display_mode *adjusted_mode);
707 void (*pre_enable)(struct drm_bridge *bridge);
708 void (*enable)(struct drm_bridge *bridge);
709 void (*destroy)(struct drm_bridge *bridge);
710};
711
712/**
713 * drm_bridge - central DRM bridge control structure
714 * @dev: DRM device this bridge belongs to
715 * @head: list management
716 * @base: base mode object
717 * @funcs: control functions
718 * @driver_private: pointer to the bridge driver's internal context
719 */
720struct drm_bridge {
721 struct drm_device *dev;
722 struct list_head head;
723
724 struct drm_mode_object base;
725
726 const struct drm_bridge_funcs *funcs;
727 void *driver_private;
728};
729
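A minimal sketch of a bridge driver hooking into the ops above; the foo_* names are hypothetical, and only the hooks a device actually needs must be provided:

	static void foo_bridge_enable(struct drm_bridge *bridge)
	{
		struct foo_bridge *foo = bridge->driver_private;

		foo_hw_power_on(foo);	/* runs right after the encoder commit */
	}

	static const struct drm_bridge_funcs foo_bridge_funcs = {
		.enable = foo_bridge_enable,
	};

	/* at driver load:
	 *	drm_bridge_init(dev, &foo->base, &foo_bridge_funcs);
	 *	encoder->bridge = &foo->base;
	 */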
730/**
686 * drm_mode_set - new values for a CRTC config change 731 * drm_mode_set - new values for a CRTC config change
687 * @head: list management 732 * @head: list management
688 * @fb: framebuffer to use for new config 733 * @fb: framebuffer to use for new config
@@ -742,6 +787,7 @@ struct drm_mode_group {
742 uint32_t num_crtcs; 787 uint32_t num_crtcs;
743 uint32_t num_encoders; 788 uint32_t num_encoders;
744 uint32_t num_connectors; 789 uint32_t num_connectors;
790 uint32_t num_bridges;
745 791
746 /* list of object IDs for this group */ 792 /* list of object IDs for this group */
747 uint32_t *id_list; 793 uint32_t *id_list;
@@ -756,6 +802,8 @@ struct drm_mode_group {
756 * @fb_list: list of framebuffers available 802 * @fb_list: list of framebuffers available
757 * @num_connector: number of connectors on this device 803 * @num_connector: number of connectors on this device
758 * @connector_list: list of connector objects 804 * @connector_list: list of connector objects
805 * @num_bridge: number of bridges on this device
806 * @bridge_list: list of bridge objects
759 * @num_encoder: number of encoders on this device 807 * @num_encoder: number of encoders on this device
760 * @encoder_list: list of encoder objects 808 * @encoder_list: list of encoder objects
761 * @num_crtc: number of CRTCs on this device 809 * @num_crtc: number of CRTCs on this device
@@ -793,6 +841,8 @@ struct drm_mode_config {
793 841
794 int num_connector; 842 int num_connector;
795 struct list_head connector_list; 843 struct list_head connector_list;
844 int num_bridge;
845 struct list_head bridge_list;
796 int num_encoder; 846 int num_encoder;
797 struct list_head encoder_list; 847 struct list_head encoder_list;
798 int num_plane; 848 int num_plane;
@@ -839,11 +889,13 @@ struct drm_mode_config {
839 889
840 /* Optional properties */ 890 /* Optional properties */
841 struct drm_property *scaling_mode_property; 891 struct drm_property *scaling_mode_property;
842 struct drm_property *dithering_mode_property;
843 struct drm_property *dirty_info_property; 892 struct drm_property *dirty_info_property;
844 893
845 /* dumb ioctl parameters */ 894 /* dumb ioctl parameters */
846 uint32_t preferred_depth, prefer_shadow; 895 uint32_t preferred_depth, prefer_shadow;
896
897 /* whether async page flip is supported or not */
898 bool async_page_flip;
847}; 899};
848 900
849#define obj_to_crtc(x) container_of(x, struct drm_crtc, base) 901#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
@@ -869,6 +921,8 @@ extern int drm_crtc_init(struct drm_device *dev,
869 const struct drm_crtc_funcs *funcs); 921 const struct drm_crtc_funcs *funcs);
870extern void drm_crtc_cleanup(struct drm_crtc *crtc); 922extern void drm_crtc_cleanup(struct drm_crtc *crtc);
871 923
924extern void drm_connector_ida_init(void);
925extern void drm_connector_ida_destroy(void);
872extern int drm_connector_init(struct drm_device *dev, 926extern int drm_connector_init(struct drm_device *dev,
873 struct drm_connector *connector, 927 struct drm_connector *connector,
874 const struct drm_connector_funcs *funcs, 928 const struct drm_connector_funcs *funcs,
@@ -878,6 +932,10 @@ extern void drm_connector_cleanup(struct drm_connector *connector);
878/* helper to unplug all connectors from sysfs for device */ 932/* helper to unplug all connectors from sysfs for device */
879extern void drm_connector_unplug_all(struct drm_device *dev); 933extern void drm_connector_unplug_all(struct drm_device *dev);
880 934
935extern int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
936 const struct drm_bridge_funcs *funcs);
937extern void drm_bridge_cleanup(struct drm_bridge *bridge);
938
881extern int drm_encoder_init(struct drm_device *dev, 939extern int drm_encoder_init(struct drm_device *dev,
882 struct drm_encoder *encoder, 940 struct drm_encoder *encoder,
883 const struct drm_encoder_funcs *funcs, 941 const struct drm_encoder_funcs *funcs,
@@ -908,7 +966,6 @@ extern struct edid *drm_get_edid(struct drm_connector *connector,
908 struct i2c_adapter *adapter); 966 struct i2c_adapter *adapter);
909extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); 967extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
910extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); 968extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
911extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
912extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src); 969extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
913extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, 970extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
914 const struct drm_display_mode *mode); 971 const struct drm_display_mode *mode);
@@ -925,14 +982,9 @@ extern int drm_mode_height(const struct drm_display_mode *mode);
925/* for use by the fb module */ 982/* for use by the fb module */
926extern struct drm_display_mode *drm_mode_create(struct drm_device *dev); 983extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
927extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode); 984extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
928extern void drm_mode_list_concat(struct list_head *head,
929 struct list_head *new);
930extern void drm_mode_validate_size(struct drm_device *dev, 985extern void drm_mode_validate_size(struct drm_device *dev,
931 struct list_head *mode_list, 986 struct list_head *mode_list,
932 int maxX, int maxY, int maxPitch); 987 int maxX, int maxY, int maxPitch);
933extern void drm_mode_validate_clocks(struct drm_device *dev,
934 struct list_head *mode_list,
935 int *min, int *max, int n_ranges);
936extern void drm_mode_prune_invalid(struct drm_device *dev, 988extern void drm_mode_prune_invalid(struct drm_device *dev,
937 struct list_head *mode_list, bool verbose); 989 struct list_head *mode_list, bool verbose);
938extern void drm_mode_sort(struct list_head *mode_list); 990extern void drm_mode_sort(struct list_head *mode_list);
@@ -949,9 +1001,6 @@ extern int drm_object_property_set_value(struct drm_mode_object *obj,
949extern int drm_object_property_get_value(struct drm_mode_object *obj, 1001extern int drm_object_property_get_value(struct drm_mode_object *obj,
950 struct drm_property *property, 1002 struct drm_property *property,
951 uint64_t *value); 1003 uint64_t *value);
952extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
953extern void drm_framebuffer_set_object(struct drm_device *dev,
954 unsigned long handle);
955extern int drm_framebuffer_init(struct drm_device *dev, 1004extern int drm_framebuffer_init(struct drm_device *dev,
956 struct drm_framebuffer *fb, 1005 struct drm_framebuffer *fb,
957 const struct drm_framebuffer_funcs *funcs); 1006 const struct drm_framebuffer_funcs *funcs);
@@ -962,10 +1011,6 @@ extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
962extern void drm_framebuffer_remove(struct drm_framebuffer *fb); 1011extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
963extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb); 1012extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
964extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb); 1013extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
965extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
966extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
967extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
968extern bool drm_crtc_in_use(struct drm_crtc *crtc);
969 1014
970extern void drm_object_attach_property(struct drm_mode_object *obj, 1015extern void drm_object_attach_property(struct drm_mode_object *obj,
971 struct drm_property *property, 1016 struct drm_property *property,
@@ -990,7 +1035,6 @@ extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
990extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats, 1035extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
991 char *formats[]); 1036 char *formats[]);
992extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); 1037extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
993extern int drm_mode_create_dithering_property(struct drm_device *dev);
994extern int drm_mode_create_dirty_info_property(struct drm_device *dev); 1038extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
995extern const char *drm_get_encoder_name(const struct drm_encoder *encoder); 1039extern const char *drm_get_encoder_name(const struct drm_encoder *encoder);
996 1040
@@ -1040,17 +1084,12 @@ extern int drm_mode_getblob_ioctl(struct drm_device *dev,
1040 void *data, struct drm_file *file_priv); 1084 void *data, struct drm_file *file_priv);
1041extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev, 1085extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
1042 void *data, struct drm_file *file_priv); 1086 void *data, struct drm_file *file_priv);
1043extern int drm_mode_hotplug_ioctl(struct drm_device *dev,
1044 void *data, struct drm_file *file_priv);
1045extern int drm_mode_replacefb(struct drm_device *dev,
1046 void *data, struct drm_file *file_priv);
1047extern int drm_mode_getencoder(struct drm_device *dev, 1087extern int drm_mode_getencoder(struct drm_device *dev,
1048 void *data, struct drm_file *file_priv); 1088 void *data, struct drm_file *file_priv);
1049extern int drm_mode_gamma_get_ioctl(struct drm_device *dev, 1089extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
1050 void *data, struct drm_file *file_priv); 1090 void *data, struct drm_file *file_priv);
1051extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, 1091extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
1052 void *data, struct drm_file *file_priv); 1092 void *data, struct drm_file *file_priv);
1053extern u8 *drm_find_cea_extension(struct edid *edid);
1054extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match); 1093extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
1055extern bool drm_detect_hdmi_monitor(struct edid *edid); 1094extern bool drm_detect_hdmi_monitor(struct edid *edid);
1056extern bool drm_detect_monitor_audio(struct edid *edid); 1095extern bool drm_detect_monitor_audio(struct edid *edid);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index e8e1417af3d9..ae8dbfb1207c 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -342,13 +342,42 @@ u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
342u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], 342u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
343 int lane); 343 int lane);
344 344
345#define DP_RECEIVER_CAP_SIZE 0xf 345#define DP_RECEIVER_CAP_SIZE 0xf
346#define EDP_PSR_RECEIVER_CAP_SIZE 2
347
346void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]); 348void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
347void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]); 349void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
348 350
349u8 drm_dp_link_rate_to_bw_code(int link_rate); 351u8 drm_dp_link_rate_to_bw_code(int link_rate);
350int drm_dp_bw_code_to_link_rate(u8 link_bw); 352int drm_dp_bw_code_to_link_rate(u8 link_bw);
351 353
354struct edp_sdp_header {
355 u8 HB0; /* Secondary Data Packet ID */
356 u8 HB1; /* Secondary Data Packet Type */
357 u8 HB2; /* 7:5 reserved, 4:0 revision number */
358 u8 HB3; /* 7:5 reserved, 4:0 number of valid data bytes */
359} __packed;
360
361#define EDP_SDP_HEADER_REVISION_MASK 0x1F
362#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F
363
364struct edp_vsc_psr {
365 struct edp_sdp_header sdp_header;
366 u8 DB0; /* Stereo Interface */
367 u8 DB1; /* 0 - PSR State; 1 - Update RFB; 2 - CRC Valid */
368 u8 DB2; /* CRC value bits 7:0 of the R or Cr component */
369 u8 DB3; /* CRC value bits 15:8 of the R or Cr component */
370 u8 DB4; /* CRC value bits 7:0 of the G or Y component */
371 u8 DB5; /* CRC value bits 15:8 of the G or Y component */
372 u8 DB6; /* CRC value bits 7:0 of the B or Cb component */
373 u8 DB7; /* CRC value bits 15:8 of the B or Cb component */
374 u8 DB8_31[24]; /* Reserved */
375} __packed;
376
377#define EDP_VSC_PSR_STATE_ACTIVE (1<<0)
378#define EDP_VSC_PSR_UPDATE_RFB (1<<1)
379#define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2)
380
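For orientation, a hedged sketch of filling the VSC SDP above to announce PSR; the header byte values follow the eDP 1.3 VSC layout and should be treated as an assumption here:

	struct edp_vsc_psr psr_vsc = {
		.sdp_header = {
			.HB0 = 0,	/* secondary data packet ID */
			.HB1 = 0x7,	/* packet type: VSC */
			.HB2 = 0x2,	/* revision number used for PSR */
			.HB3 = 0x8,	/* 8 valid data bytes */
		},
		.DB1 = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID,
	};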
352static inline int 381static inline int
353drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE]) 382drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
354{ 383{
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index fc481fc17085..a1441c5ac63d 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -256,9 +256,11 @@ struct drm_encoder;
256struct drm_connector; 256struct drm_connector;
257struct drm_display_mode; 257struct drm_display_mode;
258struct hdmi_avi_infoframe; 258struct hdmi_avi_infoframe;
259struct hdmi_vendor_infoframe;
259 260
260void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid); 261void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
261int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads); 262int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
263int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
262int drm_av_sync_delay(struct drm_connector *connector, 264int drm_av_sync_delay(struct drm_connector *connector,
263 struct drm_display_mode *mode); 265 struct drm_display_mode *mode);
264struct drm_connector *drm_select_eld(struct drm_encoder *encoder, 266struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
@@ -268,5 +270,8 @@ int drm_load_edid_firmware(struct drm_connector *connector);
268int 270int
269drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, 271drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
270 const struct drm_display_mode *mode); 272 const struct drm_display_mode *mode);
273int
274drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
275 const struct drm_display_mode *mode);
271 276
272#endif /* __DRM_EDID_H__ */ 277#endif /* __DRM_EDID_H__ */
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 4a3fc244301c..c54cf3d4a03f 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -24,7 +24,6 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
24 unsigned int plane); 24 unsigned int plane);
25 25
26#ifdef CONFIG_DEBUG_FS 26#ifdef CONFIG_DEBUG_FS
27void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m);
28int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg); 27int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg);
29#endif 28#endif
30 29
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index f5e1168c7647..d639049a613d 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -84,12 +84,12 @@ static inline int drm_fixp2int(int64_t a)
84 return ((s64)a) >> DRM_FIXED_POINT; 84 return ((s64)a) >> DRM_FIXED_POINT;
85} 85}
86 86
87static inline s64 drm_fixp_msbset(int64_t a) 87static inline unsigned drm_fixp_msbset(int64_t a)
88{ 88{
89 unsigned shift, sign = (a >> 63) & 1; 89 unsigned shift, sign = (a >> 63) & 1;
90 90
91 for (shift = 62; shift > 0; --shift) 91 for (shift = 62; shift > 0; --shift)
92 if ((a >> shift) != sign) 92 if (((a >> shift) & 1) != sign)
93 return shift; 93 return shift;
94 94
95 return 0; 95 return 0;
@@ -100,9 +100,9 @@ static inline s64 drm_fixp_mul(s64 a, s64 b)
100 unsigned shift = drm_fixp_msbset(a) + drm_fixp_msbset(b); 100 unsigned shift = drm_fixp_msbset(a) + drm_fixp_msbset(b);
101 s64 result; 101 s64 result;
102 102
103 if (shift > 63) { 103 if (shift > 61) {
104 shift = shift - 63; 104 shift = shift - 61;
105 a >>= shift >> 1; 105 a >>= (shift >> 1) + (shift & 1);
106 b >>= shift >> 1; 106 b >>= shift >> 1;
107 } else 107 } else
108 shift = 0; 108 shift = 0;
@@ -120,7 +120,7 @@ static inline s64 drm_fixp_mul(s64 a, s64 b)
120 120
121static inline s64 drm_fixp_div(s64 a, s64 b) 121static inline s64 drm_fixp_div(s64 a, s64 b)
122{ 122{
123 unsigned shift = 63 - drm_fixp_msbset(a); 123 unsigned shift = 62 - drm_fixp_msbset(a);
124 s64 result; 124 s64 result;
125 125
126 a <<= shift; 126 a <<= shift;
@@ -154,7 +154,7 @@ static inline s64 drm_fixp_exp(s64 x)
154 } 154 }
155 155
156 if (x < 0) 156 if (x < 0)
157 sum = drm_fixp_div(1, sum); 157 sum = drm_fixp_div(DRM_FIXED_ONE, sum);
158 158
159 return sum; 159 return sum;
160} 160}
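To make the corrected shift bounds above concrete: in this header's 32.32 format (DRM_FIXED_ONE == 1ULL << 32) a signed 64-bit product overflows once the operands' MSB positions sum past 61, which is why drm_fixp_mul() now pre-shifts at 61 rather than 63. A small worked sketch:

	s64 a = 3 * DRM_FIXED_ONE;	/* 3.0 */
	s64 b = DRM_FIXED_ONE >> 1;	/* 0.5 */
	s64 p = drm_fixp_mul(a, b);	/* 1.5 */
	s64 q = drm_fixp_div(a, b);	/* 6.0; numerator pre-shifted by 62 - msbset(a) */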
diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h
new file mode 100644
index 000000000000..35c776ae7d3b
--- /dev/null
+++ b/include/drm/drm_flip_work.h
@@ -0,0 +1,76 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#ifndef DRM_FLIP_WORK_H
25#define DRM_FLIP_WORK_H
26
27#include <linux/kfifo.h>
28#include <linux/workqueue.h>
29
30/**
31 * DOC: flip utils
32 *
33 * Utility to queue up work to run from work-queue context after a flip/vblank.
34 * Typically this can be used to defer unreferencing framebuffers, cursor
35 * BOs, etc. until after vblank. The APIs are all safe (and lockless)
36 * for up to one producer and one consumer at a time. The single-consumer
37 * aspect is ensured by committing the queued work to a single work-queue.
38 */
39
40struct drm_flip_work;
41
42/*
43 * drm_flip_func_t - callback function
44 *
45 * @work: the flip work
46 * @val: value queued via drm_flip_work_queue()
47 *
48 * Callback function to be called for each of the queued work items after
49 * drm_flip_work_commit() is called.
50 */
51typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
52
53/**
54 * struct drm_flip_work - flip work queue
55 * @name: debug name
56 * @pending: number of queued but not committed items
57 * @count: number of committed items
58 * @func: callback function called for each committed item
59 * @worker: worker which calls @func
60 */
61struct drm_flip_work {
62 const char *name;
63 atomic_t pending, count;
64 drm_flip_func_t func;
65 struct work_struct worker;
66 DECLARE_KFIFO_PTR(fifo, void *);
67};
68
69void drm_flip_work_queue(struct drm_flip_work *work, void *val);
70void drm_flip_work_commit(struct drm_flip_work *work,
71 struct workqueue_struct *wq);
72int drm_flip_work_init(struct drm_flip_work *work, int size,
73 const char *name, drm_flip_func_t func);
74void drm_flip_work_cleanup(struct drm_flip_work *work);
75
76#endif /* DRM_FLIP_WORK_H */
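A hedged usage sketch matching the deferred-unref case the DOC block describes; priv, old_fb and priv->wq are hypothetical driver state:

	static void unref_worker(struct drm_flip_work *work, void *val)
	{
		drm_framebuffer_unreference(val);	/* called from workqueue context */
	}

	/* once at init: room for 16 queued-but-uncommitted entries */
	drm_flip_work_init(&priv->unref_work, 16, "unref", unref_worker);

	/* producer, e.g. when a flip latches a new framebuffer: */
	drm_flip_work_queue(&priv->unref_work, old_fb);

	/* consumer, from the flip-done/vblank interrupt: */
	drm_flip_work_commit(&priv->unref_work, priv->wq);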
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index c34f27f80bcc..89b4d7db1ebd 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -30,14 +30,6 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
30/* set vm_flags and we can change the vm attribute to other one at here. */ 30/* set vm_flags and we can change the vm attribute to other one at here. */
31int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma); 31int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
32 32
33/*
34 * destroy memory region allocated.
35 * - a gem handle and physical memory region pointed by a gem object
36 * would be released by drm_gem_handle_delete().
37 */
38int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
39 struct drm_device *drm, unsigned int handle);
40
41/* allocate physical memory. */ 33/* allocate physical memory. */
42struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, 34struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
43 unsigned int size); 35 unsigned int size);
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 4d06edb56d5f..cba67865d18f 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -36,11 +36,19 @@
36/* 36/*
37 * Generic range manager structs 37 * Generic range manager structs
38 */ 38 */
39#include <linux/bug.h>
40#include <linux/kernel.h>
39#include <linux/list.h> 41#include <linux/list.h>
42#include <linux/spinlock.h>
40#ifdef CONFIG_DEBUG_FS 43#ifdef CONFIG_DEBUG_FS
41#include <linux/seq_file.h> 44#include <linux/seq_file.h>
42#endif 45#endif
43 46
47enum drm_mm_search_flags {
48 DRM_MM_SEARCH_DEFAULT = 0,
49 DRM_MM_SEARCH_BEST = 1 << 0,
50};
51
44struct drm_mm_node { 52struct drm_mm_node {
45 struct list_head node_list; 53 struct list_head node_list;
46 struct list_head hole_stack; 54 struct list_head hole_stack;
@@ -62,9 +70,6 @@ struct drm_mm {
62 /* head_node.node_list is the list of all memory nodes, ordered 70 /* head_node.node_list is the list of all memory nodes, ordered
63 * according to the (increasing) start address of the memory node. */ 71 * according to the (increasing) start address of the memory node. */
64 struct drm_mm_node head_node; 72 struct drm_mm_node head_node;
65 struct list_head unused_nodes;
66 int num_unused;
67 spinlock_t unused_lock;
68 unsigned int scan_check_range : 1; 73 unsigned int scan_check_range : 1;
69 unsigned scan_alignment; 74 unsigned scan_alignment;
70 unsigned long scan_color; 75 unsigned long scan_color;
@@ -115,13 +120,6 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
115#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ 120#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
116 &(mm)->head_node.node_list, \ 121 &(mm)->head_node.node_list, \
117 node_list) 122 node_list)
118#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
119 for (entry = (mm)->prev_scanned_node, \
120 next = entry ? list_entry(entry->node_list.next, \
121 struct drm_mm_node, node_list) : NULL; \
122 entry != NULL; entry = next, \
123 next = entry ? list_entry(entry->node_list.next, \
124 struct drm_mm_node, node_list) : NULL) \
125 123
126/* Note that we need to unroll list_for_each_entry in order to inline 124/* Note that we need to unroll list_for_each_entry in order to inline
127 * setting hole_start and hole_end on each iteration and keep the 125 * setting hole_start and hole_end on each iteration and keep the
@@ -138,124 +136,50 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
138/* 136/*
139 * Basic range manager support (drm_mm.c) 137 * Basic range manager support (drm_mm.c)
140 */ 138 */
141extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm, 139extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
142 unsigned long start,
143 unsigned long size,
144 bool atomic);
145extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
146 unsigned long size,
147 unsigned alignment,
148 unsigned long color,
149 int atomic);
150extern struct drm_mm_node *drm_mm_get_block_range_generic(
151 struct drm_mm_node *node,
152 unsigned long size,
153 unsigned alignment,
154 unsigned long color,
155 unsigned long start,
156 unsigned long end,
157 int atomic);
158static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
159 unsigned long size,
160 unsigned alignment)
161{
162 return drm_mm_get_block_generic(parent, size, alignment, 0, 0);
163}
164static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
165 unsigned long size,
166 unsigned alignment)
167{
168 return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
169}
170static inline struct drm_mm_node *drm_mm_get_block_range(
171 struct drm_mm_node *parent,
172 unsigned long size,
173 unsigned alignment,
174 unsigned long start,
175 unsigned long end)
176{
177 return drm_mm_get_block_range_generic(parent, size, alignment, 0,
178 start, end, 0);
179}
180static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
181 struct drm_mm_node *parent,
182 unsigned long size,
183 unsigned alignment,
184 unsigned long start,
185 unsigned long end)
186{
187 return drm_mm_get_block_range_generic(parent, size, alignment, 0,
188 start, end, 1);
189}
190 140
191extern int drm_mm_insert_node(struct drm_mm *mm,
192 struct drm_mm_node *node,
193 unsigned long size,
194 unsigned alignment);
195extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
196 struct drm_mm_node *node,
197 unsigned long size,
198 unsigned alignment,
199 unsigned long start,
200 unsigned long end);
201extern int drm_mm_insert_node_generic(struct drm_mm *mm, 141extern int drm_mm_insert_node_generic(struct drm_mm *mm,
202 struct drm_mm_node *node, 142 struct drm_mm_node *node,
203 unsigned long size, 143 unsigned long size,
204 unsigned alignment, 144 unsigned alignment,
205 unsigned long color); 145 unsigned long color,
146 enum drm_mm_search_flags flags);
147static inline int drm_mm_insert_node(struct drm_mm *mm,
148 struct drm_mm_node *node,
149 unsigned long size,
150 unsigned alignment,
151 enum drm_mm_search_flags flags)
152{
153 return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags);
154}
155
206extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, 156extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
207 struct drm_mm_node *node, 157 struct drm_mm_node *node,
208 unsigned long size, 158 unsigned long size,
209 unsigned alignment, 159 unsigned alignment,
210 unsigned long color, 160 unsigned long color,
211 unsigned long start, 161 unsigned long start,
212 unsigned long end); 162 unsigned long end,
213extern void drm_mm_put_block(struct drm_mm_node *cur); 163 enum drm_mm_search_flags flags);
214extern void drm_mm_remove_node(struct drm_mm_node *node); 164static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
215extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); 165 struct drm_mm_node *node,
216extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, 166 unsigned long size,
217 unsigned long size, 167 unsigned alignment,
218 unsigned alignment, 168 unsigned long start,
219 unsigned long color, 169 unsigned long end,
220 bool best_match); 170 enum drm_mm_search_flags flags)
221extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
222 const struct drm_mm *mm,
223 unsigned long size,
224 unsigned alignment,
225 unsigned long color,
226 unsigned long start,
227 unsigned long end,
228 bool best_match);
229static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
230 unsigned long size,
231 unsigned alignment,
232 bool best_match)
233{ 171{
234 return drm_mm_search_free_generic(mm,size, alignment, 0, best_match); 172 return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
235} 173 0, start, end, flags);
236static inline struct drm_mm_node *drm_mm_search_free_in_range(
237 const struct drm_mm *mm,
238 unsigned long size,
239 unsigned alignment,
240 unsigned long start,
241 unsigned long end,
242 bool best_match)
243{
244 return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
245 start, end, best_match);
246} 174}
247 175
176extern void drm_mm_remove_node(struct drm_mm_node *node);
177extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
248extern void drm_mm_init(struct drm_mm *mm, 178extern void drm_mm_init(struct drm_mm *mm,
249 unsigned long start, 179 unsigned long start,
250 unsigned long size); 180 unsigned long size);
251extern void drm_mm_takedown(struct drm_mm *mm); 181extern void drm_mm_takedown(struct drm_mm *mm);
252extern int drm_mm_clean(struct drm_mm *mm); 182extern int drm_mm_clean(struct drm_mm *mm);
253extern int drm_mm_pre_get(struct drm_mm *mm);
254
255static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
256{
257 return block->mm;
258}
259 183
260void drm_mm_init_scan(struct drm_mm *mm, 184void drm_mm_init_scan(struct drm_mm *mm,
261 unsigned long size, 185 unsigned long size,
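
A sketch of the allocation flow after this interface change, assuming a caller-owned node (the example_* names are illustrative): drm_mm_insert_node() now fills a pre-allocated struct drm_mm_node instead of returning one, and the search policy is an explicit flag rather than a bool.

#include <linux/string.h>
#include <drm/drm_mm.h>

/* Hypothetical: carve out 4096 bytes, 256-byte aligned, from @mm. */
static int example_alloc(struct drm_mm *mm, struct drm_mm_node *node)
{
	int ret;

	memset(node, 0, sizeof(*node));
	ret = drm_mm_insert_node(mm, node, 4096, 256,
				 DRM_MM_SEARCH_DEFAULT);
	if (ret)	/* typically -ENOSPC when no hole fits */
		return ret;

	/* node->start now holds the assigned offset. */
	return 0;
}

static void example_free(struct drm_mm_node *node)
{
	/* Replaces the removed drm_mm_put_block(); storage stays caller-owned. */
	drm_mm_remove_node(node);
}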
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 34efaf64cc87..fd54a14a7c2a 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -1,4 +1,22 @@
1#define radeon_PCI_IDS \ 1#define radeon_PCI_IDS \
2 {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3 {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
4 {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
5 {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
6 {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
7 {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
8 {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
9 {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
10 {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
11 {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
12 {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
13 {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
14 {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
15 {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
16 {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
17 {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
18 {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
19 {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 20 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
3 {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 21 {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
4 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 22 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -690,29 +708,6 @@
690 {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \ 708 {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
691 {0, 0, 0} 709 {0, 0, 0}
692 710
693#define mach64_PCI_IDS \
694 {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
695 {0x1002, 0x4750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
696 {0x1002, 0x4751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
697 {0x1002, 0x4742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
698 {0x1002, 0x4744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
699 {0x1002, 0x4c49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
700 {0x1002, 0x4c50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
701 {0x1002, 0x4c51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
702 {0x1002, 0x4c42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
703 {0x1002, 0x4c44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
704 {0x1002, 0x474c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
705 {0x1002, 0x474f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
706 {0x1002, 0x4752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
707 {0x1002, 0x4753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
708 {0x1002, 0x474d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
709 {0x1002, 0x474e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
710 {0x1002, 0x4c52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
711 {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
712 {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
713 {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
714 {0, 0, 0}
715
716#define sisdrv_PCI_IDS \ 711#define sisdrv_PCI_IDS \
717 {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 712 {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
718 {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 713 {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
@@ -752,10 +747,6 @@
752 {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 747 {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
753 {0, 0, 0} 748 {0, 0, 0}
754 749
755#define gamma_PCI_IDS \
756 {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
757 {0, 0, 0}
758
759#define savage_PCI_IDS \ 750#define savage_PCI_IDS \
760 {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ 751 {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
761 {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ 752 {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
@@ -781,6 +772,3 @@
781 {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ 772 {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
782 {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ 773 {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
783 {0, 0, 0} 774 {0, 0, 0}
784
785#define ffb_PCI_IDS \
786 {0, 0, 0}
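
For orientation, these macros are meant to be expanded into a struct pci_device_id table inside the matching driver. A sketch under that assumption (the table name is hypothetical, and the CHIP_* driver-data values resolve against enums private to the radeon driver, so this only compiles there):

#include <linux/module.h>
#include <linux/pci.h>
#include <drm/drm_pciids.h>

static struct pci_device_id example_pciidlist[] = {
	radeon_PCI_IDS	/* each entry: vendor, device, subvendor, subdevice,
			 * class, class_mask, driver_data; the macro ends with
			 * a terminating {0, 0, 0} entry */
};
MODULE_DEVICE_TABLE(pci, example_pciidlist);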
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
new file mode 100644
index 000000000000..c18a593d1744
--- /dev/null
+++ b/include/drm/drm_vma_manager.h
@@ -0,0 +1,257 @@
1#ifndef __DRM_VMA_MANAGER_H__
2#define __DRM_VMA_MANAGER_H__
3
4/*
5 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#include <drm/drm_mm.h>
27#include <linux/fs.h>
28#include <linux/mm.h>
29#include <linux/module.h>
30#include <linux/rbtree.h>
31#include <linux/spinlock.h>
32#include <linux/types.h>
33
34struct drm_vma_offset_file {
35 struct rb_node vm_rb;
36 struct file *vm_filp;
37 unsigned long vm_count;
38};
39
40struct drm_vma_offset_node {
41 rwlock_t vm_lock;
42 struct drm_mm_node vm_node;
43 struct rb_node vm_rb;
44 struct rb_root vm_files;
45};
46
47struct drm_vma_offset_manager {
48 rwlock_t vm_lock;
49 struct rb_root vm_addr_space_rb;
50 struct drm_mm vm_addr_space_mm;
51};
52
53void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
54 unsigned long page_offset, unsigned long size);
55void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
56
57struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
58 unsigned long start,
59 unsigned long pages);
60struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
61 unsigned long start,
62 unsigned long pages);
63int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
64 struct drm_vma_offset_node *node, unsigned long pages);
65void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
66 struct drm_vma_offset_node *node);
67
68int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp);
69void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp);
70bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
71 struct file *filp);
72
73/**
74 * drm_vma_offset_exact_lookup() - Look up node by exact address
75 * @mgr: Manager object
76 * @start: Start address (page-based, not byte-based)
77 * @pages: Size of object (page-based)
78 *
79 * Same as drm_vma_offset_lookup() but does not allow any offset into the node.
80 * It only returns the exact object with the given start address.
81 *
82 * RETURNS:
83 * Node at exact start address @start.
84 */
85static inline struct drm_vma_offset_node *
86drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
87 unsigned long start,
88 unsigned long pages)
89{
90 struct drm_vma_offset_node *node;
91
92 node = drm_vma_offset_lookup(mgr, start, pages);
93 return (node && node->vm_node.start == start) ? node : NULL;
94}
95
96/**
97 * drm_vma_offset_lock_lookup() - Lock lookup for extended private use
98 * @mgr: Manager object
99 *
100 * Lock the VMA manager for extended lookups. Only *_locked() VMA calls are
101 * allowed while holding this lock. All other contexts are blocked from the
102 * VMA manager until the lock is released via drm_vma_offset_unlock_lookup().
103 *
104 * Use this if you need to take a reference to the objects returned by
105 * drm_vma_offset_lookup_locked() before releasing this lock again.
106 *
107 * This lock must not be used for anything else than extended lookups. You must
108 * not call any other VMA helpers while holding this lock.
109 *
110 * Note: you are in atomic context while holding this lock!
111 *
112 * Example:
113 * drm_vma_offset_lock_lookup(mgr);
114 * node = drm_vma_offset_lookup_locked(mgr);
115 * if (node)
116 * kref_get_unless_zero(container_of(node, sth, entr));
117 * drm_vma_offset_unlock_lookup(mgr);
118 */
119static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
120{
121 read_lock(&mgr->vm_lock);
122}
123
124/**
125 * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use
126 * @mgr: Manager object
127 *
128 * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information.
129 */
130static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
131{
132 read_unlock(&mgr->vm_lock);
133}
134
135/**
136 * drm_vma_node_reset() - Initialize or reset node object
137 * @node: Node to initialize or reset
138 *
139 * Reset a node to its initial state. This must be called before using it with
140 * any VMA offset manager.
141 *
142 * This must not be called on an already allocated node, or you will leak
143 * memory.
144 */
145static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
146{
147 memset(node, 0, sizeof(*node));
148 node->vm_files = RB_ROOT;
149 rwlock_init(&node->vm_lock);
150}
151
152/**
153 * drm_vma_node_start() - Return start address for page-based addressing
154 * @node: Node to inspect
155 *
156 * Return the start address of the given node. This can be used as offset into
157 * the linear VM space that is provided by the VMA offset manager. Note that
158 * this can only be used for page-based addressing. If you need a proper offset
159 * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the
160 * drm_vma_node_offset_addr() helper instead.
161 *
162 * RETURNS:
163 * Start address of @node for page-based addressing. 0 if the node does not
164 * have an offset allocated.
165 */
166static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node)
167{
168 return node->vm_node.start;
169}
170
171/**
172 * drm_vma_node_size() - Return size (page-based)
173 * @node: Node to inspect
174 *
175 * Return the size as number of pages for the given node. This is the same size
176 * that was passed to drm_vma_offset_add(). If no offset is allocated for the
177 * node, this is 0.
178 *
179 * RETURNS:
180 * Size of @node as number of pages. 0 if the node does not have an offset
181 * allocated.
182 */
183static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
184{
185 return node->vm_node.size;
186}
187
188/**
189 * drm_vma_node_has_offset() - Check whether node is added to offset manager
190 * @node: Node to be checked
191 *
192 * RETURNS:
193 * true iff the node was previously allocated an offset and added to
194 * a VMA offset manager.
195 */
196static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node)
197{
198 return drm_mm_node_allocated(&node->vm_node);
199}
200
201/**
202 * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
203 * @node: Linked offset node
204 *
205 * Same as drm_vma_node_start() but returns the address as a valid offset that
206 * can be used for user-space mappings during mmap().
207 * This must not be called on unlinked nodes.
208 *
209 * RETURNS:
210 * Offset of @node for byte-based addressing. 0 if the node does not have an
211 * offset allocated.
212 */
213static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
214{
215 return ((__u64)node->vm_node.start) << PAGE_SHIFT;
216}
217
218/**
219 * drm_vma_node_unmap() - Unmap offset node
220 * @node: Offset node
221 * @file_mapping: Address space to unmap @node from
222 *
223 * Unmap all userspace mappings for a given offset node. The mappings must be
224 * associated with the @file_mapping address-space. If no offset exists or
225 * the address-space is invalid, nothing is done.
226 *
227 * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
228 * is not called on this node concurrently.
229 */
230static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
231 struct address_space *file_mapping)
232{
233 if (file_mapping && drm_vma_node_has_offset(node))
234 unmap_mapping_range(file_mapping,
235 drm_vma_node_offset_addr(node),
236 drm_vma_node_size(node) << PAGE_SHIFT, 1);
237}
238
239/**
240 * drm_vma_node_verify_access() - Access verification helper for TTM
241 * @node: Offset node
242 * @filp: Open-file
243 *
244 * This checks whether @filp is granted access to @node. It is the same as
245 * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM
246 * verify_access() callbacks.
247 *
248 * RETURNS:
249 * 0 if access is granted, -EACCES otherwise.
250 */
251static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
252 struct file *filp)
253{
254 return drm_vma_node_is_allowed(node, filp) ? 0 : -EACCES;
255}
256
257#endif /* __DRM_VMA_MANAGER_H__ */
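
A sketch of the mmap-offset lifecycle a GEM- or TTM-style driver would drive through this manager (the example_* names and the page-range numbers are assumptions): ranges are handed out page-based, and drm_vma_node_offset_addr() turns them into the byte offset returned to user space for mmap().

#include <drm/drm_vma_manager.h>

struct example_bo {
	struct drm_vma_offset_node vma_node;
	unsigned long num_pages;
};

static struct drm_vma_offset_manager example_mgr;

static void example_mgr_setup(void)
{
	/* Arbitrary placement for the sketch: 0x10000000 pages of space. */
	drm_vma_offset_manager_init(&example_mgr, 0x100000, 0x10000000);
}

static int example_bo_map_offset(struct example_bo *bo, __u64 *offset)
{
	int ret;

	drm_vma_node_reset(&bo->vma_node);
	ret = drm_vma_offset_add(&example_mgr, &bo->vma_node, bo->num_pages);
	if (ret)
		return ret;

	/* Byte offset user space passes to mmap(). */
	*offset = drm_vma_node_offset_addr(&bo->vma_node);
	return 0;
}

static void example_bo_release(struct example_bo *bo,
			       struct address_space *mapping)
{
	/* Shoot down live user mappings, then return the range. */
	drm_vma_node_unmap(&bo->vma_node, mapping);
	drm_vma_offset_remove(&example_mgr, &bo->vma_node);
}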
diff --git a/include/drm/i2c/tda998x.h b/include/drm/i2c/tda998x.h
new file mode 100644
index 000000000000..3e419d92cf5a
--- /dev/null
+++ b/include/drm/i2c/tda998x.h
@@ -0,0 +1,30 @@
1#ifndef __DRM_I2C_TDA998X_H__
2#define __DRM_I2C_TDA998X_H__
3
4struct tda998x_encoder_params {
5 u8 swap_b:3;
6 u8 mirr_b:1;
7 u8 swap_a:3;
8 u8 mirr_a:1;
9 u8 swap_d:3;
10 u8 mirr_d:1;
11 u8 swap_c:3;
12 u8 mirr_c:1;
13 u8 swap_f:3;
14 u8 mirr_f:1;
15 u8 swap_e:3;
16 u8 mirr_e:1;
17
18 u8 audio_cfg;
19 u8 audio_clk_cfg;
20 u8 audio_frame[6];
21
22 enum {
23 AFMT_SPDIF,
24 AFMT_I2S
25 } audio_format;
26
27 unsigned audio_sample_rate;
28};
29
30#endif
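
A hedged example of the platform data a board file might attach for this encoder; every value below (channel swaps, audio config byte, sample rate) is invented for illustration and would come from the board schematic in practice.

#include <drm/i2c/tda998x.h>

static const struct tda998x_encoder_params example_tda998x_params = {
	/* Hypothetical board-specific video-port mapping, no bit mirroring. */
	.swap_a = 2, .mirr_a = 0,
	.swap_b = 3, .mirr_b = 0,
	.swap_c = 0, .mirr_c = 0,
	.swap_d = 1, .mirr_d = 0,
	.swap_e = 4, .mirr_e = 0,
	.swap_f = 5, .mirr_f = 0,
	/* I2S input at 48 kHz; the cfg byte is device-specific. */
	.audio_cfg = 0x03,
	.audio_format = AFMT_I2S,
	.audio_sample_rate = 48000,
};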
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 8a6aa56ece52..751eaffbf0d5 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -32,12 +32,12 @@
32#define _TTM_BO_API_H_ 32#define _TTM_BO_API_H_
33 33
34#include <drm/drm_hashtab.h> 34#include <drm/drm_hashtab.h>
35#include <drm/drm_vma_manager.h>
35#include <linux/kref.h> 36#include <linux/kref.h>
36#include <linux/list.h> 37#include <linux/list.h>
37#include <linux/wait.h> 38#include <linux/wait.h>
38#include <linux/mutex.h> 39#include <linux/mutex.h>
39#include <linux/mm.h> 40#include <linux/mm.h>
40#include <linux/rbtree.h>
41#include <linux/bitmap.h> 41#include <linux/bitmap.h>
42#include <linux/reservation.h> 42#include <linux/reservation.h>
43 43
@@ -145,7 +145,6 @@ struct ttm_tt;
145 * @type: The bo type. 145 * @type: The bo type.
146 * @destroy: Destruction function. If NULL, kfree is used. 146 * @destroy: Destruction function. If NULL, kfree is used.
147 * @num_pages: Actual number of pages. 147 * @num_pages: Actual number of pages.
148 * @addr_space_offset: Address space offset.
149 * @acc_size: Accounted size for this object. 148 * @acc_size: Accounted size for this object.
150 * @kref: Reference count of this buffer object. When this refcount reaches 149 * @kref: Reference count of this buffer object. When this refcount reaches
151 * zero, the object is put on the delayed delete list. 150 * zero, the object is put on the delayed delete list.
@@ -166,8 +165,7 @@ struct ttm_tt;
166 * @swap: List head for swap LRU list. 165 * @swap: List head for swap LRU list.
167 * @sync_obj: Pointer to a synchronization object. 166 * @sync_obj: Pointer to a synchronization object.
168 * @priv_flags: Flags describing buffer object internal state. 167 * @priv_flags: Flags describing buffer object internal state.
169 * @vm_rb: Rb node for the vm rb tree. 168 * @vma_node: Address space manager node.
170 * @vm_node: Address space manager node.
171 * @offset: The current GPU offset, which can have different meanings 169 * @offset: The current GPU offset, which can have different meanings
172 * depending on the memory type. For SYSTEM type memory, it should be 0. 170 * depending on the memory type. For SYSTEM type memory, it should be 0.
173 * @cur_placement: Hint of current placement. 171 * @cur_placement: Hint of current placement.
@@ -194,7 +192,6 @@ struct ttm_buffer_object {
194 enum ttm_bo_type type; 192 enum ttm_bo_type type;
195 void (*destroy) (struct ttm_buffer_object *); 193 void (*destroy) (struct ttm_buffer_object *);
196 unsigned long num_pages; 194 unsigned long num_pages;
197 uint64_t addr_space_offset;
198 size_t acc_size; 195 size_t acc_size;
199 196
200 /** 197 /**
@@ -238,13 +235,7 @@ struct ttm_buffer_object {
238 void *sync_obj; 235 void *sync_obj;
239 unsigned long priv_flags; 236 unsigned long priv_flags;
240 237
241 /** 238 struct drm_vma_offset_node vma_node;
242 * Members protected by the bdev::vm_lock
243 */
244
245 struct rb_node vm_rb;
246 struct drm_mm_node *vm_node;
247
248 239
249 /** 240 /**
250 * Special members that are protected by the reserve lock 241 * Special members that are protected by the reserve lock
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 984fc2d571a1..8639c85d61c4 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -36,6 +36,7 @@
36#include <ttm/ttm_placement.h> 36#include <ttm/ttm_placement.h>
37#include <drm/drm_mm.h> 37#include <drm/drm_mm.h>
38#include <drm/drm_global.h> 38#include <drm/drm_global.h>
39#include <drm/drm_vma_manager.h>
39#include <linux/workqueue.h> 40#include <linux/workqueue.h>
40#include <linux/fs.h> 41#include <linux/fs.h>
41#include <linux/spinlock.h> 42#include <linux/spinlock.h>
@@ -519,7 +520,7 @@ struct ttm_bo_global {
519 * @man: An array of mem_type_managers. 520 * @man: An array of mem_type_managers.
520 * @fence_lock: Protects the synchronizing members on *all* bos belonging 521 * @fence_lock: Protects the synchronizing members on *all* bos belonging
521 * to this device. 522 * to this device.
522 * @addr_space_mm: Range manager for the device address space. 523 * @vma_manager: Address space manager
523 * lru_lock: Spinlock that protects the buffer+device lru lists and 524 * lru_lock: Spinlock that protects the buffer+device lru lists and
524 * ddestroy lists. 525 * ddestroy lists.
525 * @val_seq: Current validation sequence. 526 * @val_seq: Current validation sequence.
@@ -537,14 +538,13 @@ struct ttm_bo_device {
537 struct list_head device_list; 538 struct list_head device_list;
538 struct ttm_bo_global *glob; 539 struct ttm_bo_global *glob;
539 struct ttm_bo_driver *driver; 540 struct ttm_bo_driver *driver;
540 rwlock_t vm_lock;
541 struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; 541 struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
542 spinlock_t fence_lock; 542 spinlock_t fence_lock;
543
543 /* 544 /*
544 * Protected by the vm lock. 545 * Protected by internal locks.
545 */ 546 */
546 struct rb_root addr_space_rb; 547 struct drm_vma_offset_manager vma_manager;
547 struct drm_mm addr_space_mm;
548 548
549 /* 549 /*
550 * Protected by the global:lru lock. 550 * Protected by the global:lru lock.
diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h
index 15e997fa78f2..4aa2b48cd151 100644
--- a/include/dt-bindings/clock/vf610-clock.h
+++ b/include/dt-bindings/clock/vf610-clock.h
@@ -158,6 +158,8 @@
158#define VF610_CLK_GPU_SEL 145 158#define VF610_CLK_GPU_SEL 145
159#define VF610_CLK_GPU_EN 146 159#define VF610_CLK_GPU_EN 146
160#define VF610_CLK_GPU2D 147 160#define VF610_CLK_GPU2D 147
161#define VF610_CLK_END 148 161#define VF610_CLK_ENET0 148
162#define VF610_CLK_ENET1 149
163#define VF610_CLK_END 150
162 164
163#endif /* __DT_BINDINGS_CLOCK_VF610_H */ 165#endif /* __DT_BINDINGS_CLOCK_VF610_H */
diff --git a/include/dt-bindings/pinctrl/am33xx.h b/include/dt-bindings/pinctrl/am33xx.h
index 469e0325e6f4..2fbc804e1a45 100644
--- a/include/dt-bindings/pinctrl/am33xx.h
+++ b/include/dt-bindings/pinctrl/am33xx.h
@@ -5,7 +5,7 @@
5#ifndef _DT_BINDINGS_PINCTRL_AM33XX_H 5#ifndef _DT_BINDINGS_PINCTRL_AM33XX_H
6#define _DT_BINDINGS_PINCTRL_AM33XX_H 6#define _DT_BINDINGS_PINCTRL_AM33XX_H
7 7
8#include <include/dt-bindings/pinctrl/omap.h> 8#include <dt-bindings/pinctrl/omap.h>
9 9
10/* am33xx specific mux bit defines */ 10/* am33xx specific mux bit defines */
11#undef PULL_ENA 11#undef PULL_ENA
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 6ad72f92469c..353ba256f368 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -191,7 +191,6 @@ extern bool wmi_has_guid(const char *guid);
191#define ACPI_VIDEO_BACKLIGHT_DMI_VIDEO 0x0200 191#define ACPI_VIDEO_BACKLIGHT_DMI_VIDEO 0x0200
192#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400 192#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400
193#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800 193#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800
194#define ACPI_VIDEO_SKIP_BACKLIGHT 0x1000
195 194
196#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) 195#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
197 196
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 297462b9f41a..e9ac882868c0 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -542,8 +542,7 @@ int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
542bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor); 542bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
543 543
544int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen); 544int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
545int task_cgroup_path_from_hierarchy(struct task_struct *task, int hierarchy_id, 545int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
546 char *buf, size_t buflen);
547 546
548int cgroup_task_count(const struct cgroup *cgrp); 547int cgroup_task_count(const struct cgroup *cgrp);
549 548
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 6e7ec64b69ab..b613ffd402d1 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -1,86 +1,55 @@
1/* Add subsystem definitions of the form SUBSYS(<name>) in this 1/*
2 * file. Surround each one by a line of comment markers so that 2 * List of cgroup subsystems.
3 * patches don't collide 3 *
4 * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
4 */ 5 */
5
6/* */
7
8/* */
9
10#if IS_SUBSYS_ENABLED(CONFIG_CPUSETS) 6#if IS_SUBSYS_ENABLED(CONFIG_CPUSETS)
11SUBSYS(cpuset) 7SUBSYS(cpuset)
12#endif 8#endif
13 9
14/* */
15
16#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEBUG) 10#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEBUG)
17SUBSYS(debug) 11SUBSYS(debug)
18#endif 12#endif
19 13
20/* */
21
22#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_SCHED) 14#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_SCHED)
23SUBSYS(cpu_cgroup) 15SUBSYS(cpu_cgroup)
24#endif 16#endif
25 17
26/* */
27
28#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_CPUACCT) 18#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_CPUACCT)
29SUBSYS(cpuacct) 19SUBSYS(cpuacct)
30#endif 20#endif
31 21
32/* */
33
34#if IS_SUBSYS_ENABLED(CONFIG_MEMCG) 22#if IS_SUBSYS_ENABLED(CONFIG_MEMCG)
35SUBSYS(mem_cgroup) 23SUBSYS(mem_cgroup)
36#endif 24#endif
37 25
38/* */
39
40#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEVICE) 26#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEVICE)
41SUBSYS(devices) 27SUBSYS(devices)
42#endif 28#endif
43 29
44/* */
45
46#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_FREEZER) 30#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_FREEZER)
47SUBSYS(freezer) 31SUBSYS(freezer)
48#endif 32#endif
49 33
50/* */
51
52#if IS_SUBSYS_ENABLED(CONFIG_NET_CLS_CGROUP) 34#if IS_SUBSYS_ENABLED(CONFIG_NET_CLS_CGROUP)
53SUBSYS(net_cls) 35SUBSYS(net_cls)
54#endif 36#endif
55 37
56/* */
57
58#if IS_SUBSYS_ENABLED(CONFIG_BLK_CGROUP) 38#if IS_SUBSYS_ENABLED(CONFIG_BLK_CGROUP)
59SUBSYS(blkio) 39SUBSYS(blkio)
60#endif 40#endif
61 41
62/* */
63
64#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_PERF) 42#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_PERF)
65SUBSYS(perf) 43SUBSYS(perf)
66#endif 44#endif
67 45
68/* */
69
70#if IS_SUBSYS_ENABLED(CONFIG_NETPRIO_CGROUP) 46#if IS_SUBSYS_ENABLED(CONFIG_NETPRIO_CGROUP)
71SUBSYS(net_prio) 47SUBSYS(net_prio)
72#endif 48#endif
73 49
74/* */
75
76#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_HUGETLB) 50#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_HUGETLB)
77SUBSYS(hugetlb) 51SUBSYS(hugetlb)
78#endif 52#endif
79 53/*
80/* */ 54 * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
81 55 */
82#ifdef CONFIG_CGROUP_BCACHE
83SUBSYS(bcache)
84#endif
85
86/* */
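
For context, this header is an x-macro list: each consumer defines SUBSYS(), includes the file, and undefines it again, so stripping the comment separators changes nothing for users. A simplified sketch of the consuming pattern, modeled loosely on how cgroup.h builds its subsystem-id enum (details abbreviated):

#include <linux/kconfig.h>

/* Each inclusion site decides what SUBSYS() expands to. */
#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
#define SUBSYS(_x) _x ## _subsys_id,
enum example_subsys_id {
#include <linux/cgroup_subsys.h>
	EXAMPLE_SUBSYS_COUNT,
};
#undef SUBSYS
#undef IS_SUBSYS_ENABLED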
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index b3cb71f0d3b0..a9c96d865ee7 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -3,10 +3,6 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6#define CRC_T10DIF_DIGEST_SIZE 2
7#define CRC_T10DIF_BLOCK_SIZE 1
8
9__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len);
10__u16 crc_t10dif(unsigned char const *, size_t); 6__u16 crc_t10dif(unsigned char const *, size_t);
11 7
12#endif 8#endif
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index b90337c9d468..4a12532da8c4 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -336,6 +336,7 @@ extern int d_validate(struct dentry *, struct dentry *);
336 * helper function for dentry_operations.d_dname() members 336 * helper function for dentry_operations.d_dname() members
337 */ 337 */
338extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); 338extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
339extern char *simple_dname(struct dentry *, char *, int);
339 340
340extern char *__d_path(const struct path *, const struct path *, char *, int); 341extern char *__d_path(const struct path *, const struct path *, char *, int);
341extern char *d_absolute_path(const struct path *, char *, int); 342extern char *d_absolute_path(const struct path *, char *, int);
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 1b4d4ee1168f..de7d74ab3de6 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -177,7 +177,11 @@ enum drbd_ret_code {
177 ERR_NEED_APV_100 = 163, 177 ERR_NEED_APV_100 = 163,
178 ERR_NEED_ALLOW_TWO_PRI = 164, 178 ERR_NEED_ALLOW_TWO_PRI = 164,
179 ERR_MD_UNCLEAN = 165, 179 ERR_MD_UNCLEAN = 165,
180 180 ERR_MD_LAYOUT_CONNECTED = 166,
181 ERR_MD_LAYOUT_TOO_BIG = 167,
182 ERR_MD_LAYOUT_TOO_SMALL = 168,
183 ERR_MD_LAYOUT_NO_FIT = 169,
184 ERR_IMPLICIT_SHRINK = 170,
181 /* insert new ones above this line */ 185 /* insert new ones above this line */
182 AFTER_LAST_ERR_CODE 186 AFTER_LAST_ERR_CODE
183}; 187};
diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h
index d0d8fac8a6e4..e8c44572b8cb 100644
--- a/include/linux/drbd_genl.h
+++ b/include/linux/drbd_genl.h
@@ -181,6 +181,8 @@ GENL_struct(DRBD_NLA_RESIZE_PARMS, 7, resize_parms,
181 __u64_field(1, DRBD_GENLA_F_MANDATORY, resize_size) 181 __u64_field(1, DRBD_GENLA_F_MANDATORY, resize_size)
182 __flg_field(2, DRBD_GENLA_F_MANDATORY, resize_force) 182 __flg_field(2, DRBD_GENLA_F_MANDATORY, resize_force)
183 __flg_field(3, DRBD_GENLA_F_MANDATORY, no_resync) 183 __flg_field(3, DRBD_GENLA_F_MANDATORY, no_resync)
184 __u32_field_def(4, 0 /* OPTIONAL */, al_stripes, DRBD_AL_STRIPES_DEF)
185 __u32_field_def(5, 0 /* OPTIONAL */, al_stripe_size, DRBD_AL_STRIPE_SIZE_DEF)
184) 186)
185 187
186GENL_struct(DRBD_NLA_STATE_INFO, 8, state_info, 188GENL_struct(DRBD_NLA_STATE_INFO, 8, state_info,
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index 1fedf2b17cc8..17e50bb00521 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -215,4 +215,13 @@
215#define DRBD_ALWAYS_ASBP_DEF 0 215#define DRBD_ALWAYS_ASBP_DEF 0
216#define DRBD_USE_RLE_DEF 1 216#define DRBD_USE_RLE_DEF 1
217 217
218#define DRBD_AL_STRIPES_MIN 1
219#define DRBD_AL_STRIPES_MAX 1024
220#define DRBD_AL_STRIPES_DEF 1
221#define DRBD_AL_STRIPES_SCALE '1'
222
223#define DRBD_AL_STRIPE_SIZE_MIN 4
224#define DRBD_AL_STRIPE_SIZE_MAX 16777216
225#define DRBD_AL_STRIPE_SIZE_DEF 32
226#define DRBD_AL_STRIPE_SIZE_SCALE 'k' /* kilobytes */
218#endif 227#endif
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 0b763276f619..5c6d7fbaf89e 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -622,7 +622,7 @@ struct edac_raw_error_desc {
622 */ 622 */
623struct mem_ctl_info { 623struct mem_ctl_info {
624 struct device dev; 624 struct device dev;
625 struct bus_type bus; 625 struct bus_type *bus;
626 626
627 struct list_head link; /* for global list of mem_ctl_info structs */ 627 struct list_head link; /* for global list of mem_ctl_info structs */
628 628
@@ -742,4 +742,9 @@ struct mem_ctl_info {
742#endif 742#endif
743}; 743};
744 744
745/*
746 * Maximum number of memory controllers in the coherent fabric.
747 */
748#define EDAC_MAX_MCS 16
749
745#endif 750#endif
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 3b0e820375ab..5d7782e42b8f 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -436,6 +436,7 @@ struct fw_iso_context {
436 int type; 436 int type;
437 int channel; 437 int channel;
438 int speed; 438 int speed;
439 bool drop_overflow_headers;
439 size_t header_size; 440 size_t header_size;
440 union { 441 union {
441 fw_iso_callback_t sc; 442 fw_iso_callback_t sc;
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 4372658c73ae..120d57a1c3a5 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -78,6 +78,11 @@ struct trace_iterator {
78 /* trace_seq for __print_flags() and __print_symbolic() etc. */ 78 /* trace_seq for __print_flags() and __print_symbolic() etc. */
79 struct trace_seq tmp_seq; 79 struct trace_seq tmp_seq;
80 80
81 cpumask_var_t started;
82
83 /* it's true when current open file is snapshot */
84 bool snapshot;
85
81 /* The below is zeroed out in pipe_read */ 86 /* The below is zeroed out in pipe_read */
82 struct trace_seq seq; 87 struct trace_seq seq;
83 struct trace_entry *ent; 88 struct trace_entry *ent;
@@ -90,10 +95,7 @@ struct trace_iterator {
90 loff_t pos; 95 loff_t pos;
91 long idx; 96 long idx;
92 97
93 cpumask_var_t started; 98 /* All new field here will be zeroed out in pipe_read */
94
95 /* it's true when current open file is snapshot */
96 bool snapshot;
97}; 99};
98 100
99enum trace_iter_flags { 101enum trace_iter_flags {
@@ -332,7 +334,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
332 const char *name, int offset, int size, 334 const char *name, int offset, int size,
333 int is_signed, int filter_type); 335 int is_signed, int filter_type);
334extern int trace_add_event_call(struct ftrace_event_call *call); 336extern int trace_add_event_call(struct ftrace_event_call *call);
335extern void trace_remove_event_call(struct ftrace_event_call *call); 337extern int trace_remove_event_call(struct ftrace_event_call *call);
336 338
337#define is_signed_type(type) (((type)(-1)) < (type)1) 339#define is_signed_type(type) (((type)(-1)) < (type)1)
338 340
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 3b589440ecfe..9231be9e90a2 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -18,11 +18,21 @@ enum hdmi_infoframe_type {
18 HDMI_INFOFRAME_TYPE_AUDIO = 0x84, 18 HDMI_INFOFRAME_TYPE_AUDIO = 0x84,
19}; 19};
20 20
21#define HDMI_IEEE_OUI 0x000c03
21#define HDMI_INFOFRAME_HEADER_SIZE 4 22#define HDMI_INFOFRAME_HEADER_SIZE 4
22#define HDMI_AVI_INFOFRAME_SIZE 13 23#define HDMI_AVI_INFOFRAME_SIZE 13
23#define HDMI_SPD_INFOFRAME_SIZE 25 24#define HDMI_SPD_INFOFRAME_SIZE 25
24#define HDMI_AUDIO_INFOFRAME_SIZE 10 25#define HDMI_AUDIO_INFOFRAME_SIZE 10
25 26
27#define HDMI_INFOFRAME_SIZE(type) \
28 (HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
29
30struct hdmi_any_infoframe {
31 enum hdmi_infoframe_type type;
32 unsigned char version;
33 unsigned char length;
34};
35
26enum hdmi_colorspace { 36enum hdmi_colorspace {
27 HDMI_COLORSPACE_RGB, 37 HDMI_COLORSPACE_RGB,
28 HDMI_COLORSPACE_YUV422, 38 HDMI_COLORSPACE_YUV422,
@@ -100,9 +110,6 @@ struct hdmi_avi_infoframe {
100 unsigned char version; 110 unsigned char version;
101 unsigned char length; 111 unsigned char length;
102 enum hdmi_colorspace colorspace; 112 enum hdmi_colorspace colorspace;
103 bool active_info_valid;
104 bool horizontal_bar_valid;
105 bool vertical_bar_valid;
106 enum hdmi_scan_mode scan_mode; 113 enum hdmi_scan_mode scan_mode;
107 enum hdmi_colorimetry colorimetry; 114 enum hdmi_colorimetry colorimetry;
108 enum hdmi_picture_aspect picture_aspect; 115 enum hdmi_picture_aspect picture_aspect;
@@ -218,14 +225,52 @@ int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame);
218ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame, 225ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
219 void *buffer, size_t size); 226 void *buffer, size_t size);
220 227
228enum hdmi_3d_structure {
229 HDMI_3D_STRUCTURE_INVALID = -1,
230 HDMI_3D_STRUCTURE_FRAME_PACKING = 0,
231 HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE,
232 HDMI_3D_STRUCTURE_LINE_ALTERNATIVE,
233 HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL,
234 HDMI_3D_STRUCTURE_L_DEPTH,
235 HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH,
236 HDMI_3D_STRUCTURE_TOP_AND_BOTTOM,
237 HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF = 8,
238};
239
240
221struct hdmi_vendor_infoframe { 241struct hdmi_vendor_infoframe {
222 enum hdmi_infoframe_type type; 242 enum hdmi_infoframe_type type;
223 unsigned char version; 243 unsigned char version;
224 unsigned char length; 244 unsigned char length;
225 u8 data[27]; 245 unsigned int oui;
246 u8 vic;
247 enum hdmi_3d_structure s3d_struct;
248 unsigned int s3d_ext_data;
226}; 249};
227 250
251int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame);
228ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, 252ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
229 void *buffer, size_t size); 253 void *buffer, size_t size);
230 254
255union hdmi_vendor_any_infoframe {
256 struct {
257 enum hdmi_infoframe_type type;
258 unsigned char version;
259 unsigned char length;
260 unsigned int oui;
261 } any;
262 struct hdmi_vendor_infoframe hdmi;
263};
264
265union hdmi_infoframe {
266 struct hdmi_any_infoframe any;
267 struct hdmi_avi_infoframe avi;
268 struct hdmi_spd_infoframe spd;
269 union hdmi_vendor_any_infoframe vendor;
270 struct hdmi_audio_infoframe audio;
271};
272
273ssize_t
274hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
275
231#endif /* _DRM_HDMI_H */ 276#endif /* _DRM_HDMI_H */
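
A sketch of the new unified packing entry point added above (the VIC value and buffer handling are assumptions): fill one member of the union, then hdmi_infoframe_pack() dispatches on frame->any.type and writes the header, checksum, and payload.

#include <linux/hdmi.h>

static ssize_t example_pack_avi(void *dst, size_t dst_size)
{
	union hdmi_infoframe frame;
	int ret;

	ret = hdmi_avi_infoframe_init(&frame.avi);
	if (ret < 0)
		return ret;

	frame.avi.colorspace = HDMI_COLORSPACE_RGB;
	frame.avi.video_code = 16;	/* hypothetical VIC: 1080p at 60 Hz */

	/* Needs at least HDMI_INFOFRAME_SIZE(AVI) bytes: 4 header + 13 payload. */
	return hdmi_infoframe_pack(&frame, dst, dst_size);
}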
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 3869c525b052..369cf2cd5144 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -8,6 +8,7 @@
8 */ 8 */
9#include <linux/irq.h> 9#include <linux/irq.h>
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/atomic.h>
11 12
12#ifndef _IIO_TRIGGER_H_ 13#ifndef _IIO_TRIGGER_H_
13#define _IIO_TRIGGER_H_ 14#define _IIO_TRIGGER_H_
@@ -61,7 +62,7 @@ struct iio_trigger {
61 62
62 struct list_head list; 63 struct list_head list;
63 struct list_head alloc_list; 64 struct list_head alloc_list;
64 int use_count; 65 atomic_t use_count;
65 66
66 struct irq_chip subirq_chip; 67 struct irq_chip subirq_chip;
67 int subirq_base; 68 int subirq_base;
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index b99cd23f3474..79640e015a86 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -5,45 +5,13 @@
5 5
6#include <linux/bitmap.h> 6#include <linux/bitmap.h>
7#include <linux/if.h> 7#include <linux/if.h>
8#include <linux/ip.h>
8#include <linux/netdevice.h> 9#include <linux/netdevice.h>
9#include <linux/rcupdate.h> 10#include <linux/rcupdate.h>
10#include <linux/timer.h> 11#include <linux/timer.h>
11#include <linux/sysctl.h> 12#include <linux/sysctl.h>
12#include <linux/rtnetlink.h> 13#include <linux/rtnetlink.h>
13 14
14enum
15{
16 IPV4_DEVCONF_FORWARDING=1,
17 IPV4_DEVCONF_MC_FORWARDING,
18 IPV4_DEVCONF_PROXY_ARP,
19 IPV4_DEVCONF_ACCEPT_REDIRECTS,
20 IPV4_DEVCONF_SECURE_REDIRECTS,
21 IPV4_DEVCONF_SEND_REDIRECTS,
22 IPV4_DEVCONF_SHARED_MEDIA,
23 IPV4_DEVCONF_RP_FILTER,
24 IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
25 IPV4_DEVCONF_BOOTP_RELAY,
26 IPV4_DEVCONF_LOG_MARTIANS,
27 IPV4_DEVCONF_TAG,
28 IPV4_DEVCONF_ARPFILTER,
29 IPV4_DEVCONF_MEDIUM_ID,
30 IPV4_DEVCONF_NOXFRM,
31 IPV4_DEVCONF_NOPOLICY,
32 IPV4_DEVCONF_FORCE_IGMP_VERSION,
33 IPV4_DEVCONF_ARP_ANNOUNCE,
34 IPV4_DEVCONF_ARP_IGNORE,
35 IPV4_DEVCONF_PROMOTE_SECONDARIES,
36 IPV4_DEVCONF_ARP_ACCEPT,
37 IPV4_DEVCONF_ARP_NOTIFY,
38 IPV4_DEVCONF_ACCEPT_LOCAL,
39 IPV4_DEVCONF_SRC_VMARK,
40 IPV4_DEVCONF_PROXY_ARP_PVLAN,
41 IPV4_DEVCONF_ROUTE_LOCALNET,
42 __IPV4_DEVCONF_MAX
43};
44
45#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
46
47struct ipv4_devconf { 15struct ipv4_devconf {
48 void *sysctl; 16 void *sysctl;
49 int data[IPV4_DEVCONF_MAX]; 17 int data[IPV4_DEVCONF_MAX];
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 850e95bc766c..b8b7dc755752 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -101,6 +101,7 @@ struct inet6_skb_parm {
101#define IP6SKB_FORWARDED 2 101#define IP6SKB_FORWARDED 2
102#define IP6SKB_REROUTED 4 102#define IP6SKB_REROUTED 4
103#define IP6SKB_ROUTERALERT 8 103#define IP6SKB_ROUTERALERT 8
104#define IP6SKB_FRAGMENTED 16
104}; 105};
105 106
106#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb)) 107#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3bef14c6586b..482ad2d84a32 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -629,7 +629,7 @@ extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
629static inline void tracing_start(void) { } 629static inline void tracing_start(void) { }
630static inline void tracing_stop(void) { } 630static inline void tracing_stop(void) { }
631static inline void ftrace_off_permanent(void) { } 631static inline void ftrace_off_permanent(void) { }
632static inline void trace_dump_stack(void) { } 632static inline void trace_dump_stack(int skip) { }
633 633
634static inline void tracing_on(void) { } 634static inline void tracing_on(void) { }
635static inline void tracing_off(void) { } 635static inline void tracing_off(void) { }
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index dab34a1deb2c..b6bdcd66c07d 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -103,15 +103,15 @@
103#define IMX6Q_GPR1_EXC_MON_MASK BIT(22) 103#define IMX6Q_GPR1_EXC_MON_MASK BIT(22)
104#define IMX6Q_GPR1_EXC_MON_OKAY 0x0 104#define IMX6Q_GPR1_EXC_MON_OKAY 0x0
105#define IMX6Q_GPR1_EXC_MON_SLVE BIT(22) 105#define IMX6Q_GPR1_EXC_MON_SLVE BIT(22)
106#define IMX6Q_GPR1_MIPI_IPU2_SEL_MASK BIT(21) 106#define IMX6Q_GPR1_ENET_CLK_SEL_MASK BIT(21)
107#define IMX6Q_GPR1_MIPI_IPU2_SEL_GASKET 0x0 107#define IMX6Q_GPR1_ENET_CLK_SEL_PAD 0
108#define IMX6Q_GPR1_MIPI_IPU2_SEL_IOMUX BIT(21) 108#define IMX6Q_GPR1_ENET_CLK_SEL_ANATOP BIT(21)
109#define IMX6Q_GPR1_MIPI_IPU1_MUX_MASK BIT(20) 109#define IMX6Q_GPR1_MIPI_IPU2_MUX_MASK BIT(20)
110#define IMX6Q_GPR1_MIPI_IPU1_MUX_GASKET 0x0
111#define IMX6Q_GPR1_MIPI_IPU1_MUX_IOMUX BIT(20)
112#define IMX6Q_GPR1_MIPI_IPU2_MUX_MASK BIT(19)
113#define IMX6Q_GPR1_MIPI_IPU2_MUX_GASKET 0x0 110#define IMX6Q_GPR1_MIPI_IPU2_MUX_GASKET 0x0
114#define IMX6Q_GPR1_MIPI_IPU2_MUX_IOMUX BIT(19) 111#define IMX6Q_GPR1_MIPI_IPU2_MUX_IOMUX BIT(20)
112#define IMX6Q_GPR1_MIPI_IPU1_MUX_MASK BIT(19)
113#define IMX6Q_GPR1_MIPI_IPU1_MUX_GASKET 0x0
114#define IMX6Q_GPR1_MIPI_IPU1_MUX_IOMUX BIT(19)
115#define IMX6Q_GPR1_PCIE_TEST_PD BIT(18) 115#define IMX6Q_GPR1_PCIE_TEST_PD BIT(18)
116#define IMX6Q_GPR1_IPU_VPU_MUX_MASK BIT(17) 116#define IMX6Q_GPR1_IPU_VPU_MUX_MASK BIT(17)
117#define IMX6Q_GPR1_IPU_VPU_MUX_IPU1 0x0 117#define IMX6Q_GPR1_IPU_VPU_MUX_IPU1 0x0
@@ -279,41 +279,88 @@
279#define IMX6Q_GPR13_CAN2_STOP_REQ BIT(29) 279#define IMX6Q_GPR13_CAN2_STOP_REQ BIT(29)
280#define IMX6Q_GPR13_CAN1_STOP_REQ BIT(28) 280#define IMX6Q_GPR13_CAN1_STOP_REQ BIT(28)
281#define IMX6Q_GPR13_ENET_STOP_REQ BIT(27) 281#define IMX6Q_GPR13_ENET_STOP_REQ BIT(27)
282#define IMX6Q_GPR13_SATA_PHY_8_MASK (0x7 << 24) 282#define IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK (0x7 << 24)
283#define IMX6Q_GPR13_SATA_PHY_8_0_5_DB (0x0 << 24) 283#define IMX6Q_GPR13_SATA_RX_EQ_VAL_0_5_DB (0x0 << 24)
284#define IMX6Q_GPR13_SATA_PHY_8_1_0_DB (0x1 << 24) 284#define IMX6Q_GPR13_SATA_RX_EQ_VAL_1_0_DB (0x1 << 24)
285#define IMX6Q_GPR13_SATA_PHY_8_1_5_DB (0x2 << 24) 285#define IMX6Q_GPR13_SATA_RX_EQ_VAL_1_5_DB (0x2 << 24)
286#define IMX6Q_GPR13_SATA_PHY_8_2_0_DB (0x3 << 24) 286#define IMX6Q_GPR13_SATA_RX_EQ_VAL_2_0_DB (0x3 << 24)
287#define IMX6Q_GPR13_SATA_PHY_8_2_5_DB (0x4 << 24) 287#define IMX6Q_GPR13_SATA_RX_EQ_VAL_2_5_DB (0x4 << 24)
288#define IMX6Q_GPR13_SATA_PHY_8_3_0_DB (0x5 << 24) 288#define IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB (0x5 << 24)
289#define IMX6Q_GPR13_SATA_PHY_8_3_5_DB (0x6 << 24) 289#define IMX6Q_GPR13_SATA_RX_EQ_VAL_3_5_DB (0x6 << 24)
290#define IMX6Q_GPR13_SATA_PHY_8_4_0_DB (0x7 << 24) 290#define IMX6Q_GPR13_SATA_RX_EQ_VAL_4_0_DB (0x7 << 24)
291#define IMX6Q_GPR13_SATA_PHY_7_MASK (0x1f << 19) 291#define IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK (0x1f << 19)
292#define IMX6Q_GPR13_SATA_PHY_7_SATA1I (0x10 << 19) 292#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1I (0x10 << 19)
293#define IMX6Q_GPR13_SATA_PHY_7_SATA1M (0x10 << 19) 293#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1M (0x10 << 19)
294#define IMX6Q_GPR13_SATA_PHY_7_SATA1X (0x1a << 19) 294#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1X (0x1a << 19)
295#define IMX6Q_GPR13_SATA_PHY_7_SATA2I (0x12 << 19) 295#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2I (0x12 << 19)
296#define IMX6Q_GPR13_SATA_PHY_7_SATA2M (0x12 << 19) 296#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M (0x12 << 19)
297#define IMX6Q_GPR13_SATA_PHY_7_SATA2X (0x1a << 19) 297#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2X (0x1a << 19)
298#define IMX6Q_GPR13_SATA_PHY_6_MASK (0x7 << 16) 298#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK (0x7 << 16)
299#define IMX6Q_GPR13_SATA_SPEED_MASK BIT(15) 299#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_1P_1F (0x0 << 16)
300#define IMX6Q_GPR13_SATA_SPEED_1P5G 0x0 300#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_2F (0x1 << 16)
301#define IMX6Q_GPR13_SATA_SPEED_3P0G BIT(15) 301#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_1P_4F (0x2 << 16)
302#define IMX6Q_GPR13_SATA_PHY_5 BIT(14) 302#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F (0x3 << 16)
303#define IMX6Q_GPR13_SATA_PHY_4_MASK (0x7 << 11) 303#define IMX6Q_GPR13_SATA_SPD_MODE_MASK BIT(15)
304#define IMX6Q_GPR13_SATA_PHY_4_16_16 (0x0 << 11) 304#define IMX6Q_GPR13_SATA_SPD_MODE_1P5G 0x0
305#define IMX6Q_GPR13_SATA_PHY_4_14_16 (0x1 << 11) 305#define IMX6Q_GPR13_SATA_SPD_MODE_3P0G BIT(15)
306#define IMX6Q_GPR13_SATA_PHY_4_12_16 (0x2 << 11) 306#define IMX6Q_GPR13_SATA_MPLL_SS_EN BIT(14)
307#define IMX6Q_GPR13_SATA_PHY_4_10_16 (0x3 << 11) 307#define IMX6Q_GPR13_SATA_TX_ATTEN_MASK (0x7 << 11)
308#define IMX6Q_GPR13_SATA_PHY_4_9_16 (0x4 << 11) 308#define IMX6Q_GPR13_SATA_TX_ATTEN_16_16 (0x0 << 11)
309#define IMX6Q_GPR13_SATA_PHY_4_8_16 (0x5 << 11) 309#define IMX6Q_GPR13_SATA_TX_ATTEN_14_16 (0x1 << 11)
310#define IMX6Q_GPR13_SATA_PHY_3_MASK (0xf << 7) 310#define IMX6Q_GPR13_SATA_TX_ATTEN_12_16 (0x2 << 11)
311#define IMX6Q_GPR13_SATA_PHY_3_OFF 0x7 311#define IMX6Q_GPR13_SATA_TX_ATTEN_10_16 (0x3 << 11)
312#define IMX6Q_GPR13_SATA_PHY_2_MASK (0x1f << 2) 312#define IMX6Q_GPR13_SATA_TX_ATTEN_9_16 (0x4 << 11)
313#define IMX6Q_GPR13_SATA_PHY_2_OFF 0x2 313#define IMX6Q_GPR13_SATA_TX_ATTEN_8_16 (0x5 << 11)
314#define IMX6Q_GPR13_SATA_PHY_1_MASK (0x3 << 0) 314#define IMX6Q_GPR13_SATA_TX_BOOST_MASK (0xf << 7)
315#define IMX6Q_GPR13_SATA_PHY_1_FAST (0x0 << 0) 315#define IMX6Q_GPR13_SATA_TX_BOOST_0_00_DB (0x0 << 7)
316#define IMX6Q_GPR13_SATA_PHY_1_MED (0x1 << 0) 316#define IMX6Q_GPR13_SATA_TX_BOOST_0_37_DB (0x1 << 7)
317#define IMX6Q_GPR13_SATA_PHY_1_SLOW (0x2 << 0) 317#define IMX6Q_GPR13_SATA_TX_BOOST_0_74_DB (0x2 << 7)
318 318#define IMX6Q_GPR13_SATA_TX_BOOST_1_11_DB (0x3 << 7)
319#define IMX6Q_GPR13_SATA_TX_BOOST_1_48_DB (0x4 << 7)
320#define IMX6Q_GPR13_SATA_TX_BOOST_1_85_DB (0x5 << 7)
321#define IMX6Q_GPR13_SATA_TX_BOOST_2_22_DB (0x6 << 7)
322#define IMX6Q_GPR13_SATA_TX_BOOST_2_59_DB (0x7 << 7)
323#define IMX6Q_GPR13_SATA_TX_BOOST_2_96_DB (0x8 << 7)
324#define IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB (0x9 << 7)
325#define IMX6Q_GPR13_SATA_TX_BOOST_3_70_DB (0xa << 7)
326#define IMX6Q_GPR13_SATA_TX_BOOST_4_07_DB (0xb << 7)
327#define IMX6Q_GPR13_SATA_TX_BOOST_4_44_DB (0xc << 7)
328#define IMX6Q_GPR13_SATA_TX_BOOST_4_81_DB (0xd << 7)
329#define IMX6Q_GPR13_SATA_TX_BOOST_5_28_DB (0xe << 7)
330#define IMX6Q_GPR13_SATA_TX_BOOST_5_75_DB (0xf << 7)
331#define IMX6Q_GPR13_SATA_TX_LVL_MASK (0x1f << 2)
332#define IMX6Q_GPR13_SATA_TX_LVL_0_937_V (0x00 << 2)
333#define IMX6Q_GPR13_SATA_TX_LVL_0_947_V (0x01 << 2)
334#define IMX6Q_GPR13_SATA_TX_LVL_0_957_V (0x02 << 2)
335#define IMX6Q_GPR13_SATA_TX_LVL_0_966_V (0x03 << 2)
336#define IMX6Q_GPR13_SATA_TX_LVL_0_976_V (0x04 << 2)
337#define IMX6Q_GPR13_SATA_TX_LVL_0_986_V (0x05 << 2)
338#define IMX6Q_GPR13_SATA_TX_LVL_0_996_V (0x06 << 2)
339#define IMX6Q_GPR13_SATA_TX_LVL_1_005_V (0x07 << 2)
340#define IMX6Q_GPR13_SATA_TX_LVL_1_015_V (0x08 << 2)
341#define IMX6Q_GPR13_SATA_TX_LVL_1_025_V (0x09 << 2)
342#define IMX6Q_GPR13_SATA_TX_LVL_1_035_V (0x0a << 2)
343#define IMX6Q_GPR13_SATA_TX_LVL_1_045_V (0x0b << 2)
344#define IMX6Q_GPR13_SATA_TX_LVL_1_054_V (0x0c << 2)
345#define IMX6Q_GPR13_SATA_TX_LVL_1_064_V (0x0d << 2)
346#define IMX6Q_GPR13_SATA_TX_LVL_1_074_V (0x0e << 2)
347#define IMX6Q_GPR13_SATA_TX_LVL_1_084_V (0x0f << 2)
348#define IMX6Q_GPR13_SATA_TX_LVL_1_094_V (0x10 << 2)
349#define IMX6Q_GPR13_SATA_TX_LVL_1_104_V (0x11 << 2)
350#define IMX6Q_GPR13_SATA_TX_LVL_1_113_V (0x12 << 2)
351#define IMX6Q_GPR13_SATA_TX_LVL_1_123_V (0x13 << 2)
352#define IMX6Q_GPR13_SATA_TX_LVL_1_133_V (0x14 << 2)
353#define IMX6Q_GPR13_SATA_TX_LVL_1_143_V (0x15 << 2)
354#define IMX6Q_GPR13_SATA_TX_LVL_1_152_V (0x16 << 2)
355#define IMX6Q_GPR13_SATA_TX_LVL_1_162_V (0x17 << 2)
356#define IMX6Q_GPR13_SATA_TX_LVL_1_172_V (0x18 << 2)
357#define IMX6Q_GPR13_SATA_TX_LVL_1_182_V (0x19 << 2)
358#define IMX6Q_GPR13_SATA_TX_LVL_1_191_V (0x1a << 2)
359#define IMX6Q_GPR13_SATA_TX_LVL_1_201_V (0x1b << 2)
360#define IMX6Q_GPR13_SATA_TX_LVL_1_211_V (0x1c << 2)
361#define IMX6Q_GPR13_SATA_TX_LVL_1_221_V (0x1d << 2)
362#define IMX6Q_GPR13_SATA_TX_LVL_1_230_V (0x1e << 2)
363#define IMX6Q_GPR13_SATA_TX_LVL_1_240_V (0x1f << 2)
364#define IMX6Q_GPR13_SATA_MPLL_CLK_EN BIT(1)
365#define IMX6Q_GPR13_SATA_TX_EDGE_RATE BIT(0)
319#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */ 366#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 8d73fe29796a..db1791bb997a 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -113,11 +113,27 @@
113#define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3) 113#define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3)
114#define CNTRLREG_TSCENB BIT(7) 114#define CNTRLREG_TSCENB BIT(7)
115 115
116/* FIFO READ Register */
117#define FIFOREAD_DATA_MASK (0xfff << 0)
118#define FIFOREAD_CHNLID_MASK (0xf << 16)
119
120/* Sequencer Status */
121#define SEQ_STATUS BIT(5)
122
116#define ADC_CLK 3000000 123#define ADC_CLK 3000000
117#define MAX_CLK_DIV 7 124#define MAX_CLK_DIV 7
118#define TOTAL_STEPS 16 125#define TOTAL_STEPS 16
119#define TOTAL_CHANNELS 8 126#define TOTAL_CHANNELS 8
120 127
128/*
129 * The ADC runs at 3 MHz, and it takes
130 * 15 cycles to latch one data output.
131 * Hence the idle time for the ADC to
132 * process one sample would be
133 * around 5 microseconds.
134 */
135#define IDLE_TIMEOUT 5 /* microsec */
136
121#define TSCADC_CELLS 2 137#define TSCADC_CELLS 2
122 138
123struct ti_tscadc_dev { 139struct ti_tscadc_dev {
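The arithmetic behind IDLE_TIMEOUT checks out: 15 ADC clock cycles at ADC_CLK = 3 MHz take 15 / 3,000,000 s = 5 us. A compile-time restatement of that reasoning (the cycle-count macro is illustrative, not from the header):

#define ADC_CYCLES_PER_SAMPLE	15	/* cycles to latch one output */
#define ADC_CLK_HZ		3000000	/* matches ADC_CLK above */

/* 15 cycles * 1,000,000 us/s / 3,000,000 Hz = 5 us, i.e. IDLE_TIMEOUT. */
_Static_assert(ADC_CYCLES_PER_SAMPLE * 1000000 / ADC_CLK_HZ == 5,
	       "one-sample latency should be 5 microseconds");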
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 8de8d8f22384..68029b30c3dc 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -309,21 +309,20 @@ struct mlx5_hca_cap {
309 __be16 max_desc_sz_rq; 309 __be16 max_desc_sz_rq;
310 u8 rsvd21[2]; 310 u8 rsvd21[2];
311 __be16 max_desc_sz_sq_dc; 311 __be16 max_desc_sz_sq_dc;
312 u8 rsvd22[4]; 312 __be32 max_qp_mcg;
313 __be16 max_qp_mcg; 313 u8 rsvd22[3];
314 u8 rsvd23;
315 u8 log_max_mcg; 314 u8 log_max_mcg;
316 u8 rsvd24; 315 u8 rsvd23;
317 u8 log_max_pd; 316 u8 log_max_pd;
318 u8 rsvd25; 317 u8 rsvd24;
319 u8 log_max_xrcd; 318 u8 log_max_xrcd;
320 u8 rsvd26[42]; 319 u8 rsvd25[42];
321 __be16 log_uar_page_sz; 320 __be16 log_uar_page_sz;
322 u8 rsvd27[28]; 321 u8 rsvd26[28];
323 u8 log_msx_atomic_size_qp; 322 u8 log_msx_atomic_size_qp;
324 u8 rsvd28[2]; 323 u8 rsvd27[2];
325 u8 log_msx_atomic_size_dc; 324 u8 log_msx_atomic_size_dc;
326 u8 rsvd29[76]; 325 u8 rsvd28[76];
327}; 326};
328 327
329 328
@@ -472,9 +471,8 @@ struct mlx5_eqe_cmd {
472struct mlx5_eqe_page_req { 471struct mlx5_eqe_page_req {
473 u8 rsvd0[2]; 472 u8 rsvd0[2];
474 __be16 func_id; 473 __be16 func_id;
475 u8 rsvd1[2]; 474 __be32 num_pages;
476 __be16 num_pages; 475 __be32 rsvd1[5];
477 __be32 rsvd2[5];
478}; 476};
479 477
480union ev_data { 478union ev_data {
@@ -690,6 +688,26 @@ struct mlx5_query_cq_mbox_out {
690 __be64 pas[0]; 688 __be64 pas[0];
691}; 689};
692 690
691struct mlx5_enable_hca_mbox_in {
692 struct mlx5_inbox_hdr hdr;
693 u8 rsvd[8];
694};
695
696struct mlx5_enable_hca_mbox_out {
697 struct mlx5_outbox_hdr hdr;
698 u8 rsvd[8];
699};
700
701struct mlx5_disable_hca_mbox_in {
702 struct mlx5_inbox_hdr hdr;
703 u8 rsvd[8];
704};
705
706struct mlx5_disable_hca_mbox_out {
707 struct mlx5_outbox_hdr hdr;
708 u8 rsvd[8];
709};
710
693struct mlx5_eq_context { 711struct mlx5_eq_context {
694 u8 status; 712 u8 status;
695 u8 ec_oi; 713 u8 ec_oi;
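Widening max_qp_mcg and num_pages to __be32 moves them onto 32-bit firmware fields, so readers must switch from be16_to_cpu() to be32_to_cpu(). A hedged sketch of the capability read (the helper name and any masking of reserved high bits are assumptions, not shown in this patch):

#include <asm/byteorder.h>
#include <linux/mlx5/device.h>
#include <linux/types.h>

/* 'cap' points at a firmware-filled struct mlx5_hca_cap. */
static u32 read_max_qp_mcg(const struct mlx5_hca_cap *cap)
{
	/* be32_to_cpu() replaces the old be16_to_cpu(); if the top byte
	 * of the field is reserved, the caller must mask it off. */
	return be32_to_cpu(cap->max_qp_mcg);
}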
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index f22e4419839b..8888381fc150 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -101,6 +101,8 @@ enum {
101 MLX5_CMD_OP_QUERY_ADAPTER = 0x101, 101 MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
102 MLX5_CMD_OP_INIT_HCA = 0x102, 102 MLX5_CMD_OP_INIT_HCA = 0x102,
103 MLX5_CMD_OP_TEARDOWN_HCA = 0x103, 103 MLX5_CMD_OP_TEARDOWN_HCA = 0x103,
104 MLX5_CMD_OP_ENABLE_HCA = 0x104,
105 MLX5_CMD_OP_DISABLE_HCA = 0x105,
104 MLX5_CMD_OP_QUERY_PAGES = 0x107, 106 MLX5_CMD_OP_QUERY_PAGES = 0x107,
105 MLX5_CMD_OP_MANAGE_PAGES = 0x108, 107 MLX5_CMD_OP_MANAGE_PAGES = 0x108,
106 MLX5_CMD_OP_SET_HCA_CAP = 0x109, 108 MLX5_CMD_OP_SET_HCA_CAP = 0x109,
@@ -356,7 +358,7 @@ struct mlx5_caps {
356 u32 reserved_lkey; 358 u32 reserved_lkey;
357 u8 local_ca_ack_delay; 359 u8 local_ca_ack_delay;
358 u8 log_max_mcg; 360 u8 log_max_mcg;
359 u16 max_qp_mcg; 361 u32 max_qp_mcg;
360 int min_page_sz; 362 int min_page_sz;
361}; 363};
362 364
@@ -689,8 +691,8 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
689int mlx5_pagealloc_start(struct mlx5_core_dev *dev); 691int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
690void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); 692void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
691void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, 693void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
692 s16 npages); 694 s32 npages);
693int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev); 695int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
694int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); 696int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
695void mlx5_register_debugfs(void); 697void mlx5_register_debugfs(void);
696void mlx5_unregister_debugfs(void); 698void mlx5_unregister_debugfs(void);
@@ -729,9 +731,6 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
729int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); 731int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
730void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); 732void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
731 733
732typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size);
733int mlx5_register_health_report_handler(health_handler_t handler);
734void mlx5_unregister_health_report_handler(void);
735const char *mlx5_command_str(int command); 734const char *mlx5_command_str(int command);
736int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); 735int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
737void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); 736void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index fb425aa16c01..faf4b7c1ad12 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -332,6 +332,7 @@ struct mm_struct {
332 unsigned long pgoff, unsigned long flags); 332 unsigned long pgoff, unsigned long flags);
333#endif 333#endif
334 unsigned long mmap_base; /* base of mmap area */ 334 unsigned long mmap_base; /* base of mmap area */
335 unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
335 unsigned long task_size; /* size of task vm space */ 336 unsigned long task_size; /* size of task vm space */
336 unsigned long highest_vm_end; /* highest vma end address */ 337 unsigned long highest_vm_end; /* highest vma end address */
337 pgd_t * pgd; 338 pgd_t * pgd;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index b62d4af6c667..45e921401b06 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -361,7 +361,8 @@ struct ssb_device_id {
361 __u16 vendor; 361 __u16 vendor;
362 __u16 coreid; 362 __u16 coreid;
363 __u8 revision; 363 __u8 revision;
364}; 364 __u8 __pad;
365} __attribute__((packed, aligned(2)));
365#define SSB_DEVICE(_vendor, _coreid, _revision) \ 366#define SSB_DEVICE(_vendor, _coreid, _revision) \
366 { .vendor = _vendor, .coreid = _coreid, .revision = _revision, } 367 { .vendor = _vendor, .coreid = _coreid, .revision = _revision, }
367#define SSB_DEVTABLE_END \ 368#define SSB_DEVTABLE_END \
@@ -377,7 +378,7 @@ struct bcma_device_id {
377 __u16 id; 378 __u16 id;
378 __u8 rev; 379 __u8 rev;
379 __u8 class; 380 __u8 class;
380}; 381} __attribute__((packed,aligned(2)));
381#define BCMA_CORE(_manuf, _id, _rev, _class) \ 382#define BCMA_CORE(_manuf, _id, _rev, _class) \
382 { .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, } 383 { .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, }
383#define BCMA_CORETABLE_END \ 384#define BCMA_CORETABLE_END \
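The explicit pad byte plus packed, aligned(2) pins these ID structs to the same size and alignment on every architecture, so the module device tables emitted at build time match what the kernel expects. An illustrative compile-time check of that intent (not from the patch):

#include <linux/mod_devicetable.h>

/* __u16 + __u16 + __u8 + __u8 pad = 6 bytes on every host ABI. */
_Static_assert(sizeof(struct ssb_device_id) == 6,
	       "ssb_device_id layout must not vary across architectures");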
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0741a1e919a5..9a4156845e93 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -973,7 +973,7 @@ struct net_device_ops {
973 gfp_t gfp); 973 gfp_t gfp);
974 void (*ndo_netpoll_cleanup)(struct net_device *dev); 974 void (*ndo_netpoll_cleanup)(struct net_device *dev);
975#endif 975#endif
976#ifdef CONFIG_NET_LL_RX_POLL 976#ifdef CONFIG_NET_RX_BUSY_POLL
977 int (*ndo_busy_poll)(struct napi_struct *dev); 977 int (*ndo_busy_poll)(struct napi_struct *dev);
978#endif 978#endif
979 int (*ndo_set_vf_mac)(struct net_device *dev, 979 int (*ndo_set_vf_mac)(struct net_device *dev,
diff --git a/include/linux/platform_data/mmc-pxamci.h b/include/linux/platform_data/mmc-pxamci.h
index 9eb515bb799d..1706b3597ce0 100644
--- a/include/linux/platform_data/mmc-pxamci.h
+++ b/include/linux/platform_data/mmc-pxamci.h
@@ -12,7 +12,7 @@ struct pxamci_platform_data {
12 unsigned long detect_delay_ms; /* delay in millisecond before detecting cards after interrupt */ 12 unsigned long detect_delay_ms; /* delay in millisecond before detecting cards after interrupt */
13 int (*init)(struct device *, irq_handler_t , void *); 13 int (*init)(struct device *, irq_handler_t , void *);
14 int (*get_ro)(struct device *); 14 int (*get_ro)(struct device *);
15 void (*setpower)(struct device *, unsigned int); 15 int (*setpower)(struct device *, unsigned int);
16 void (*exit)(struct device *, void *); 16 void (*exit)(struct device *, void *);
17 int gpio_card_detect; /* gpio detecting card insertion */ 17 int gpio_card_detect; /* gpio detecting card insertion */
18 int gpio_card_ro; /* gpio detecting read only toggle */ 18 int gpio_card_ro; /* gpio detecting read only toggle */
diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h
index 80587fdbba3e..1a2e9901a22e 100644
--- a/include/linux/platform_data/rcar-du.h
+++ b/include/linux/platform_data/rcar-du.h
@@ -16,8 +16,18 @@
16 16
17#include <drm/drm_mode.h> 17#include <drm/drm_mode.h>
18 18
19enum rcar_du_output {
20 RCAR_DU_OUTPUT_DPAD0,
21 RCAR_DU_OUTPUT_DPAD1,
22 RCAR_DU_OUTPUT_LVDS0,
23 RCAR_DU_OUTPUT_LVDS1,
24 RCAR_DU_OUTPUT_TCON,
25 RCAR_DU_OUTPUT_MAX,
26};
27
19enum rcar_du_encoder_type { 28enum rcar_du_encoder_type {
20 RCAR_DU_ENCODER_UNUSED = 0, 29 RCAR_DU_ENCODER_UNUSED = 0,
30 RCAR_DU_ENCODER_NONE,
21 RCAR_DU_ENCODER_VGA, 31 RCAR_DU_ENCODER_VGA,
22 RCAR_DU_ENCODER_LVDS, 32 RCAR_DU_ENCODER_LVDS,
23}; 33};
@@ -28,22 +38,32 @@ struct rcar_du_panel_data {
28 struct drm_mode_modeinfo mode; 38 struct drm_mode_modeinfo mode;
29}; 39};
30 40
31struct rcar_du_encoder_lvds_data { 41struct rcar_du_connector_lvds_data {
32 struct rcar_du_panel_data panel; 42 struct rcar_du_panel_data panel;
33}; 43};
34 44
35struct rcar_du_encoder_vga_data { 45struct rcar_du_connector_vga_data {
36 /* TODO: Add DDC information for EDID retrieval */ 46 /* TODO: Add DDC information for EDID retrieval */
37}; 47};
38 48
49/*
50 * struct rcar_du_encoder_data - Encoder platform data
51 * @type: the encoder type (RCAR_DU_ENCODER_*)
52 * @output: the DU output the connector is connected to (RCAR_DU_OUTPUT_*)
53 * @connector.lvds: platform data for LVDS connectors
54 * @connector.vga: platform data for VGA connectors
55 *
56 * Encoder platform data describes an on-board encoder, its associated DU SoC
57 * output, and the connector.
58 */
39struct rcar_du_encoder_data { 59struct rcar_du_encoder_data {
40 enum rcar_du_encoder_type encoder; 60 enum rcar_du_encoder_type type;
41 unsigned int output; 61 enum rcar_du_output output;
42 62
43 union { 63 union {
44 struct rcar_du_encoder_lvds_data lvds; 64 struct rcar_du_connector_lvds_data lvds;
45 struct rcar_du_encoder_vga_data vga; 65 struct rcar_du_connector_vga_data vga;
46 } u; 66 } connector;
47}; 67};
48 68
49struct rcar_du_platform_data { 69struct rcar_du_platform_data {
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 75981d0b57dc..580a5320cc96 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -15,6 +15,7 @@
15 15
16#include <linux/list.h> 16#include <linux/list.h>
17#include <linux/rbtree.h> 17#include <linux/rbtree.h>
18#include <linux/err.h>
18 19
19struct module; 20struct module;
20struct device; 21struct device;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 50d04b92ceda..078066daffd4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1532,6 +1532,8 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1532 * Test if a process is not yet dead (at most zombie state) 1532 * Test if a process is not yet dead (at most zombie state)
1533 * If pid_alive fails, then pointers within the task structure 1533 * If pid_alive fails, then pointers within the task structure
1534 * can be stale and must not be dereferenced. 1534 * can be stale and must not be dereferenced.
1535 *
1536 * Return: 1 if the process is alive. 0 otherwise.
1535 */ 1537 */
1536static inline int pid_alive(struct task_struct *p) 1538static inline int pid_alive(struct task_struct *p)
1537{ 1539{
@@ -1543,6 +1545,8 @@ static inline int pid_alive(struct task_struct *p)
1543 * @tsk: Task structure to be checked. 1545 * @tsk: Task structure to be checked.
1544 * 1546 *
1545 * Check if a task structure is the first user space task the kernel created. 1547 * Check if a task structure is the first user space task the kernel created.
1548 *
1549 * Return: 1 if the task structure is init. 0 otherwise.
1546 */ 1550 */
1547static inline int is_global_init(struct task_struct *tsk) 1551static inline int is_global_init(struct task_struct *tsk)
1548{ 1552{
@@ -1628,6 +1632,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
1628#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ 1632#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
1629#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ 1633#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
1630#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ 1634#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
1635#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */
1631 1636
1632/* 1637/*
1633 * Only the _current_ task can read/write to tsk->flags, but other 1638 * Only the _current_ task can read/write to tsk->flags, but other
@@ -1893,6 +1898,8 @@ extern struct task_struct *idle_task(int cpu);
1893/** 1898/**
1894 * is_idle_task - is the specified task an idle task? 1899 * is_idle_task - is the specified task an idle task?
1895 * @p: the task in question. 1900 * @p: the task in question.
1901 *
1902 * Return: 1 if @p is an idle task. 0 otherwise.
1896 */ 1903 */
1897static inline bool is_idle_task(const struct task_struct *p) 1904static inline bool is_idle_task(const struct task_struct *p)
1898{ 1905{
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index 382cf710ca9a..5b1c9848124c 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -124,6 +124,10 @@ void shdma_chan_remove(struct shdma_chan *schan);
124int shdma_init(struct device *dev, struct shdma_dev *sdev, 124int shdma_init(struct device *dev, struct shdma_dev *sdev,
125 int chan_num); 125 int chan_num);
126void shdma_cleanup(struct shdma_dev *sdev); 126void shdma_cleanup(struct shdma_dev *sdev);
127#if IS_ENABLED(CONFIG_SH_DMAE_BASE)
127bool shdma_chan_filter(struct dma_chan *chan, void *arg); 128bool shdma_chan_filter(struct dma_chan *chan, void *arg);
129#else
130#define shdma_chan_filter NULL
131#endif
128 132
129#endif 133#endif
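With this guard, shdma_chan_filter degrades to a NULL filter when CONFIG_SH_DMAE_BASE is disabled, and dma_request_channel() accepts a NULL filter as "any channel with the requested capability", so callers keep compiling and working. A usage sketch (the slave parameter is an assumption):

#include <linux/dmaengine.h>
#include <linux/shdma-base.h>

static struct dma_chan *get_slave_chan(void *slave_param)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/* NULL filter (the !CONFIG_SH_DMAE_BASE case) means "any match". */
	return dma_request_channel(mask, shdma_chan_filter, slave_param);
}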
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5afefa01a13c..3b71a4e83642 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -501,7 +501,7 @@ struct sk_buff {
501 /* 7/9 bit hole (depending on ndisc_nodetype presence) */ 501 /* 7/9 bit hole (depending on ndisc_nodetype presence) */
502 kmemcheck_bitfield_end(flags2); 502 kmemcheck_bitfield_end(flags2);
503 503
504#if defined CONFIG_NET_DMA || defined CONFIG_NET_LL_RX_POLL 504#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
505 union { 505 union {
506 unsigned int napi_id; 506 unsigned int napi_id;
507 dma_cookie_t dma_cookie; 507 dma_cookie_t dma_cookie;
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7d537ced949a..75f34949d9ab 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -117,9 +117,17 @@ do { \
117#endif /*arch_spin_is_contended*/ 117#endif /*arch_spin_is_contended*/
118#endif 118#endif
119 119
120/* The lock does not imply full memory barrier. */ 120/*
121#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK 121 * Despite its name it doesn't necessarily have to be a full barrier.
122static inline void smp_mb__after_lock(void) { smp_mb(); } 122 * It should only guarantee that a STORE before the critical section
123 * can not be reordered with a LOAD inside this section.
124 * spin_lock() is the one-way barrier, this LOAD can not escape out
125 * of the region. So the default implementation simply ensures that
126 * a STORE can not move into the critical section, smp_wmb() should
127 * serialize it with another STORE done by spin_lock().
128 */
129#ifndef smp_mb__before_spinlock
130#define smp_mb__before_spinlock() smp_wmb()
123#endif 131#endif
124 132
125/** 133/**
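The new barrier serves exactly the pattern the comment describes: a STORE issued before taking the lock must stay ordered against LOADs performed inside the critical section (the wakeup path is the classic caller). A minimal sketch with illustrative names:

#include <linux/spinlock.h>

static void publish_then_inspect(spinlock_t *lock, int *flag,
				 const int *state, int *seen)
{
	*flag = 1;			/* STORE before the critical section */
	smp_mb__before_spinlock();	/* order it against the LOAD below  */
	spin_lock(lock);
	*seen = *state;			/* LOAD inside the critical section */
	spin_unlock(lock);
}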
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 6d870353674a..1821445708d6 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -121,6 +121,7 @@ struct rpc_task_setup {
121#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */ 121#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
122#define RPC_TASK_SENT 0x0800 /* message was sent */ 122#define RPC_TASK_SENT 0x0800 /* message was sent */
123#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */ 123#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */
124#define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */
124 125
125#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) 126#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
126#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) 127#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index c5fd30d2a415..8d4fa82bfb91 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -67,6 +67,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
67 swp_entry_t arch_entry; 67 swp_entry_t arch_entry;
68 68
69 BUG_ON(pte_file(pte)); 69 BUG_ON(pte_file(pte));
70 if (pte_swp_soft_dirty(pte))
71 pte = pte_swp_clear_soft_dirty(pte);
70 arch_entry = __pte_to_swp_entry(pte); 72 arch_entry = __pte_to_swp_entry(pte);
71 return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); 73 return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
72} 74}
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 4147d700a293..84662ecc7b51 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -802,9 +802,14 @@ asmlinkage long sys_vfork(void);
802asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int, 802asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int,
803 int __user *); 803 int __user *);
804#else 804#else
805#ifdef CONFIG_CLONE_BACKWARDS3
806asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
807 int __user *, int);
808#else
805asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, 809asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
806 int __user *, int); 810 int __user *, int);
807#endif 811#endif
812#endif
808 813
809asmlinkage long sys_execve(const char __user *filename, 814asmlinkage long sys_execve(const char __user *filename,
810 const char __user *const __user *argv, 815 const char __user *const __user *argv,
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 9180f4b85e6d..62bd8b72873c 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -174,10 +174,4 @@ static inline void tick_nohz_task_switch(struct task_struct *tsk) { }
174#endif 174#endif
175 175
176 176
177# ifdef CONFIG_CPU_IDLE_GOV_MENU
178extern void menu_hrtimer_cancel(void);
179# else
180static inline void menu_hrtimer_cancel(void) {}
181# endif /* CONFIG_CPU_IDLE_GOV_MENU */
182
183#endif 177#endif
diff --git a/include/linux/usb.h b/include/linux/usb.h
index a232b7ece1f6..0eec2689b955 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -367,17 +367,6 @@ struct usb_bus {
367 367
368/* ----------------------------------------------------------------------- */ 368/* ----------------------------------------------------------------------- */
369 369
370/* This is arbitrary.
371 * From USB 2.0 spec Table 11-13, offset 7, a hub can
372 * have up to 255 ports. The most yet reported is 10.
373 *
374 * Current Wireless USB host hardware (Intel i1480 for example) allows
375 * up to 22 devices to connect. Upcoming hardware might raise that
376 * limit. Because the arrays need to add a bit for hub status data, we
377 * do 31, so plus one evens out to four bytes.
378 */
379#define USB_MAXCHILDREN (31)
380
381struct usb_tt; 370struct usb_tt;
382 371
383enum usb_device_removable { 372enum usb_device_removable {
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index b6b215f13b45..14105c26a836 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -23,6 +23,7 @@ struct user_namespace {
23 struct uid_gid_map projid_map; 23 struct uid_gid_map projid_map;
24 atomic_t count; 24 atomic_t count;
25 struct user_namespace *parent; 25 struct user_namespace *parent;
26 int level;
26 kuid_t owner; 27 kuid_t owner;
27 kgid_t group; 28 kgid_t group;
28 unsigned int proc_inum; 29 unsigned int proc_inum;
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index ddb419cf4530..502073a53dd3 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -45,7 +45,8 @@ struct vga_switcheroo_client_ops {
45#if defined(CONFIG_VGA_SWITCHEROO) 45#if defined(CONFIG_VGA_SWITCHEROO)
46void vga_switcheroo_unregister_client(struct pci_dev *dev); 46void vga_switcheroo_unregister_client(struct pci_dev *dev);
47int vga_switcheroo_register_client(struct pci_dev *dev, 47int vga_switcheroo_register_client(struct pci_dev *dev,
48 const struct vga_switcheroo_client_ops *ops); 48 const struct vga_switcheroo_client_ops *ops,
49 bool driver_power_control);
49int vga_switcheroo_register_audio_client(struct pci_dev *pdev, 50int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
50 const struct vga_switcheroo_client_ops *ops, 51 const struct vga_switcheroo_client_ops *ops,
51 int id, bool active); 52 int id, bool active);
@@ -60,11 +61,15 @@ int vga_switcheroo_process_delayed_switch(void);
60 61
61int vga_switcheroo_get_client_state(struct pci_dev *dev); 62int vga_switcheroo_get_client_state(struct pci_dev *dev);
62 63
64void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
65
66int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
67int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
63#else 68#else
64 69
65static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} 70static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
66static inline int vga_switcheroo_register_client(struct pci_dev *dev, 71static inline int vga_switcheroo_register_client(struct pci_dev *dev,
67 const struct vga_switcheroo_client_ops *ops) { return 0; } 72 const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; }
68static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {} 73static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
69static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; } 74static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; }
70static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev, 75static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
@@ -74,6 +79,10 @@ static inline void vga_switcheroo_unregister_handler(void) {}
74static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } 79static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
75static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } 80static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
76 81
82static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
83
84static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
85static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
77 86
78#endif 87#endif
79#endif /* _LINUX_VGA_SWITCHEROO_H_ */ 88#endif /* _LINUX_VGA_SWITCHEROO_H_ */
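Every registered GPU driver now has to say, via the new third argument, whether it does its own dynamic power control. A probe-time sketch (the ops callbacks are hypothetical stubs, not from the patch):

#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

static void my_gpu_set_state(struct pci_dev *pdev,
			     enum vga_switcheroo_state state) { }
static void my_gpu_reprobe(struct pci_dev *pdev) { }
static bool my_gpu_can_switch(struct pci_dev *pdev) { return true; }

static const struct vga_switcheroo_client_ops my_gpu_switcheroo_ops = {
	.set_gpu_state	= my_gpu_set_state,
	.reprobe	= my_gpu_reprobe,
	.can_switch	= my_gpu_can_switch,
};

static int my_gpu_register(struct pci_dev *pdev)
{
	/* true: this driver implements dynamic power control itself */
	return vga_switcheroo_register_client(pdev,
					      &my_gpu_switcheroo_ops, true);
}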
diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h
index 76be077340ea..7dc17e2456de 100644
--- a/include/linux/vmpressure.h
+++ b/include/linux/vmpressure.h
@@ -12,7 +12,7 @@ struct vmpressure {
12 unsigned long scanned; 12 unsigned long scanned;
13 unsigned long reclaimed; 13 unsigned long reclaimed;
14 /* The lock is used to keep the scanned/reclaimed above in sync. */ 14 /* The lock is used to keep the scanned/reclaimed above in sync. */
15 struct mutex sr_lock; 15 struct spinlock sr_lock;
16 16
17 /* The list of vmpressure_event structs. */ 17 /* The list of vmpressure_event structs. */
18 struct list_head events; 18 struct list_head events;
@@ -30,6 +30,7 @@ extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
30extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio); 30extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
31 31
32extern void vmpressure_init(struct vmpressure *vmpr); 32extern void vmpressure_init(struct vmpressure *vmpr);
33extern void vmpressure_cleanup(struct vmpressure *vmpr);
33extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg); 34extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
34extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr); 35extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr);
35extern struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css); 36extern struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index f487a4750b7f..a67fc1635592 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -811,6 +811,63 @@ do { \
811 __ret; \ 811 __ret; \
812}) 812})
813 813
814#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
815 lock, ret) \
816do { \
817 DEFINE_WAIT(__wait); \
818 \
819 for (;;) { \
820 prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
821 if (condition) \
822 break; \
823 if (signal_pending(current)) { \
824 ret = -ERESTARTSYS; \
825 break; \
826 } \
827 spin_unlock_irq(&lock); \
828 ret = schedule_timeout(ret); \
829 spin_lock_irq(&lock); \
830 if (!ret) \
831 break; \
832 } \
833 finish_wait(&wq, &__wait); \
834} while (0)
835
836/**
837 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
838 * The condition is checked under the lock. This is expected
839 * to be called with the lock taken.
840 * @wq: the waitqueue to wait on
841 * @condition: a C expression for the event to wait for
842 * @lock: a locked spinlock_t, which will be released before schedule()
843 * and reacquired afterwards.
844 * @timeout: timeout, in jiffies
845 *
846 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
847 * @condition evaluates to true or a signal is received. The @condition is
848 * checked each time the waitqueue @wq is woken up.
849 *
850 * wake_up() has to be called after changing any variable that could
851 * change the result of the wait condition.
852 *
853 * This is supposed to be called while holding the lock. The lock is
854 * dropped before going to sleep and is reacquired afterwards.
855 *
856 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
857 * was interrupted by a signal, and the remaining jiffies if the
858 * condition evaluated to true before the timeout elapsed.
859 */
860#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
861 timeout) \
862({ \
863 int __ret = timeout; \
864 \
865 if (!(condition)) \
866 __wait_event_interruptible_lock_irq_timeout( \
867 wq, condition, lock, __ret); \
868 __ret; \
869})
870
814 871
815/* 872/*
816 * These are the old interfaces to sleep waiting for an event. 873 * These are the old interfaces to sleep waiting for an event.
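A usage sketch for the new macro under its documented calling convention (the device structure and field names are assumptions): enter with the lock held and map the three documented outcomes onto return codes.

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct my_dev {
	spinlock_t lock;
	wait_queue_head_t wq;
	bool done;
};

static int my_dev_wait(struct my_dev *dev)
{
	int ret;

	spin_lock_irq(&dev->lock);
	ret = wait_event_interruptible_lock_irq_timeout(dev->wq, dev->done,
							dev->lock, HZ);
	spin_unlock_irq(&dev->lock);

	if (!ret)
		return -ETIMEDOUT;	/* the timeout elapsed */
	if (ret == -ERESTARTSYS)
		return ret;		/* interrupted by a signal */
	return 0;			/* condition true, jiffies remained */
}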
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 7343a27fe819..47ada23345a1 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -22,6 +22,7 @@
22#define _V4L2_CTRLS_H 22#define _V4L2_CTRLS_H
23 23
24#include <linux/list.h> 24#include <linux/list.h>
25#include <linux/mutex.h>
25#include <linux/videodev2.h> 26#include <linux/videodev2.h>
26 27
27/* forward references */ 28/* forward references */
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index a14339c2985f..8a358a2c97e6 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -27,7 +27,7 @@
27#include <linux/netdevice.h> 27#include <linux/netdevice.h>
28#include <net/ip.h> 28#include <net/ip.h>
29 29
30#ifdef CONFIG_NET_LL_RX_POLL 30#ifdef CONFIG_NET_RX_BUSY_POLL
31 31
32struct napi_struct; 32struct napi_struct;
33extern unsigned int sysctl_net_busy_read __read_mostly; 33extern unsigned int sysctl_net_busy_read __read_mostly;
@@ -122,7 +122,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
122 if (rc > 0) 122 if (rc > 0)
123 /* local bh are disabled so it is ok to use _BH */ 123 /* local bh are disabled so it is ok to use _BH */
124 NET_ADD_STATS_BH(sock_net(sk), 124 NET_ADD_STATS_BH(sock_net(sk),
125 LINUX_MIB_LOWLATENCYRXPACKETS, rc); 125 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
126 126
127 } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && 127 } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
128 !need_resched() && !busy_loop_timeout(end_time)); 128 !need_resched() && !busy_loop_timeout(end_time));
@@ -146,7 +146,7 @@ static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
146 sk->sk_napi_id = skb->napi_id; 146 sk->sk_napi_id = skb->napi_id;
147} 147}
148 148
149#else /* CONFIG_NET_LL_RX_POLL */ 149#else /* CONFIG_NET_RX_BUSY_POLL */
150static inline unsigned long net_busy_loop_on(void) 150static inline unsigned long net_busy_loop_on(void)
151{ 151{
152 return 0; 152 return 0;
@@ -162,11 +162,6 @@ static inline bool sk_can_busy_loop(struct sock *sk)
162 return false; 162 return false;
163} 163}
164 164
165static inline bool sk_busy_poll(struct sock *sk, int nonblock)
166{
167 return false;
168}
169
170static inline void skb_mark_napi_id(struct sk_buff *skb, 165static inline void skb_mark_napi_id(struct sk_buff *skb,
171 struct napi_struct *napi) 166 struct napi_struct *napi)
172{ 167{
@@ -181,5 +176,10 @@ static inline bool busy_loop_timeout(unsigned long end_time)
181 return true; 176 return true;
182} 177}
183 178
184#endif /* CONFIG_NET_LL_RX_POLL */ 179static inline bool sk_busy_loop(struct sock *sk, int nonblock)
180{
181 return false;
182}
183
184#endif /* CONFIG_NET_RX_BUSY_POLL */
185#endif /* _LINUX_NET_BUSY_POLL_H */ 185#endif /* _LINUX_NET_BUSY_POLL_H */
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 2a601e7da1bf..48ec25a7fcb6 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -300,7 +300,7 @@ extern void inet6_rt_notify(int event, struct rt6_info *rt,
300 struct nl_info *info); 300 struct nl_info *info);
301 301
302extern void fib6_run_gc(unsigned long expires, 302extern void fib6_run_gc(unsigned long expires,
303 struct net *net); 303 struct net *net, bool force);
304 304
305extern void fib6_gc_cleanup(void); 305extern void fib6_gc_cleanup(void);
306 306
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 260f83f16bcf..f667248202b6 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -135,6 +135,8 @@ extern void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
135extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, 135extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk,
136 __be32 mtu); 136 __be32 mtu);
137extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark); 137extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
138extern void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
139 u32 mark);
138extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk); 140extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
139 141
140struct netlink_callback; 142struct netlink_callback;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 781b3cf86a2f..a354db5b7662 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -145,20 +145,6 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
145 return INET_ECN_encapsulate(tos, inner); 145 return INET_ECN_encapsulate(tos, inner);
146} 146}
147 147
148static inline void tunnel_ip_select_ident(struct sk_buff *skb,
149 const struct iphdr *old_iph,
150 struct dst_entry *dst)
151{
152 struct iphdr *iph = ip_hdr(skb);
153
154 /* Use inner packet iph-id if possible. */
155 if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
156 iph->id = old_iph->id;
157 else
158 __ip_select_ident(iph, dst,
159 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
160}
161
162int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); 148int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
163int iptunnel_xmit(struct net *net, struct rtable *rt, 149int iptunnel_xmit(struct net *net, struct rtable *rt,
164 struct sk_buff *skb, 150 struct sk_buff *skb,
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 949d77528f2f..6fea32340ae8 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -119,7 +119,7 @@ extern struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
119 * if RFC 3831 IPv6-over-Fibre Channel is ever implemented it may 119 * if RFC 3831 IPv6-over-Fibre Channel is ever implemented it may
120 * also need a pad of 2. 120 * also need a pad of 2.
121 */ 121 */
122static int ndisc_addr_option_pad(unsigned short type) 122static inline int ndisc_addr_option_pad(unsigned short type)
123{ 123{
124 switch (type) { 124 switch (type) {
125 case ARPHRD_INFINIBAND: return 2; 125 case ARPHRD_INFINIBAND: return 2;
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 0af851c3b038..b64b7bce4b94 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -59,7 +59,7 @@ struct nfc_hci_ops {
59 struct nfc_target *target); 59 struct nfc_target *target);
60 int (*event_received)(struct nfc_hci_dev *hdev, u8 gate, u8 event, 60 int (*event_received)(struct nfc_hci_dev *hdev, u8 gate, u8 event,
61 struct sk_buff *skb); 61 struct sk_buff *skb);
62 int (*fw_upload)(struct nfc_hci_dev *hdev, const char *firmware_name); 62 int (*fw_download)(struct nfc_hci_dev *hdev, const char *firmware_name);
63 int (*discover_se)(struct nfc_hci_dev *dev); 63 int (*discover_se)(struct nfc_hci_dev *dev);
64 int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx); 64 int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx);
65 int (*disable_se)(struct nfc_hci_dev *dev, u32 se_idx); 65 int (*disable_se)(struct nfc_hci_dev *dev, u32 se_idx);
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 0e353f1658bb..5f286b726bb6 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -68,7 +68,7 @@ struct nfc_ops {
68 void *cb_context); 68 void *cb_context);
69 int (*tm_send)(struct nfc_dev *dev, struct sk_buff *skb); 69 int (*tm_send)(struct nfc_dev *dev, struct sk_buff *skb);
70 int (*check_presence)(struct nfc_dev *dev, struct nfc_target *target); 70 int (*check_presence)(struct nfc_dev *dev, struct nfc_target *target);
71 int (*fw_upload)(struct nfc_dev *dev, const char *firmware_name); 71 int (*fw_download)(struct nfc_dev *dev, const char *firmware_name);
72 72
73 /* Secure Element API */ 73 /* Secure Element API */
74 int (*discover_se)(struct nfc_dev *dev); 74 int (*discover_se)(struct nfc_dev *dev);
@@ -127,7 +127,7 @@ struct nfc_dev {
127 int targets_generation; 127 int targets_generation;
128 struct device dev; 128 struct device dev;
129 bool dev_up; 129 bool dev_up;
130 bool fw_upload_in_progress; 130 bool fw_download_in_progress;
131 u8 rf_mode; 131 u8 rf_mode;
132 bool polling; 132 bool polling;
133 struct nfc_target *active_target; 133 struct nfc_target *active_target;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 6eab63363e59..e5ae0c50fa9c 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -683,13 +683,19 @@ struct psched_ratecfg {
683 u64 rate_bytes_ps; /* bytes per second */ 683 u64 rate_bytes_ps; /* bytes per second */
684 u32 mult; 684 u32 mult;
685 u16 overhead; 685 u16 overhead;
686 u8 linklayer;
686 u8 shift; 687 u8 shift;
687}; 688};
688 689
689static inline u64 psched_l2t_ns(const struct psched_ratecfg *r, 690static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
690 unsigned int len) 691 unsigned int len)
691{ 692{
692 return ((u64)(len + r->overhead) * r->mult) >> r->shift; 693 len += r->overhead;
694
695 if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
696 return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;
697
698 return ((u64)len * r->mult) >> r->shift;
693} 699}
694 700
695extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf); 701extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
@@ -700,6 +706,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
700 memset(res, 0, sizeof(*res)); 706 memset(res, 0, sizeof(*res));
701 res->rate = r->rate_bytes_ps; 707 res->rate = r->rate_bytes_ps;
702 res->overhead = r->overhead; 708 res->overhead = r->overhead;
709 res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
703} 710}
704 711
705#endif 712#endif
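For TC_LINKLAYER_ATM the payload is carved into 48-byte chunks, each carried in a 53-byte cell, so a 100-byte frame (overhead included) costs DIV_ROUND_UP(100, 48) = 3 cells, i.e. 159 bytes on the wire, and it is that padded length that mult and shift then scale. A standalone restatement of the cell math:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

/* Bytes occupied on an ATM link: 48-byte payloads ride in 53-byte cells. */
static inline unsigned int atm_wire_bytes(unsigned int len)
{
	return DIV_ROUND_UP(len, 48) * 53;	/* e.g. 100 -> 3 cells -> 159 */
}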
diff --git a/include/net/sock.h b/include/net/sock.h
index 95a5a2c6925a..31d5cfbb51ec 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -327,7 +327,7 @@ struct sock {
327#ifdef CONFIG_RPS 327#ifdef CONFIG_RPS
328 __u32 sk_rxhash; 328 __u32 sk_rxhash;
329#endif 329#endif
330#ifdef CONFIG_NET_LL_RX_POLL 330#ifdef CONFIG_NET_RX_BUSY_POLL
331 unsigned int sk_napi_id; 331 unsigned int sk_napi_id;
332 unsigned int sk_ll_usec; 332 unsigned int sk_ll_usec;
333#endif 333#endif
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 3cc5a0b278c3..5ebda976ea93 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -9,9 +9,7 @@
9struct search; 9struct search;
10 10
11DECLARE_EVENT_CLASS(bcache_request, 11DECLARE_EVENT_CLASS(bcache_request,
12
13 TP_PROTO(struct search *s, struct bio *bio), 12 TP_PROTO(struct search *s, struct bio *bio),
14
15 TP_ARGS(s, bio), 13 TP_ARGS(s, bio),
16 14
17 TP_STRUCT__entry( 15 TP_STRUCT__entry(
@@ -22,7 +20,6 @@ DECLARE_EVENT_CLASS(bcache_request,
22 __field(dev_t, orig_sector ) 20 __field(dev_t, orig_sector )
23 __field(unsigned int, nr_sector ) 21 __field(unsigned int, nr_sector )
24 __array(char, rwbs, 6 ) 22 __array(char, rwbs, 6 )
25 __array(char, comm, TASK_COMM_LEN )
26 ), 23 ),
27 24
28 TP_fast_assign( 25 TP_fast_assign(
@@ -33,36 +30,66 @@ DECLARE_EVENT_CLASS(bcache_request,
33 __entry->orig_sector = bio->bi_sector - 16; 30 __entry->orig_sector = bio->bi_sector - 16;
34 __entry->nr_sector = bio->bi_size >> 9; 31 __entry->nr_sector = bio->bi_size >> 9;
35 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); 32 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
36 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
37 ), 33 ),
38 34
39 TP_printk("%d,%d %s %llu + %u [%s] (from %d,%d @ %llu)", 35 TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
40 MAJOR(__entry->dev), MINOR(__entry->dev), 36 MAJOR(__entry->dev), MINOR(__entry->dev),
41 __entry->rwbs, 37 __entry->rwbs, (unsigned long long)__entry->sector,
42 (unsigned long long)__entry->sector, 38 __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
43 __entry->nr_sector, __entry->comm,
44 __entry->orig_major, __entry->orig_minor,
45 (unsigned long long)__entry->orig_sector) 39 (unsigned long long)__entry->orig_sector)
46); 40);
47 41
48DEFINE_EVENT(bcache_request, bcache_request_start, 42DECLARE_EVENT_CLASS(bkey,
43 TP_PROTO(struct bkey *k),
44 TP_ARGS(k),
49 45
50 TP_PROTO(struct search *s, struct bio *bio), 46 TP_STRUCT__entry(
47 __field(u32, size )
48 __field(u32, inode )
49 __field(u64, offset )
50 __field(bool, dirty )
51 ),
51 52
52 TP_ARGS(s, bio) 53 TP_fast_assign(
54 __entry->inode = KEY_INODE(k);
55 __entry->offset = KEY_OFFSET(k);
56 __entry->size = KEY_SIZE(k);
57 __entry->dirty = KEY_DIRTY(k);
58 ),
59
60 TP_printk("%u:%llu len %u dirty %u", __entry->inode,
61 __entry->offset, __entry->size, __entry->dirty)
53); 62);
54 63
55DEFINE_EVENT(bcache_request, bcache_request_end, 64DECLARE_EVENT_CLASS(btree_node,
65 TP_PROTO(struct btree *b),
66 TP_ARGS(b),
67
68 TP_STRUCT__entry(
69 __field(size_t, bucket )
70 ),
56 71
72 TP_fast_assign(
73 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
74 ),
75
76 TP_printk("bucket %zu", __entry->bucket)
77);
78
79/* request.c */
80
81DEFINE_EVENT(bcache_request, bcache_request_start,
57 TP_PROTO(struct search *s, struct bio *bio), 82 TP_PROTO(struct search *s, struct bio *bio),
83 TP_ARGS(s, bio)
84);
58 85
86DEFINE_EVENT(bcache_request, bcache_request_end,
87 TP_PROTO(struct search *s, struct bio *bio),
59 TP_ARGS(s, bio) 88 TP_ARGS(s, bio)
60); 89);
61 90
62DECLARE_EVENT_CLASS(bcache_bio, 91DECLARE_EVENT_CLASS(bcache_bio,
63
64 TP_PROTO(struct bio *bio), 92 TP_PROTO(struct bio *bio),
65
66 TP_ARGS(bio), 93 TP_ARGS(bio),
67 94
68 TP_STRUCT__entry( 95 TP_STRUCT__entry(
@@ -70,7 +97,6 @@ DECLARE_EVENT_CLASS(bcache_bio,
70 __field(sector_t, sector ) 97 __field(sector_t, sector )
71 __field(unsigned int, nr_sector ) 98 __field(unsigned int, nr_sector )
72 __array(char, rwbs, 6 ) 99 __array(char, rwbs, 6 )
73 __array(char, comm, TASK_COMM_LEN )
74 ), 100 ),
75 101
76 TP_fast_assign( 102 TP_fast_assign(
@@ -78,191 +104,328 @@ DECLARE_EVENT_CLASS(bcache_bio,
78 __entry->sector = bio->bi_sector; 104 __entry->sector = bio->bi_sector;
79 __entry->nr_sector = bio->bi_size >> 9; 105 __entry->nr_sector = bio->bi_size >> 9;
80 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); 106 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
81 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
82 ), 107 ),
83 108
84 TP_printk("%d,%d %s %llu + %u [%s]", 109 TP_printk("%d,%d %s %llu + %u",
85 MAJOR(__entry->dev), MINOR(__entry->dev), 110 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
86 __entry->rwbs, 111 (unsigned long long)__entry->sector, __entry->nr_sector)
87 (unsigned long long)__entry->sector,
88 __entry->nr_sector, __entry->comm)
89); 112);
90 113
91 114DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
92DEFINE_EVENT(bcache_bio, bcache_passthrough,
93
94 TP_PROTO(struct bio *bio), 115 TP_PROTO(struct bio *bio),
116 TP_ARGS(bio)
117);
95 118
119DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
120 TP_PROTO(struct bio *bio),
96 TP_ARGS(bio) 121 TP_ARGS(bio)
97); 122);
98 123
99DEFINE_EVENT(bcache_bio, bcache_cache_hit, 124TRACE_EVENT(bcache_read,
125 TP_PROTO(struct bio *bio, bool hit, bool bypass),
126 TP_ARGS(bio, hit, bypass),
100 127
101 TP_PROTO(struct bio *bio), 128 TP_STRUCT__entry(
129 __field(dev_t, dev )
130 __field(sector_t, sector )
131 __field(unsigned int, nr_sector )
132 __array(char, rwbs, 6 )
133 __field(bool, cache_hit )
134 __field(bool, bypass )
135 ),
102 136
103 TP_ARGS(bio) 137 TP_fast_assign(
138 __entry->dev = bio->bi_bdev->bd_dev;
139 __entry->sector = bio->bi_sector;
140 __entry->nr_sector = bio->bi_size >> 9;
141 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
142 __entry->cache_hit = hit;
143 __entry->bypass = bypass;
144 ),
145
146 TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
147 MAJOR(__entry->dev), MINOR(__entry->dev),
148 __entry->rwbs, (unsigned long long)__entry->sector,
149 __entry->nr_sector, __entry->cache_hit, __entry->bypass)
104); 150);
105 151
106DEFINE_EVENT(bcache_bio, bcache_cache_miss, 152TRACE_EVENT(bcache_write,
153 TP_PROTO(struct bio *bio, bool writeback, bool bypass),
154 TP_ARGS(bio, writeback, bypass),
107 155
108 TP_PROTO(struct bio *bio), 156 TP_STRUCT__entry(
157 __field(dev_t, dev )
158 __field(sector_t, sector )
159 __field(unsigned int, nr_sector )
160 __array(char, rwbs, 6 )
161 __field(bool, writeback )
162 __field(bool, bypass )
163 ),
109 164
110 TP_ARGS(bio) 165 TP_fast_assign(
166 __entry->dev = bio->bi_bdev->bd_dev;
167 __entry->sector = bio->bi_sector;
168 __entry->nr_sector = bio->bi_size >> 9;
169 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
170 __entry->writeback = writeback;
171 __entry->bypass = bypass;
172 ),
173
 174 TP_printk("%d,%d %s %llu + %u writeback %u bypass %u",
175 MAJOR(__entry->dev), MINOR(__entry->dev),
176 __entry->rwbs, (unsigned long long)__entry->sector,
177 __entry->nr_sector, __entry->writeback, __entry->bypass)
111); 178);
112 179
113DEFINE_EVENT(bcache_bio, bcache_read_retry, 180DEFINE_EVENT(bcache_bio, bcache_read_retry,
114
115 TP_PROTO(struct bio *bio), 181 TP_PROTO(struct bio *bio),
116
117 TP_ARGS(bio) 182 TP_ARGS(bio)
118); 183);
119 184
120DEFINE_EVENT(bcache_bio, bcache_writethrough, 185DEFINE_EVENT(bkey, bcache_cache_insert,
186 TP_PROTO(struct bkey *k),
187 TP_ARGS(k)
188);
121 189
122 TP_PROTO(struct bio *bio), 190/* Journal */
123 191
124 TP_ARGS(bio) 192DECLARE_EVENT_CLASS(cache_set,
125); 193 TP_PROTO(struct cache_set *c),
194 TP_ARGS(c),
126 195
127DEFINE_EVENT(bcache_bio, bcache_writeback, 196 TP_STRUCT__entry(
197 __array(char, uuid, 16 )
198 ),
128 199
129 TP_PROTO(struct bio *bio), 200 TP_fast_assign(
201 memcpy(__entry->uuid, c->sb.set_uuid, 16);
202 ),
130 203
131 TP_ARGS(bio) 204 TP_printk("%pU", __entry->uuid)
132); 205);
133 206
134DEFINE_EVENT(bcache_bio, bcache_write_skip, 207DEFINE_EVENT(bkey, bcache_journal_replay_key,
135 208 TP_PROTO(struct bkey *k),
136 TP_PROTO(struct bio *bio), 209 TP_ARGS(k)
210);
137 211
138 TP_ARGS(bio) 212DEFINE_EVENT(cache_set, bcache_journal_full,
213 TP_PROTO(struct cache_set *c),
214 TP_ARGS(c)
139); 215);
140 216
141DEFINE_EVENT(bcache_bio, bcache_btree_read, 217DEFINE_EVENT(cache_set, bcache_journal_entry_full,
218 TP_PROTO(struct cache_set *c),
219 TP_ARGS(c)
220);
142 221
222DEFINE_EVENT(bcache_bio, bcache_journal_write,
143 TP_PROTO(struct bio *bio), 223 TP_PROTO(struct bio *bio),
144
145 TP_ARGS(bio) 224 TP_ARGS(bio)
146); 225);
147 226
148DEFINE_EVENT(bcache_bio, bcache_btree_write, 227/* Btree */
149 228
150 TP_PROTO(struct bio *bio), 229DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
230 TP_PROTO(struct cache_set *c),
231 TP_ARGS(c)
232);
151 233
152 TP_ARGS(bio) 234DEFINE_EVENT(btree_node, bcache_btree_read,
235 TP_PROTO(struct btree *b),
236 TP_ARGS(b)
153); 237);
154 238
155DEFINE_EVENT(bcache_bio, bcache_write_dirty, 239TRACE_EVENT(bcache_btree_write,
240 TP_PROTO(struct btree *b),
241 TP_ARGS(b),
156 242
157 TP_PROTO(struct bio *bio), 243 TP_STRUCT__entry(
244 __field(size_t, bucket )
245 __field(unsigned, block )
246 __field(unsigned, keys )
247 ),
158 248
159 TP_ARGS(bio) 249 TP_fast_assign(
250 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
251 __entry->block = b->written;
252 __entry->keys = b->sets[b->nsets].data->keys;
253 ),
254
255 TP_printk("bucket %zu", __entry->bucket)
160); 256);
161 257
162DEFINE_EVENT(bcache_bio, bcache_read_dirty, 258DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
259 TP_PROTO(struct btree *b),
260 TP_ARGS(b)
261);
163 262
164 TP_PROTO(struct bio *bio), 263DEFINE_EVENT(btree_node, bcache_btree_node_alloc_fail,
264 TP_PROTO(struct btree *b),
265 TP_ARGS(b)
266);
165 267
166 TP_ARGS(bio) 268DEFINE_EVENT(btree_node, bcache_btree_node_free,
269 TP_PROTO(struct btree *b),
270 TP_ARGS(b)
167); 271);
168 272
169DEFINE_EVENT(bcache_bio, bcache_write_moving, 273TRACE_EVENT(bcache_btree_gc_coalesce,
274 TP_PROTO(unsigned nodes),
275 TP_ARGS(nodes),
170 276
171 TP_PROTO(struct bio *bio), 277 TP_STRUCT__entry(
278 __field(unsigned, nodes )
279 ),
172 280
173 TP_ARGS(bio) 281 TP_fast_assign(
282 __entry->nodes = nodes;
283 ),
284
285 TP_printk("coalesced %u nodes", __entry->nodes)
174); 286);
175 287
176DEFINE_EVENT(bcache_bio, bcache_read_moving, 288DEFINE_EVENT(cache_set, bcache_gc_start,
289 TP_PROTO(struct cache_set *c),
290 TP_ARGS(c)
291);
177 292
178 TP_PROTO(struct bio *bio), 293DEFINE_EVENT(cache_set, bcache_gc_end,
294 TP_PROTO(struct cache_set *c),
295 TP_ARGS(c)
296);
179 297
180 TP_ARGS(bio) 298DEFINE_EVENT(bkey, bcache_gc_copy,
299 TP_PROTO(struct bkey *k),
300 TP_ARGS(k)
181); 301);
182 302
183DEFINE_EVENT(bcache_bio, bcache_journal_write, 303DEFINE_EVENT(bkey, bcache_gc_copy_collision,
304 TP_PROTO(struct bkey *k),
305 TP_ARGS(k)
306);
184 307
185 TP_PROTO(struct bio *bio), 308TRACE_EVENT(bcache_btree_insert_key,
309 TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
310 TP_ARGS(b, k, op, status),
186 311
187 TP_ARGS(bio) 312 TP_STRUCT__entry(
188); 313 __field(u64, btree_node )
314 __field(u32, btree_level )
315 __field(u32, inode )
316 __field(u64, offset )
317 __field(u32, size )
318 __field(u8, dirty )
319 __field(u8, op )
320 __field(u8, status )
321 ),
189 322
190DECLARE_EVENT_CLASS(bcache_cache_bio, 323 TP_fast_assign(
324 __entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
325 __entry->btree_level = b->level;
326 __entry->inode = KEY_INODE(k);
327 __entry->offset = KEY_OFFSET(k);
328 __entry->size = KEY_SIZE(k);
329 __entry->dirty = KEY_DIRTY(k);
330 __entry->op = op;
331 __entry->status = status;
332 ),
191 333
192 TP_PROTO(struct bio *bio, 334 TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
193 sector_t orig_sector, 335 __entry->status, __entry->op,
194 struct block_device* orig_bdev), 336 __entry->btree_node, __entry->btree_level,
337 __entry->inode, __entry->offset,
338 __entry->size, __entry->dirty)
339);
195 340
196 TP_ARGS(bio, orig_sector, orig_bdev), 341DECLARE_EVENT_CLASS(btree_split,
342 TP_PROTO(struct btree *b, unsigned keys),
343 TP_ARGS(b, keys),
197 344
198 TP_STRUCT__entry( 345 TP_STRUCT__entry(
199 __field(dev_t, dev ) 346 __field(size_t, bucket )
200 __field(dev_t, orig_dev ) 347 __field(unsigned, keys )
201 __field(sector_t, sector )
202 __field(sector_t, orig_sector )
203 __field(unsigned int, nr_sector )
204 __array(char, rwbs, 6 )
205 __array(char, comm, TASK_COMM_LEN )
206 ), 348 ),
207 349
208 TP_fast_assign( 350 TP_fast_assign(
209 __entry->dev = bio->bi_bdev->bd_dev; 351 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
210 __entry->orig_dev = orig_bdev->bd_dev; 352 __entry->keys = keys;
211 __entry->sector = bio->bi_sector;
212 __entry->orig_sector = orig_sector;
213 __entry->nr_sector = bio->bi_size >> 9;
214 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
215 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
216 ), 353 ),
217 354
218 TP_printk("%d,%d %s %llu + %u [%s] (from %d,%d %llu)", 355 TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
219 MAJOR(__entry->dev), MINOR(__entry->dev),
220 __entry->rwbs,
221 (unsigned long long)__entry->sector,
222 __entry->nr_sector, __entry->comm,
223 MAJOR(__entry->orig_dev), MINOR(__entry->orig_dev),
224 (unsigned long long)__entry->orig_sector)
225); 356);
226 357
227DEFINE_EVENT(bcache_cache_bio, bcache_cache_insert, 358DEFINE_EVENT(btree_split, bcache_btree_node_split,
228 359 TP_PROTO(struct btree *b, unsigned keys),
229 TP_PROTO(struct bio *bio, 360 TP_ARGS(b, keys)
230 sector_t orig_sector, 361);
231 struct block_device *orig_bdev),
232 362
233 TP_ARGS(bio, orig_sector, orig_bdev) 363DEFINE_EVENT(btree_split, bcache_btree_node_compact,
364 TP_PROTO(struct btree *b, unsigned keys),
365 TP_ARGS(b, keys)
234); 366);
235 367
236DECLARE_EVENT_CLASS(bcache_gc, 368DEFINE_EVENT(btree_node, bcache_btree_set_root,
369 TP_PROTO(struct btree *b),
370 TP_ARGS(b)
371);
237 372
238 TP_PROTO(uint8_t *uuid), 373/* Allocator */
239 374
240 TP_ARGS(uuid), 375TRACE_EVENT(bcache_alloc_invalidate,
376 TP_PROTO(struct cache *ca),
377 TP_ARGS(ca),
241 378
242 TP_STRUCT__entry( 379 TP_STRUCT__entry(
243 __field(uint8_t *, uuid) 380 __field(unsigned, free )
381 __field(unsigned, free_inc )
382 __field(unsigned, free_inc_size )
383 __field(unsigned, unused )
244 ), 384 ),
245 385
246 TP_fast_assign( 386 TP_fast_assign(
247 __entry->uuid = uuid; 387 __entry->free = fifo_used(&ca->free);
388 __entry->free_inc = fifo_used(&ca->free_inc);
389 __entry->free_inc_size = ca->free_inc.size;
390 __entry->unused = fifo_used(&ca->unused);
248 ), 391 ),
249 392
250 TP_printk("%pU", __entry->uuid) 393 TP_printk("free %u free_inc %u/%u unused %u", __entry->free,
394 __entry->free_inc, __entry->free_inc_size, __entry->unused)
251); 395);
252 396
397TRACE_EVENT(bcache_alloc_fail,
398 TP_PROTO(struct cache *ca),
399 TP_ARGS(ca),
253 400
254DEFINE_EVENT(bcache_gc, bcache_gc_start, 401 TP_STRUCT__entry(
402 __field(unsigned, free )
403 __field(unsigned, free_inc )
404 __field(unsigned, unused )
405 __field(unsigned, blocked )
406 ),
255 407
256 TP_PROTO(uint8_t *uuid), 408 TP_fast_assign(
409 __entry->free = fifo_used(&ca->free);
410 __entry->free_inc = fifo_used(&ca->free_inc);
411 __entry->unused = fifo_used(&ca->unused);
412 __entry->blocked = atomic_read(&ca->set->prio_blocked);
413 ),
257 414
258 TP_ARGS(uuid) 415 TP_printk("free %u free_inc %u unused %u blocked %u", __entry->free,
416 __entry->free_inc, __entry->unused, __entry->blocked)
259); 417);
260 418
261DEFINE_EVENT(bcache_gc, bcache_gc_end, 419/* Background writeback */
262 420
263 TP_PROTO(uint8_t *uuid), 421DEFINE_EVENT(bkey, bcache_writeback,
422 TP_PROTO(struct bkey *k),
423 TP_ARGS(k)
424);
264 425
265 TP_ARGS(uuid) 426DEFINE_EVENT(bkey, bcache_writeback_collision,
427 TP_PROTO(struct bkey *k),
428 TP_ARGS(k)
266); 429);
267 430
268#endif /* _TRACE_BCACHE_H */ 431#endif /* _TRACE_BCACHE_H */
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index d615f78cc6b6..41a6643e2136 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -670,10 +670,6 @@ perf_trace_##call(void *__data, proto) \
670 sizeof(u64)); \ 670 sizeof(u64)); \
671 __entry_size -= sizeof(u32); \ 671 __entry_size -= sizeof(u32); \
672 \ 672 \
673 if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
674 "profile buffer not large enough")) \
675 return; \
676 \
677 entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \ 673 entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
678 __entry_size, event_call->event.type, &__regs, &rctx); \ 674 __entry_size, event_call->event.type, &__regs, &rctx); \
679 if (!entry) \ 675 if (!entry) \
diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild
index 119487e05e65..2d9a25daab05 100644
--- a/include/uapi/drm/Kbuild
+++ b/include/uapi/drm/Kbuild
@@ -16,3 +16,4 @@ header-y += sis_drm.h
16header-y += tegra_drm.h 16header-y += tegra_drm.h
17header-y += via_drm.h 17header-y += via_drm.h
18header-y += vmwgfx_drm.h 18header-y += vmwgfx_drm.h
19header-y += msm_drm.h
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 238a166b9fe6..ece867889cc7 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -181,7 +181,7 @@ enum drm_map_type {
181 _DRM_AGP = 3, /**< AGP/GART */ 181 _DRM_AGP = 3, /**< AGP/GART */
182 _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ 182 _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
183 _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ 183 _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
184 _DRM_GEM = 6, /**< GEM object */ 184 _DRM_GEM = 6, /**< GEM object (obsolete) */
185}; 185};
186 186
187/** 187/**
@@ -780,6 +780,7 @@ struct drm_event_vblank {
780#define DRM_CAP_DUMB_PREFER_SHADOW 0x4 780#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
781#define DRM_CAP_PRIME 0x5 781#define DRM_CAP_PRIME 0x5
782#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 782#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
783#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
783 784
784#define DRM_PRIME_CAP_IMPORT 0x1 785#define DRM_PRIME_CAP_IMPORT 0x1
785#define DRM_PRIME_CAP_EXPORT 0x2 786#define DRM_PRIME_CAP_EXPORT 0x2
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 53db7cea373b..550811712f78 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -412,7 +412,8 @@ struct drm_mode_crtc_lut {
412}; 412};
413 413
414#define DRM_MODE_PAGE_FLIP_EVENT 0x01 414#define DRM_MODE_PAGE_FLIP_EVENT 0x01
415#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT 415#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
416#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC)
416 417
417/* 418/*
418 * Request a page flip on the specified crtc. 419 * Request a page flip on the specified crtc.
@@ -426,11 +427,14 @@ struct drm_mode_crtc_lut {
426 * flip is already pending as the ioctl is called, EBUSY will be 427 * flip is already pending as the ioctl is called, EBUSY will be
427 * returned. 428 * returned.
428 * 429 *
429 * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will 430 * Flag DRM_MODE_PAGE_FLIP_EVENT requests that drm sends back a vblank
430 * request that drm sends back a vblank event (see drm.h: struct 431 * event (see drm.h: struct drm_event_vblank) when the page flip is
431 * drm_event_vblank) when the page flip is done. The user_data field 432 * done. The user_data field passed in with this ioctl will be
432 * passed in with this ioctl will be returned as the user_data field 433 * returned as the user_data field in the vblank event struct.
433 * in the vblank event struct. 434 *
435 * Flag DRM_MODE_PAGE_FLIP_ASYNC requests that the flip happen
436 * 'as soon as possible', meaning that it will not delay waiting for vblank.
437 * This may cause tearing on the screen.
434 * 438 *
435 * The reserved field must be zero until we figure out something 439 * The reserved field must be zero until we figure out something
436 * clever to use it for. 440 * clever to use it for.
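
For illustration, a minimal libdrm sketch of the new flag in use; crtc_id and fb_id are assumed to come from prior modesetting, and drmModePageFlip() is libdrm's wrapper for this ioctl:

    #include <stdio.h>
    #include <stdint.h>
    #include <xf86drmMode.h>

    /* Request an event on completion and don't wait for vblank (may tear). */
    static void flip_asap(int fd, uint32_t crtc_id, uint32_t fb_id, void *user_data)
    {
            uint32_t flags = DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC;

            if (drmModePageFlip(fd, crtc_id, fb_id, flags, user_data))
                    perror("drmModePageFlip");  /* e.g. EBUSY if a flip is pending */
    }
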
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 923ed7fe5775..55bb5729bd78 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -33,6 +33,30 @@
33 * subject to backwards-compatibility constraints. 33 * subject to backwards-compatibility constraints.
34 */ 34 */
35 35
36/**
37 * DOC: uevents generated by i915 on its device node
38 *
39 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
40 * event from the gpu l3 cache. Additional information supplied is ROW,
41 * BANK, SUBBANK of the affected cacheline. Userspace should keep track of
42 * these events and if a specific cache-line seems to have a persistent
43 * error remap it with the l3 remapping tool supplied in intel-gpu-tools.
44 * The value supplied with the event is always 1.
45 *
46 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
47 * hangcheck. The error detection event is a good indicator of when things
48 * began to go badly. The value supplied with the event is a 1 upon error
49 * detection, and a 0 upon reset completion, signifying no more error
50 * exists. NOTE: Disabling hangcheck or reset via module parameter will
51 * cause the related events to not be seen.
52 *
53 * I915_RESET_UEVENT - Event is generated just before an attempt to reset
54 * the GPU. The value supplied with the event is always 1. NOTE: Disabling
55 * reset via module parameter will cause this event to not be seen.
56 */
57#define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR"
58#define I915_ERROR_UEVENT "ERROR"
59#define I915_RESET_UEVENT "RESET"
36 60
37/* Each region is a minimum of 16k, and there are at most 255 of them. 61/* Each region is a minimum of 16k, and there are at most 255 of them.
38 */ 62 */
@@ -310,6 +334,7 @@ typedef struct drm_i915_irq_wait {
310#define I915_PARAM_HAS_PINNED_BATCHES 24 334#define I915_PARAM_HAS_PINNED_BATCHES 24
311#define I915_PARAM_HAS_EXEC_NO_RELOC 25 335#define I915_PARAM_HAS_EXEC_NO_RELOC 25
312#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26 336#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
337#define I915_PARAM_HAS_WT 27
313 338
314typedef struct drm_i915_getparam { 339typedef struct drm_i915_getparam {
315 int param; 340 int param;
@@ -744,8 +769,32 @@ struct drm_i915_gem_busy {
744 __u32 busy; 769 __u32 busy;
745}; 770};
746 771
772/**
773 * I915_CACHING_NONE
774 *
775 * GPU access is not coherent with cpu caches. Default for machines without an
776 * LLC.
777 */
747#define I915_CACHING_NONE 0 778#define I915_CACHING_NONE 0
779/**
780 * I915_CACHING_CACHED
781 *
782 * GPU access is coherent with cpu caches and furthermore the data is cached in
783 * last-level caches shared between cpu cores and the gpu GT. Default on
784 * machines with HAS_LLC.
785 */
748#define I915_CACHING_CACHED 1 786#define I915_CACHING_CACHED 1
787/**
788 * I915_CACHING_DISPLAY
789 *
790 * Special GPU caching mode which is coherent with the scanout engines.
791 * Transparently falls back to I915_CACHING_NONE on platforms where no special
792 * cache mode (like write-through or gfdt flushing) is available. The kernel
793 * automatically sets this mode when using a buffer as a scanout target.
794 * Userspace can manually set this mode to avoid a costly stall and clflush in
795 * the hotpath of drawing the first frame.
796 */
797#define I915_CACHING_DISPLAY 2
749 798
750struct drm_i915_gem_caching { 799struct drm_i915_gem_caching {
751 /** 800 /**
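
A sketch of how userspace might opt a scanout buffer into the display mode ahead of time; the set-caching ioctl itself predates this patch, and the GEM handle is assumed valid:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Avoid the stall+clflush on first scanout by setting the mode early. */
    static int set_display_caching(int fd, uint32_t handle)
    {
            struct drm_i915_gem_caching arg;

            memset(&arg, 0, sizeof(arg));
            arg.handle  = handle;
            arg.caching = I915_CACHING_DISPLAY; /* falls back to NONE if absent */

            return ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
    }
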
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
new file mode 100644
index 000000000000..d3c62074016d
--- /dev/null
+++ b/include/uapi/drm/msm_drm.h
@@ -0,0 +1,207 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_DRM_H__
19#define __MSM_DRM_H__
20
21#include <stddef.h>
22#include <drm/drm.h>
23
24/* Please note that modifications to all structs defined here are
25 * subject to backwards-compatibility constraints:
26 * 1) Do not use pointers, use uint64_t instead for 32 bit / 64 bit
27 * user/kernel compatibility
28 * 2) Keep fields aligned to their size
29 * 3) Because of how drm_ioctl() works, we can add new fields at
30 * the end of an ioctl if some care is taken: drm_ioctl() will
31 * zero out the new fields at the tail of the ioctl, so a zero
32 * value should have a backwards compatible meaning. And for
33 * output params, userspace won't see the newly added output
34 * fields, so that has to be acceptable.
35 */
36
37#define MSM_PIPE_NONE 0x00
38#define MSM_PIPE_2D0 0x01
39#define MSM_PIPE_2D1 0x02
40#define MSM_PIPE_3D0 0x10
41
42/* timeouts are specified in clock-monotonic absolute times (to simplify
43 * restarting interrupted ioctls). The following struct is logically the
44 * same as 'struct timespec' but 32/64b ABI safe.
45 */
46struct drm_msm_timespec {
47 int64_t tv_sec; /* seconds */
48 int64_t tv_nsec; /* nanoseconds */
49};
50
51#define MSM_PARAM_GPU_ID 0x01
52#define MSM_PARAM_GMEM_SIZE 0x02
53
54struct drm_msm_param {
55 uint32_t pipe; /* in, MSM_PIPE_x */
56 uint32_t param; /* in, MSM_PARAM_x */
57 uint64_t value; /* out (get_param) or in (set_param) */
58};
59
60/*
61 * GEM buffers:
62 */
63
64#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
65#define MSM_BO_GPU_READONLY 0x00000002
66#define MSM_BO_CACHE_MASK 0x000f0000
67/* cache modes */
68#define MSM_BO_CACHED 0x00010000
69#define MSM_BO_WC 0x00020000
70#define MSM_BO_UNCACHED 0x00040000
71
72struct drm_msm_gem_new {
73 uint64_t size; /* in */
74 uint32_t flags; /* in, mask of MSM_BO_x */
75 uint32_t handle; /* out */
76};
77
78struct drm_msm_gem_info {
79 uint32_t handle; /* in */
80 uint32_t pad;
81 uint64_t offset; /* out, offset to pass to mmap() */
82};
83
84#define MSM_PREP_READ 0x01
85#define MSM_PREP_WRITE 0x02
86#define MSM_PREP_NOSYNC 0x04
87
88struct drm_msm_gem_cpu_prep {
89 uint32_t handle; /* in */
90 uint32_t op; /* in, mask of MSM_PREP_x */
91 struct drm_msm_timespec timeout; /* in */
92};
93
94struct drm_msm_gem_cpu_fini {
95 uint32_t handle; /* in */
96};
97
98/*
99 * Cmdstream Submission:
100 */
101
102/* The value written into the cmdstream is logically:
103 *
104 * ((relocbuf->gpuaddr + reloc_offset) << shift) | or
105 *
106 * When we have GPUs with >32bit ptrs, it should be possible to deal
107 * with this by emit'ing two reloc entries with appropriate shift
108 * values. Or a new MSM_SUBMIT_CMD_x type would also be an option.
109 *
110 * NOTE that reloc's must be sorted by order of increasing submit_offset,
111 * otherwise EINVAL.
112 */
113struct drm_msm_gem_submit_reloc {
114 uint32_t submit_offset; /* in, offset from submit_bo */
115 uint32_t or; /* in, value OR'd with result */
116 int32_t shift; /* in, amount of left shift (can be negative) */
117 uint32_t reloc_idx; /* in, index of reloc_bo buffer */
118 uint64_t reloc_offset; /* in, offset from start of reloc_bo */
119};
120
121/* submit-types:
122 * BUF - this cmd buffer is executed normally.
123 * IB_TARGET_BUF - this cmd buffer is an IB target. Reloc's are
124 * processed normally, but the kernel does not setup an IB to
125 * this buffer in the first-level ringbuffer
126 * CTX_RESTORE_BUF - only executed if there has been a GPU context
127 * switch since the last SUBMIT ioctl
128 */
129#define MSM_SUBMIT_CMD_BUF 0x0001
130#define MSM_SUBMIT_CMD_IB_TARGET_BUF 0x0002
131#define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003
132struct drm_msm_gem_submit_cmd {
133 uint32_t type; /* in, one of MSM_SUBMIT_CMD_x */
134 uint32_t submit_idx; /* in, index of submit_bo cmdstream buffer */
135 uint32_t submit_offset; /* in, offset into submit_bo */
136 uint32_t size; /* in, cmdstream size */
137 uint32_t pad;
138 uint32_t nr_relocs; /* in, number of submit_reloc's */
139 uint64_t __user relocs; /* in, ptr to array of submit_reloc's */
140};
141
142/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
143 * cmdstream buffer(s) themselves or reloc entries) has one (and only
144 * one) entry in the submit->bos[] table.
145 *
146 * As an optimization, the current buffer (gpu virtual address) can be
147 * passed back through the 'presumed' field. If on a subsequent reloc,
148 * userspace passes back a 'presumed' address that is still valid,
149 * then patching the cmdstream for this entry is skipped. This can
150 * avoid kernel needing to map/access the cmdstream bo in the common
151 * case.
152 */
153#define MSM_SUBMIT_BO_READ 0x0001
154#define MSM_SUBMIT_BO_WRITE 0x0002
155struct drm_msm_gem_submit_bo {
156 uint32_t flags; /* in, mask of MSM_SUBMIT_BO_x */
157 uint32_t handle; /* in, GEM handle */
158 uint64_t presumed; /* in/out, presumed buffer address */
159};
160
161/* Each cmdstream submit consists of a table of buffers involved, and
162 * one or more cmdstream buffers. This allows for conditional execution
163 * (context-restore), and IB buffers needed for per tile/bin draw cmds.
164 */
165struct drm_msm_gem_submit {
166 uint32_t pipe; /* in, MSM_PIPE_x */
167 uint32_t fence; /* out */
168 uint32_t nr_bos; /* in, number of submit_bo's */
169 uint32_t nr_cmds; /* in, number of submit_cmd's */
170 uint64_t __user bos; /* in, ptr to array of submit_bo's */
171 uint64_t __user cmds; /* in, ptr to array of submit_cmd's */
172};
173
174/* The normal way to synchronize with the GPU is just to CPU_PREP on
175 * a buffer if you need to access it from the CPU (other cmdstream
176 * submission from same or other contexts, PAGE_FLIP ioctl, etc, all
177 * handle the required synchronization under the hood). This ioctl
178 * mainly just exists as a way to implement the gallium pipe_fence
179 * APIs without requiring a dummy bo to synchronize on.
180 */
181struct drm_msm_wait_fence {
182 uint32_t fence; /* in */
183 uint32_t pad;
184 struct drm_msm_timespec timeout; /* in */
185};
186
187#define DRM_MSM_GET_PARAM 0x00
188/* placeholder:
189#define DRM_MSM_SET_PARAM 0x01
190 */
191#define DRM_MSM_GEM_NEW 0x02
192#define DRM_MSM_GEM_INFO 0x03
193#define DRM_MSM_GEM_CPU_PREP 0x04
194#define DRM_MSM_GEM_CPU_FINI 0x05
195#define DRM_MSM_GEM_SUBMIT 0x06
196#define DRM_MSM_WAIT_FENCE 0x07
197#define DRM_MSM_NUM_IOCTLS 0x08
198
199#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
200#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
201#define DRM_IOCTL_MSM_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_INFO, struct drm_msm_gem_info)
202#define DRM_IOCTL_MSM_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_PREP, struct drm_msm_gem_cpu_prep)
203#define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini)
204#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
205#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
206
207#endif /* __MSM_DRM_H__ */
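
For illustration, two userspace helpers that follow directly from the comments in this header; the helper names are hypothetical, and the header is assumed installed as <drm/msm_drm.h>:

    #include <stdint.h>
    #include <time.h>
    #include <drm/msm_drm.h>

    /* Absolute CLOCK_MONOTONIC deadline for GEM_CPU_PREP / WAIT_FENCE,
     * built from a relative timeout in milliseconds. */
    static struct drm_msm_timespec abs_timeout(uint32_t relative_ms)
    {
            struct timespec now;
            struct drm_msm_timespec to;

            clock_gettime(CLOCK_MONOTONIC, &now);
            to.tv_sec  = now.tv_sec + relative_ms / 1000;
            to.tv_nsec = now.tv_nsec + (uint64_t)(relative_ms % 1000) * 1000000;
            if (to.tv_nsec >= 1000000000) {
                    to.tv_sec++;
                    to.tv_nsec -= 1000000000;
            }
            return to;
    }

    /* The word patched into the cmdstream for one reloc entry, per the
     * ((gpuaddr + reloc_offset) << shift) | or formula above; a negative
     * shift means shift right. */
    static uint32_t reloc_value(const struct drm_msm_gem_submit_reloc *r,
                                uint64_t gpuaddr)
    {
            uint64_t addr = gpuaddr + r->reloc_offset;

            return (uint32_t)((r->shift >= 0 ? addr << r->shift
                                             : addr >> -r->shift) | r->or);
    }
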
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 321d4ac5c512..fa8b3adf9ffb 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -979,6 +979,8 @@ struct drm_radeon_cs {
979#define RADEON_INFO_RING_WORKING 0x15 979#define RADEON_INFO_RING_WORKING 0x15
980/* SI tile mode array */ 980/* SI tile mode array */
981#define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16 981#define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16
982/* query if CP DMA is supported on the compute ring */
983#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17
982 984
983 985
984struct drm_radeon_info { 986struct drm_radeon_info {
diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h
index d50036953497..1db453e4b550 100644
--- a/include/uapi/linux/firewire-cdev.h
+++ b/include/uapi/linux/firewire-cdev.h
@@ -215,8 +215,8 @@ struct fw_cdev_event_request2 {
215 * with the %FW_CDEV_ISO_INTERRUPT bit set, when explicitly requested with 215 * with the %FW_CDEV_ISO_INTERRUPT bit set, when explicitly requested with
216 * %FW_CDEV_IOC_FLUSH_ISO, or when there have been so many completed packets 216 * %FW_CDEV_IOC_FLUSH_ISO, or when there have been so many completed packets
217 * without the interrupt bit set that the kernel's internal buffer for @header 217 * without the interrupt bit set that the kernel's internal buffer for @header
218 * is about to overflow. (In the last case, kernels with ABI version < 5 drop 218 * is about to overflow. (In the last case, ABI versions < 5 drop header data
219 * header data up to the next interrupt packet.) 219 * up to the next interrupt packet.)
220 * 220 *
221 * Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT): 221 * Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT):
222 * 222 *
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index 6cf06bfd841b..2fee45bdec0a 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -133,4 +133,38 @@ struct ip_beet_phdr {
133 __u8 reserved; 133 __u8 reserved;
134}; 134};
135 135
136/* index values for the variables in ipv4_devconf */
137enum
138{
139 IPV4_DEVCONF_FORWARDING=1,
140 IPV4_DEVCONF_MC_FORWARDING,
141 IPV4_DEVCONF_PROXY_ARP,
142 IPV4_DEVCONF_ACCEPT_REDIRECTS,
143 IPV4_DEVCONF_SECURE_REDIRECTS,
144 IPV4_DEVCONF_SEND_REDIRECTS,
145 IPV4_DEVCONF_SHARED_MEDIA,
146 IPV4_DEVCONF_RP_FILTER,
147 IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
148 IPV4_DEVCONF_BOOTP_RELAY,
149 IPV4_DEVCONF_LOG_MARTIANS,
150 IPV4_DEVCONF_TAG,
151 IPV4_DEVCONF_ARPFILTER,
152 IPV4_DEVCONF_MEDIUM_ID,
153 IPV4_DEVCONF_NOXFRM,
154 IPV4_DEVCONF_NOPOLICY,
155 IPV4_DEVCONF_FORCE_IGMP_VERSION,
156 IPV4_DEVCONF_ARP_ANNOUNCE,
157 IPV4_DEVCONF_ARP_IGNORE,
158 IPV4_DEVCONF_PROMOTE_SECONDARIES,
159 IPV4_DEVCONF_ARP_ACCEPT,
160 IPV4_DEVCONF_ARP_NOTIFY,
161 IPV4_DEVCONF_ACCEPT_LOCAL,
162 IPV4_DEVCONF_SRC_VMARK,
163 IPV4_DEVCONF_PROXY_ARP_PVLAN,
164 IPV4_DEVCONF_ROUTE_LOCALNET,
165 __IPV4_DEVCONF_MAX
166};
167
168#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
169
136#endif /* _UAPI_LINUX_IP_H */ 170#endif /* _UAPI_LINUX_IP_H */
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index caed0f324d5f..8137dd8d2adf 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -69,8 +69,8 @@
69 * starting a poll from a device which has a secure element enabled means 69 * starting a poll from a device which has a secure element enabled means
70 * we want to do SE based card emulation. 70 * we want to do SE based card emulation.
71 * @NFC_CMD_DISABLE_SE: Disable the physical link to a specific secure element. 71 * @NFC_CMD_DISABLE_SE: Disable the physical link to a specific secure element.
72 * @NFC_CMD_FW_UPLOAD: Request to Load/flash firmware, or event to inform that 72 * @NFC_CMD_FW_DOWNLOAD: Request to Load/flash firmware, or event to inform
73 * some firmware was loaded 73 * that some firmware was loaded
74 */ 74 */
75enum nfc_commands { 75enum nfc_commands {
76 NFC_CMD_UNSPEC, 76 NFC_CMD_UNSPEC,
@@ -94,7 +94,7 @@ enum nfc_commands {
94 NFC_CMD_DISABLE_SE, 94 NFC_CMD_DISABLE_SE,
95 NFC_CMD_LLC_SDREQ, 95 NFC_CMD_LLC_SDREQ,
96 NFC_EVENT_LLC_SDRES, 96 NFC_EVENT_LLC_SDRES,
97 NFC_CMD_FW_UPLOAD, 97 NFC_CMD_FW_DOWNLOAD,
98 NFC_EVENT_SE_ADDED, 98 NFC_EVENT_SE_ADDED,
99 NFC_EVENT_SE_REMOVED, 99 NFC_EVENT_SE_REMOVED,
100/* private: internal use only */ 100/* private: internal use only */
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index dbd71b0c7d8c..09d62b9228ff 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -73,9 +73,17 @@ struct tc_estimator {
73#define TC_H_ROOT (0xFFFFFFFFU) 73#define TC_H_ROOT (0xFFFFFFFFU)
74#define TC_H_INGRESS (0xFFFFFFF1U) 74#define TC_H_INGRESS (0xFFFFFFF1U)
75 75
76/* Needs to correspond to iproute2 tc/tc_core.h "enum link_layer" */
77enum tc_link_layer {
78 TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
79 TC_LINKLAYER_ETHERNET,
80 TC_LINKLAYER_ATM,
81};
82#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
83
76struct tc_ratespec { 84struct tc_ratespec {
77 unsigned char cell_log; 85 unsigned char cell_log;
78 unsigned char __reserved; 86 __u8 linklayer; /* lower 4 bits */
79 unsigned short overhead; 87 unsigned short overhead;
80 short cell_align; 88 short cell_align;
81 unsigned short mpu; 89 unsigned short mpu;
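
A sketch of reading and writing the new field within its 4-bit budget; the helper names are illustrative:

    #include <linux/pkt_sched.h>

    static enum tc_link_layer rate_linklayer(const struct tc_ratespec *r)
    {
            return (enum tc_link_layer)(r->linklayer & TC_LINKLAYER_MASK);
    }

    static void rate_set_linklayer(struct tc_ratespec *r, unsigned char ll)
    {
            r->linklayer = (r->linklayer & ~TC_LINKLAYER_MASK) |
                           (ll & TC_LINKLAYER_MASK);
    }
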
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index af0a674cc677..a1356d3b54df 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -253,7 +253,7 @@ enum
253 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */ 253 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */
254 LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */ 254 LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */
255 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */ 255 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */
256 LINUX_MIB_LOWLATENCYRXPACKETS, /* LowLatencyRxPackets */ 256 LINUX_MIB_BUSYPOLLRXPACKETS, /* BusyPollRxPackets */
257 __LINUX_MIB_MAX 257 __LINUX_MIB_MAX
258}; 258};
259 259
diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h
index 7692dc69ccf7..331499d597fa 100644
--- a/include/uapi/linux/usb/ch11.h
+++ b/include/uapi/linux/usb/ch11.h
@@ -11,6 +11,17 @@
11 11
12#include <linux/types.h> /* __u8 etc */ 12#include <linux/types.h> /* __u8 etc */
13 13
14/* This is arbitrary.
15 * From USB 2.0 spec Table 11-13, offset 7, a hub can
16 * have up to 255 ports. The most yet reported is 10.
17 *
18 * Current Wireless USB host hardware (Intel i1480 for example) allows
19 * up to 22 devices to connect. Upcoming hardware might raise that
20 * limit. Because the arrays need to add a bit for hub status data, we
21 * use 31, so that the extra bit brings the total to an even four bytes.
22 */
23#define USB_MAXCHILDREN 31
24
14/* 25/*
15 * Hub request types 26 * Hub request types
16 */ 27 */
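
The arithmetic behind the "four bytes" remark, spelled out (HUB_STATUS_BYTES is a hypothetical macro, not from the header):

    /* One bit per port plus one hub-status bit: (31 + 1) bits = 32 bits. */
    #define HUB_STATUS_BYTES ((USB_MAXCHILDREN + 1 + 7) / 8)    /* == 4 */
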
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index ffd4652de91c..65e12099ef89 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -103,12 +103,46 @@ typedef uint64_t blkif_sector_t;
103#define BLKIF_OP_DISCARD 5 103#define BLKIF_OP_DISCARD 5
104 104
105/* 105/*
106 * Recognized if "feature-max-indirect-segments" is present in the backend
107 * xenbus info. The "feature-max-indirect-segments" node contains the maximum
108 * number of segments allowed by the backend per request. If the node is
109 * present, the frontend might use blkif_request_indirect structs in order to
110 * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
111 * maximum number of indirect segments is fixed by the backend, but the
112 * frontend can issue requests with any number of indirect segments as long as
113 * it's less than the number provided by the backend. The indirect_grefs field
114 * in blkif_request_indirect should be filled by the frontend with the
115 * grant references of the pages that are holding the indirect segments.
116 * These pages are filled with an array of blkif_request_segment_aligned
117 * that hold the information about the segments. The number of indirect
118 * pages to use is determined by the maximum number of segments
119 * an indirect request contains. Every indirect page can contain a maximum
120 * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)),
121 * so to calculate the number of indirect pages to use we have to do
122 * ceil(indirect_segments/512).
123 *
124 * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
125 * create the "feature-max-indirect-segments" node!
126 */
127#define BLKIF_OP_INDIRECT 6
128
129/*
106 * Maximum scatter/gather segments per request. 130 * Maximum scatter/gather segments per request.
107 * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE. 131 * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
108 * NB. This could be 12 if the ring indexes weren't stored in the same page. 132 * NB. This could be 12 if the ring indexes weren't stored in the same page.
109 */ 133 */
110#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 134#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
111 135
136#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
137
138struct blkif_request_segment_aligned {
139 grant_ref_t gref; /* reference to I/O buffer frame */
140 /* @first_sect: first sector in frame to transfer (inclusive). */
141 /* @last_sect: last sector in frame to transfer (inclusive). */
142 uint8_t first_sect, last_sect;
143 uint16_t _pad; /* padding to make it 8 bytes, so it's cache-aligned */
144} __attribute__((__packed__));
145
112struct blkif_request_rw { 146struct blkif_request_rw {
113 uint8_t nr_segments; /* number of segments */ 147 uint8_t nr_segments; /* number of segments */
114 blkif_vdev_t handle; /* only for read/write requests */ 148 blkif_vdev_t handle; /* only for read/write requests */
@@ -147,12 +181,31 @@ struct blkif_request_other {
147 uint64_t id; /* private guest value, echoed in resp */ 181 uint64_t id; /* private guest value, echoed in resp */
148} __attribute__((__packed__)); 182} __attribute__((__packed__));
149 183
184struct blkif_request_indirect {
185 uint8_t indirect_op;
186 uint16_t nr_segments;
187#ifdef CONFIG_X86_64
188 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */
189#endif
190 uint64_t id;
191 blkif_sector_t sector_number;
192 blkif_vdev_t handle;
193 uint16_t _pad2;
194 grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
195#ifdef CONFIG_X86_64
196 uint32_t _pad3; /* make it 64 byte aligned */
197#else
198 uint64_t _pad3; /* make it 64 byte aligned */
199#endif
200} __attribute__((__packed__));
201
150struct blkif_request { 202struct blkif_request {
151 uint8_t operation; /* BLKIF_OP_??? */ 203 uint8_t operation; /* BLKIF_OP_??? */
152 union { 204 union {
153 struct blkif_request_rw rw; 205 struct blkif_request_rw rw;
154 struct blkif_request_discard discard; 206 struct blkif_request_discard discard;
155 struct blkif_request_other other; 207 struct blkif_request_other other;
208 struct blkif_request_indirect indirect;
156 } u; 209 } u;
157} __attribute__((__packed__)); 210} __attribute__((__packed__));
158 211
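
The page-count rule from the comment above, as a sketch; indirect_pages() is a hypothetical helper and 512 assumes 4K pages:

    /* PAGE_SIZE / sizeof(struct blkif_request_segment_aligned) = 4096 / 8 */
    #define SEGS_PER_INDIRECT_FRAME 512

    /* ceil(nr_segments / 512): indirect grant pages needed for one request */
    static inline unsigned int indirect_pages(unsigned int nr_segments)
    {
            return (nr_segments + SEGS_PER_INDIRECT_FRAME - 1) /
                    SEGS_PER_INDIRECT_FRAME;
    }
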
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index 75271b9a8f61..7d28aff605c7 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -188,6 +188,11 @@ struct __name##_back_ring { \
188#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ 188#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
189 (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) 189 (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
190 190
191/* Ill-behaved frontend determination: Can there be this many requests? */
192#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
193 (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
194
195
191#define RING_PUSH_REQUESTS(_r) do { \ 196#define RING_PUSH_REQUESTS(_r) do { \
192 wmb(); /* back sees requests /before/ updated producer index */ \ 197 wmb(); /* back sees requests /before/ updated producer index */ \
193 (_r)->sring->req_prod = (_r)->req_prod_pvt; \ 198 (_r)->sring->req_prod = (_r)->req_prod_pvt; \
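
A sketch of a backend consume loop using the new check, modeled on how blkback polls its ring; the function name is illustrative:

    static int consume_requests(struct blkif_back_ring *ring)
    {
            RING_IDX rp = ring->sring->req_prod;

            rmb();  /* read requests only after reading the producer index */

            /* More outstanding requests than ring slots: corruption. */
            if (RING_REQUEST_PROD_OVERFLOW(ring, rp))
                    return -EACCES;

            while (ring->req_cons != rp) {
                    /* ... copy out and handle the request at req_cons ... */
                    ring->req_cons++;
            }
            return 0;
    }
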
diff --git a/init/Kconfig b/init/Kconfig
index 247084be0590..fed81b576f29 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -955,7 +955,7 @@ config MEMCG_SWAP_ENABLED
955 Memory Resource Controller Swap Extension comes with its price in 955 Memory Resource Controller Swap Extension comes with its price in
956 a bigger memory consumption. General purpose distribution kernels 956 a bigger memory consumption. General purpose distribution kernels
957 which want to enable the feature but keep it disabled by default 957 which want to enable the feature but keep it disabled by default
958 and let the user enable it by swapaccount boot command line 958 and let the user enable it by swapaccount=1 boot command line
959 parameter should have this option unselected. 959 parameter should have this option unselected.
960 Those who want the feature enabled by default should 960 Those who want the feature enabled by default should
961 select this option (if, for some reason, they need to disable it 961 select this option (if, for some reason, they need to disable it
diff --git a/kernel/Makefile b/kernel/Makefile
index 470839d1a30e..35ef1185e359 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the linux kernel. 2# Makefile for the linux kernel.
3# 3#
4 4
5obj-y = fork.o exec_domain.o panic.o printk.o \ 5obj-y = fork.o exec_domain.o panic.o \
6 cpu.o exit.o itimer.o time.o softirq.o resource.o \ 6 cpu.o exit.o itimer.o time.o softirq.o resource.o \
7 sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \ 7 sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
8 signal.o sys.o kmod.o workqueue.o pid.o task_work.o \ 8 signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
@@ -24,6 +24,7 @@ endif
24 24
25obj-y += sched/ 25obj-y += sched/
26obj-y += power/ 26obj-y += power/
27obj-y += printk/
27obj-y += cpu/ 28obj-y += cpu/
28 29
29obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o 30obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 0e0b20b8c5db..781845a013ab 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1845,36 +1845,43 @@ out:
1845EXPORT_SYMBOL_GPL(cgroup_path); 1845EXPORT_SYMBOL_GPL(cgroup_path);
1846 1846
1847/** 1847/**
1848 * task_cgroup_path_from_hierarchy - cgroup path of a task on a hierarchy 1848 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
1849 * @task: target task 1849 * @task: target task
1850 * @hierarchy_id: the hierarchy to look up @task's cgroup from
1851 * @buf: the buffer to write the path into 1850 * @buf: the buffer to write the path into
1852 * @buflen: the length of the buffer 1851 * @buflen: the length of the buffer
1853 * 1852 *
1854 * Determine @task's cgroup on the hierarchy specified by @hierarchy_id and 1853 * Determine @task's cgroup on the first (the one with the lowest non-zero
1855 * copy its path into @buf. This function grabs cgroup_mutex and shouldn't 1854 * hierarchy_id) cgroup hierarchy and copy its path into @buf. This
1856 * be used inside locks used by cgroup controller callbacks. 1855 * function grabs cgroup_mutex and shouldn't be used inside locks used by
1856 * cgroup controller callbacks.
1857 *
1858 * Returns 0 on success, fails with -%ENAMETOOLONG if @buflen is too short.
1857 */ 1859 */
1858int task_cgroup_path_from_hierarchy(struct task_struct *task, int hierarchy_id, 1860int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
1859 char *buf, size_t buflen)
1860{ 1861{
1861 struct cgroupfs_root *root; 1862 struct cgroupfs_root *root;
1862 struct cgroup *cgrp = NULL; 1863 struct cgroup *cgrp;
1863 int ret = -ENOENT; 1864 int hierarchy_id = 1, ret = 0;
1865
1866 if (buflen < 2)
1867 return -ENAMETOOLONG;
1864 1868
1865 mutex_lock(&cgroup_mutex); 1869 mutex_lock(&cgroup_mutex);
1866 1870
1867 root = idr_find(&cgroup_hierarchy_idr, hierarchy_id); 1871 root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
1872
1868 if (root) { 1873 if (root) {
1869 cgrp = task_cgroup_from_root(task, root); 1874 cgrp = task_cgroup_from_root(task, root);
1870 ret = cgroup_path(cgrp, buf, buflen); 1875 ret = cgroup_path(cgrp, buf, buflen);
1876 } else {
1877 /* if no hierarchy exists, everyone is in "/" */
1878 memcpy(buf, "/", 2);
1871 } 1879 }
1872 1880
1873 mutex_unlock(&cgroup_mutex); 1881 mutex_unlock(&cgroup_mutex);
1874
1875 return ret; 1882 return ret;
1876} 1883}
1877EXPORT_SYMBOL_GPL(task_cgroup_path_from_hierarchy); 1884EXPORT_SYMBOL_GPL(task_cgroup_path);
1878 1885
1879/* 1886/*
1880 * Control Group taskset 1887 * Control Group taskset
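
A caller sketch for the simplified interface; it must run in sleepable context, since cgroup_mutex is taken inside, and the function name is illustrative:

    static void report_cgroup(struct task_struct *task)
    {
            char *buf = kmalloc(PATH_MAX, GFP_KERNEL);

            if (buf && !task_cgroup_path(task, buf, PATH_MAX))
                    pr_info("%s lives in cgroup %s\n", task->comm, buf);
            kfree(buf);
    }
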
@@ -4328,8 +4335,10 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4328 } 4335 }
4329 4336
4330 err = percpu_ref_init(&css->refcnt, css_release); 4337 err = percpu_ref_init(&css->refcnt, css_release);
4331 if (err) 4338 if (err) {
4339 ss->css_free(cgrp);
4332 goto err_free_all; 4340 goto err_free_all;
4341 }
4333 4342
4334 init_cgroup_css(css, ss, cgrp); 4343 init_cgroup_css(css, ss, cgrp);
4335 4344
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index e5657788fedd..ea1966db34f2 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -475,13 +475,17 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
475 475
476 /* 476 /*
477 * Cpusets with tasks - existing or newly being attached - can't 477 * Cpusets with tasks - existing or newly being attached - can't
478 * have empty cpus_allowed or mems_allowed. 478 * be changed to have empty cpus_allowed or mems_allowed.
479 */ 479 */
480 ret = -ENOSPC; 480 ret = -ENOSPC;
481 if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) && 481 if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) {
482 (cpumask_empty(trial->cpus_allowed) && 482 if (!cpumask_empty(cur->cpus_allowed) &&
483 nodes_empty(trial->mems_allowed))) 483 cpumask_empty(trial->cpus_allowed))
484 goto out; 484 goto out;
485 if (!nodes_empty(cur->mems_allowed) &&
486 nodes_empty(trial->mems_allowed))
487 goto out;
488 }
485 489
486 ret = 0; 490 ret = 0;
487out: 491out:
@@ -1608,11 +1612,13 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1608{ 1612{
1609 struct cpuset *cs = cgroup_cs(cgrp); 1613 struct cpuset *cs = cgroup_cs(cgrp);
1610 cpuset_filetype_t type = cft->private; 1614 cpuset_filetype_t type = cft->private;
1611 int retval = -ENODEV; 1615 int retval = 0;
1612 1616
1613 mutex_lock(&cpuset_mutex); 1617 mutex_lock(&cpuset_mutex);
1614 if (!is_cpuset_online(cs)) 1618 if (!is_cpuset_online(cs)) {
1619 retval = -ENODEV;
1615 goto out_unlock; 1620 goto out_unlock;
1621 }
1616 1622
1617 switch (type) { 1623 switch (type) {
1618 case FILE_CPU_EXCLUSIVE: 1624 case FILE_CPU_EXCLUSIVE:
diff --git a/kernel/fork.c b/kernel/fork.c
index 403d2bb8a968..e23bb19e2a3e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1679,6 +1679,12 @@ SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
1679 int __user *, parent_tidptr, 1679 int __user *, parent_tidptr,
1680 int __user *, child_tidptr, 1680 int __user *, child_tidptr,
1681 int, tls_val) 1681 int, tls_val)
1682#elif defined(CONFIG_CLONE_BACKWARDS3)
1683SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
1684 int, stack_size,
1685 int __user *, parent_tidptr,
1686 int __user *, child_tidptr,
1687 int, tls_val)
1682#else 1688#else
1683SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 1689SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
1684 int __user *, parent_tidptr, 1690 int __user *, parent_tidptr,
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 8b2afc1c9df0..b462fa197517 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -33,7 +33,7 @@ static DEFINE_SPINLOCK(freezer_lock);
33 */ 33 */
34bool freezing_slow_path(struct task_struct *p) 34bool freezing_slow_path(struct task_struct *p)
35{ 35{
36 if (p->flags & PF_NOFREEZE) 36 if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
37 return false; 37 return false;
38 38
39 if (pm_nosig_freezing || cgroup_freezing(p)) 39 if (pm_nosig_freezing || cgroup_freezing(p))
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ff05f4bd86eb..a52ee7bb830d 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -686,7 +686,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
686 might_sleep(); 686 might_sleep();
687 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 687 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
688 0, &ctx->dep_map, _RET_IP_, ctx); 688 0, &ctx->dep_map, _RET_IP_, ctx);
689 if (!ret && ctx->acquired > 0) 689 if (!ret && ctx->acquired > 1)
690 return ww_mutex_deadlock_injection(lock, ctx); 690 return ww_mutex_deadlock_injection(lock, ctx);
691 691
692 return ret; 692 return ret;
@@ -702,7 +702,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
702 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 702 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
703 0, &ctx->dep_map, _RET_IP_, ctx); 703 0, &ctx->dep_map, _RET_IP_, ctx);
704 704
705 if (!ret && ctx->acquired > 0) 705 if (!ret && ctx->acquired > 1)
706 return ww_mutex_deadlock_injection(lock, ctx); 706 return ww_mutex_deadlock_injection(lock, ctx);
707 707
708 return ret; 708 return ret;
diff --git a/kernel/power/process.c b/kernel/power/process.c
index fc0df8486449..06ec8869dbf1 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -109,6 +109,8 @@ static int try_to_freeze_tasks(bool user_only)
109 109
110/** 110/**
111 * freeze_processes - Signal user space processes to enter the refrigerator. 111 * freeze_processes - Signal user space processes to enter the refrigerator.
112 * The current thread will not be frozen. The same process that calls
113 * freeze_processes must later call thaw_processes.
112 * 114 *
113 * On success, returns 0. On failure, -errno and system is fully thawed. 115 * On success, returns 0. On failure, -errno and system is fully thawed.
114 */ 116 */
@@ -120,6 +122,9 @@ int freeze_processes(void)
120 if (error) 122 if (error)
121 return error; 123 return error;
122 124
125 /* Make sure this task doesn't get frozen */
126 current->flags |= PF_SUSPEND_TASK;
127
123 if (!pm_freezing) 128 if (!pm_freezing)
124 atomic_inc(&system_freezing_cnt); 129 atomic_inc(&system_freezing_cnt);
125 130
@@ -168,6 +173,7 @@ int freeze_kernel_threads(void)
168void thaw_processes(void) 173void thaw_processes(void)
169{ 174{
170 struct task_struct *g, *p; 175 struct task_struct *g, *p;
176 struct task_struct *curr = current;
171 177
172 if (pm_freezing) 178 if (pm_freezing)
173 atomic_dec(&system_freezing_cnt); 179 atomic_dec(&system_freezing_cnt);
@@ -182,10 +188,15 @@ void thaw_processes(void)
182 188
183 read_lock(&tasklist_lock); 189 read_lock(&tasklist_lock);
184 do_each_thread(g, p) { 190 do_each_thread(g, p) {
191 /* No other threads should have PF_SUSPEND_TASK set */
192 WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
185 __thaw_task(p); 193 __thaw_task(p);
186 } while_each_thread(g, p); 194 } while_each_thread(g, p);
187 read_unlock(&tasklist_lock); 195 read_unlock(&tasklist_lock);
188 196
197 WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
198 curr->flags &= ~PF_SUSPEND_TASK;
199
189 usermodehelper_enable(); 200 usermodehelper_enable();
190 201
191 schedule(); 202 schedule();
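
The pairing rule from the new kerneldoc, sketched; enter_state_sketch() stands in for the real suspend work:

    static int suspend_sketch(void)
    {
            int error = freeze_processes();  /* sets PF_SUSPEND_TASK on current */

            if (error)
                    return error;            /* system is fully thawed on failure */

            error = enter_state_sketch();    /* hypothetical suspend step */

            thaw_processes();                /* same task clears PF_SUSPEND_TASK */
            return error;
    }
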
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 06fe28589e9c..a394297f8b2f 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -296,6 +296,17 @@ int pm_qos_request_active(struct pm_qos_request *req)
296} 296}
297EXPORT_SYMBOL_GPL(pm_qos_request_active); 297EXPORT_SYMBOL_GPL(pm_qos_request_active);
298 298
299static void __pm_qos_update_request(struct pm_qos_request *req,
300 s32 new_value)
301{
302 trace_pm_qos_update_request(req->pm_qos_class, new_value);
303
304 if (new_value != req->node.prio)
305 pm_qos_update_target(
306 pm_qos_array[req->pm_qos_class]->constraints,
307 &req->node, PM_QOS_UPDATE_REQ, new_value);
308}
309
299/** 310/**
300 * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout 311 * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
301 * @work: work struct for the delayed work (timeout) 312 * @work: work struct for the delayed work (timeout)
@@ -308,7 +319,7 @@ static void pm_qos_work_fn(struct work_struct *work)
308 struct pm_qos_request, 319 struct pm_qos_request,
309 work); 320 work);
310 321
311 pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE); 322 __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
312} 323}
313 324
314/** 325/**
@@ -364,12 +375,7 @@ void pm_qos_update_request(struct pm_qos_request *req,
364 } 375 }
365 376
366 cancel_delayed_work_sync(&req->work); 377 cancel_delayed_work_sync(&req->work);
367 378 __pm_qos_update_request(req, new_value);
368 trace_pm_qos_update_request(req->pm_qos_class, new_value);
369 if (new_value != req->node.prio)
370 pm_qos_update_target(
371 pm_qos_array[req->pm_qos_class]->constraints,
372 &req->node, PM_QOS_UPDATE_REQ, new_value);
373} 379}
374EXPORT_SYMBOL_GPL(pm_qos_update_request); 380EXPORT_SYMBOL_GPL(pm_qos_update_request);
375 381
diff --git a/kernel/printk/Makefile b/kernel/printk/Makefile
new file mode 100644
index 000000000000..85405bdcf2b3
--- /dev/null
+++ b/kernel/printk/Makefile
@@ -0,0 +1,2 @@
1obj-y = printk.o
2obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c
new file mode 100644
index 000000000000..276762f3a460
--- /dev/null
+++ b/kernel/printk/braille.c
@@ -0,0 +1,49 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
3#include <linux/kernel.h>
4#include <linux/console.h>
5#include <linux/string.h>
6
7#include "console_cmdline.h"
8#include "braille.h"
9
10char *_braille_console_setup(char **str, char **brl_options)
11{
12 if (!memcmp(*str, "brl,", 4)) {
13 *brl_options = "";
14 *str += 4;
15 } else if (!memcmp(*str, "brl=", 4)) {
16 *brl_options = *str + 4;
17 *str = strchr(*brl_options, ',');
18 if (!*str)
19 pr_err("need port name after brl=\n");
20 else
21 *((*str)++) = 0;
22 } else
23 return NULL;
24
25 return *str;
26}
27
28int
29_braille_register_console(struct console *console, struct console_cmdline *c)
30{
31 int rtn = 0;
32
33 if (c->brl_options) {
34 console->flags |= CON_BRL;
35 rtn = braille_register_console(console, c->index, c->options,
36 c->brl_options);
37 }
38
39 return rtn;
40}
41
42int
43_braille_unregister_console(struct console *console)
44{
45 if (console->flags & CON_BRL)
46 return braille_unregister_console(console);
47
48 return 0;
49}
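
For reference, the two command-line forms this parser accepts look like the following; the port name and options are illustrative:

    console=brl,ttyS0
    console=brl=<braille-options>,ttyS0
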
diff --git a/kernel/printk/braille.h b/kernel/printk/braille.h
new file mode 100644
index 000000000000..769d771145c8
--- /dev/null
+++ b/kernel/printk/braille.h
@@ -0,0 +1,48 @@
1#ifndef _PRINTK_BRAILLE_H
2#define _PRINTK_BRAILLE_H
3
4#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
5
6static inline void
7braille_set_options(struct console_cmdline *c, char *brl_options)
8{
9 c->brl_options = brl_options;
10}
11
12char *
13_braille_console_setup(char **str, char **brl_options);
14
15int
16_braille_register_console(struct console *console, struct console_cmdline *c);
17
18int
19_braille_unregister_console(struct console *console);
20
21#else
22
23static inline void
24braille_set_options(struct console_cmdline *c, char *brl_options)
25{
26}
27
28static inline char *
29_braille_console_setup(char **str, char **brl_options)
30{
31 return NULL;
32}
33
34static inline int
35_braille_register_console(struct console *console, struct console_cmdline *c)
36{
37 return 0;
38}
39
40static inline int
41_braille_unregister_console(struct console *console)
42{
43 return 0;
44}
45
46#endif
47
48#endif
diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h
new file mode 100644
index 000000000000..cbd69d842341
--- /dev/null
+++ b/kernel/printk/console_cmdline.h
@@ -0,0 +1,14 @@
1#ifndef _CONSOLE_CMDLINE_H
2#define _CONSOLE_CMDLINE_H
3
4struct console_cmdline
5{
6 char name[8]; /* Name of the driver */
7 int index; /* Minor dev. to use */
8 char *options; /* Options for the driver */
9#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
10 char *brl_options; /* Options for braille driver */
11#endif
12};
13
14#endif
diff --git a/kernel/printk.c b/kernel/printk/printk.c
index 69b0890ed7e5..5b5a7080e2a5 100644
--- a/kernel/printk.c
+++ b/kernel/printk/printk.c
@@ -51,6 +51,9 @@
51#define CREATE_TRACE_POINTS 51#define CREATE_TRACE_POINTS
52#include <trace/events/printk.h> 52#include <trace/events/printk.h>
53 53
54#include "console_cmdline.h"
55#include "braille.h"
56
54/* printk's without a loglevel use this.. */ 57/* printk's without a loglevel use this.. */
55#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL 58#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
56 59
@@ -105,19 +108,11 @@ static struct console *exclusive_console;
105/* 108/*
106 * Array of consoles built from command line options (console=) 109 * Array of consoles built from command line options (console=)
107 */ 110 */
108struct console_cmdline
109{
110 char name[8]; /* Name of the driver */
111 int index; /* Minor dev. to use */
112 char *options; /* Options for the driver */
113#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
114 char *brl_options; /* Options for braille driver */
115#endif
116};
117 111
118#define MAX_CMDLINECONSOLES 8 112#define MAX_CMDLINECONSOLES 8
119 113
120static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES]; 114static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
115
121static int selected_console = -1; 116static int selected_console = -1;
122static int preferred_console = -1; 117static int preferred_console = -1;
123int console_set_on_cmdline; 118int console_set_on_cmdline;
@@ -178,7 +173,7 @@ static int console_may_schedule;
178 * 67 "g" 173 * 67 "g"
179 * 0032 00 00 00 padding to next message header 174 * 0032 00 00 00 padding to next message header
180 * 175 *
181 * The 'struct log' buffer header must never be directly exported to 176 * The 'struct printk_log' buffer header must never be directly exported to
182 * userspace, it is a kernel-private implementation detail that might 177 * userspace, it is a kernel-private implementation detail that might
183 * need to be changed in the future, when the requirements change. 178 * need to be changed in the future, when the requirements change.
184 * 179 *
@@ -200,7 +195,7 @@ enum log_flags {
200 LOG_CONT = 8, /* text is a fragment of a continuation line */ 195 LOG_CONT = 8, /* text is a fragment of a continuation line */
201}; 196};
202 197
203struct log { 198struct printk_log {
204 u64 ts_nsec; /* timestamp in nanoseconds */ 199 u64 ts_nsec; /* timestamp in nanoseconds */
205 u16 len; /* length of entire record */ 200 u16 len; /* length of entire record */
206 u16 text_len; /* length of text buffer */ 201 u16 text_len; /* length of text buffer */
@@ -248,7 +243,7 @@ static u32 clear_idx;
248#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 243#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
249#define LOG_ALIGN 4 244#define LOG_ALIGN 4
250#else 245#else
251#define LOG_ALIGN __alignof__(struct log) 246#define LOG_ALIGN __alignof__(struct printk_log)
252#endif 247#endif
253#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) 248#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
254static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); 249static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
@@ -259,35 +254,35 @@ static u32 log_buf_len = __LOG_BUF_LEN;
259static volatile unsigned int logbuf_cpu = UINT_MAX; 254static volatile unsigned int logbuf_cpu = UINT_MAX;
260 255
261/* human readable text of the record */ 256/* human readable text of the record */
262static char *log_text(const struct log *msg) 257static char *log_text(const struct printk_log *msg)
263{ 258{
264 return (char *)msg + sizeof(struct log); 259 return (char *)msg + sizeof(struct printk_log);
265} 260}
266 261
267/* optional key/value pair dictionary attached to the record */ 262/* optional key/value pair dictionary attached to the record */
268static char *log_dict(const struct log *msg) 263static char *log_dict(const struct printk_log *msg)
269{ 264{
270 return (char *)msg + sizeof(struct log) + msg->text_len; 265 return (char *)msg + sizeof(struct printk_log) + msg->text_len;
271} 266}
272 267
273/* get record by index; idx must point to valid msg */ 268/* get record by index; idx must point to valid msg */
274static struct log *log_from_idx(u32 idx) 269static struct printk_log *log_from_idx(u32 idx)
275{ 270{
276 struct log *msg = (struct log *)(log_buf + idx); 271 struct printk_log *msg = (struct printk_log *)(log_buf + idx);
277 272
278 /* 273 /*
279 * A length == 0 record is the end of buffer marker. Wrap around and 274 * A length == 0 record is the end of buffer marker. Wrap around and
280 * read the message at the start of the buffer. 275 * read the message at the start of the buffer.
281 */ 276 */
282 if (!msg->len) 277 if (!msg->len)
283 return (struct log *)log_buf; 278 return (struct printk_log *)log_buf;
284 return msg; 279 return msg;
285} 280}
286 281
287/* get next record; idx must point to valid msg */ 282/* get next record; idx must point to valid msg */
288static u32 log_next(u32 idx) 283static u32 log_next(u32 idx)
289{ 284{
290 struct log *msg = (struct log *)(log_buf + idx); 285 struct printk_log *msg = (struct printk_log *)(log_buf + idx);
291 286
292 /* length == 0 indicates the end of the buffer; wrap */ 287 /* length == 0 indicates the end of the buffer; wrap */
293 /* 288 /*
@@ -296,7 +291,7 @@ static u32 log_next(u32 idx)
296 * return the one after that. 291 * return the one after that.
297 */ 292 */
298 if (!msg->len) { 293 if (!msg->len) {
299 msg = (struct log *)log_buf; 294 msg = (struct printk_log *)log_buf;
300 return msg->len; 295 return msg->len;
301 } 296 }
302 return idx + msg->len; 297 return idx + msg->len;
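
A sketch of walking the ring with these helpers; the zero-length header is the wrap marker, which is why both restart at log_buf:

    static void walk_records(void)
    {
            u64 seq = log_first_seq;
            u32 idx = log_first_idx;

            while (seq < log_next_seq) {
                    struct printk_log *msg = log_from_idx(idx);

                    /* ... consume log_text(msg) / log_dict(msg) ... */
                    idx = log_next(idx);
                    seq++;
            }
    }
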
@@ -308,11 +303,11 @@ static void log_store(int facility, int level,
308 const char *dict, u16 dict_len, 303 const char *dict, u16 dict_len,
309 const char *text, u16 text_len) 304 const char *text, u16 text_len)
310{ 305{
311 struct log *msg; 306 struct printk_log *msg;
312 u32 size, pad_len; 307 u32 size, pad_len;
313 308
314 /* number of '\0' padding bytes to next message */ 309 /* number of '\0' padding bytes to next message */
315 size = sizeof(struct log) + text_len + dict_len; 310 size = sizeof(struct printk_log) + text_len + dict_len;
316 pad_len = (-size) & (LOG_ALIGN - 1); 311 pad_len = (-size) & (LOG_ALIGN - 1);
317 size += pad_len; 312 size += pad_len;
318 313
@@ -324,7 +319,7 @@ static void log_store(int facility, int level,
324 else 319 else
325 free = log_first_idx - log_next_idx; 320 free = log_first_idx - log_next_idx;
326 321
327 if (free > size + sizeof(struct log)) 322 if (free > size + sizeof(struct printk_log))
328 break; 323 break;
329 324
330 /* drop old messages until we have enough contiguous space */ 325 /* drop old messages until we have enough contiguous space */
@@ -332,18 +327,18 @@ static void log_store(int facility, int level,
332 log_first_seq++; 327 log_first_seq++;
333 } 328 }
334 329
335 if (log_next_idx + size + sizeof(struct log) >= log_buf_len) { 330 if (log_next_idx + size + sizeof(struct printk_log) >= log_buf_len) {
336 /* 331 /*
337 * This message + an additional empty header does not fit 332 * This message + an additional empty header does not fit
338 * at the end of the buffer. Add an empty header with len == 0 333 * at the end of the buffer. Add an empty header with len == 0
339 * to signify a wrap around. 334 * to signify a wrap around.
340 */ 335 */
341 memset(log_buf + log_next_idx, 0, sizeof(struct log)); 336 memset(log_buf + log_next_idx, 0, sizeof(struct printk_log));
342 log_next_idx = 0; 337 log_next_idx = 0;
343 } 338 }
344 339
345 /* fill message */ 340 /* fill message */
346 msg = (struct log *)(log_buf + log_next_idx); 341 msg = (struct printk_log *)(log_buf + log_next_idx);
347 memcpy(log_text(msg), text, text_len); 342 memcpy(log_text(msg), text, text_len);
348 msg->text_len = text_len; 343 msg->text_len = text_len;
349 memcpy(log_dict(msg), dict, dict_len); 344 memcpy(log_dict(msg), dict, dict_len);
@@ -356,7 +351,7 @@ static void log_store(int facility, int level,
356 else 351 else
357 msg->ts_nsec = local_clock(); 352 msg->ts_nsec = local_clock();
358 memset(log_dict(msg) + dict_len, 0, pad_len); 353 memset(log_dict(msg) + dict_len, 0, pad_len);
359 msg->len = sizeof(struct log) + text_len + dict_len + pad_len; 354 msg->len = sizeof(struct printk_log) + text_len + dict_len + pad_len;
360 355
361 /* insert message */ 356 /* insert message */
362 log_next_idx += msg->len; 357 log_next_idx += msg->len;
@@ -479,7 +474,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
479 size_t count, loff_t *ppos) 474 size_t count, loff_t *ppos)
480{ 475{
481 struct devkmsg_user *user = file->private_data; 476 struct devkmsg_user *user = file->private_data;
482 struct log *msg; 477 struct printk_log *msg;
483 u64 ts_usec; 478 u64 ts_usec;
484 size_t i; 479 size_t i;
485 char cont = '-'; 480 char cont = '-';
@@ -724,14 +719,14 @@ void log_buf_kexec_setup(void)
724 VMCOREINFO_SYMBOL(log_first_idx); 719 VMCOREINFO_SYMBOL(log_first_idx);
725 VMCOREINFO_SYMBOL(log_next_idx); 720 VMCOREINFO_SYMBOL(log_next_idx);
726 /* 721 /*
727 * Export struct log size and field offsets. User space tools can 722 * Export struct printk_log size and field offsets. User space tools can
728 * parse it and detect any changes to structure down the line. 723 * parse it and detect any changes to structure down the line.
729 */ 724 */
730 VMCOREINFO_STRUCT_SIZE(log); 725 VMCOREINFO_STRUCT_SIZE(printk_log);
731 VMCOREINFO_OFFSET(log, ts_nsec); 726 VMCOREINFO_OFFSET(printk_log, ts_nsec);
732 VMCOREINFO_OFFSET(log, len); 727 VMCOREINFO_OFFSET(printk_log, len);
733 VMCOREINFO_OFFSET(log, text_len); 728 VMCOREINFO_OFFSET(printk_log, text_len);
734 VMCOREINFO_OFFSET(log, dict_len); 729 VMCOREINFO_OFFSET(printk_log, dict_len);
735} 730}
736#endif 731#endif
737 732
@@ -884,7 +879,7 @@ static size_t print_time(u64 ts, char *buf)
884 (unsigned long)ts, rem_nsec / 1000); 879 (unsigned long)ts, rem_nsec / 1000);
885} 880}
886 881
887static size_t print_prefix(const struct log *msg, bool syslog, char *buf) 882static size_t print_prefix(const struct printk_log *msg, bool syslog, char *buf)
888{ 883{
889 size_t len = 0; 884 size_t len = 0;
890 unsigned int prefix = (msg->facility << 3) | msg->level; 885 unsigned int prefix = (msg->facility << 3) | msg->level;
@@ -907,7 +902,7 @@ static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
907 return len; 902 return len;
908} 903}
909 904
910static size_t msg_print_text(const struct log *msg, enum log_flags prev, 905static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
911 bool syslog, char *buf, size_t size) 906 bool syslog, char *buf, size_t size)
912{ 907{
913 const char *text = log_text(msg); 908 const char *text = log_text(msg);
@@ -969,7 +964,7 @@ static size_t msg_print_text(const struct log *msg, enum log_flags prev,
969static int syslog_print(char __user *buf, int size) 964static int syslog_print(char __user *buf, int size)
970{ 965{
971 char *text; 966 char *text;
972 struct log *msg; 967 struct printk_log *msg;
973 int len = 0; 968 int len = 0;
974 969
975 text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); 970 text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
@@ -1060,7 +1055,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
1060 idx = clear_idx; 1055 idx = clear_idx;
1061 prev = 0; 1056 prev = 0;
1062 while (seq < log_next_seq) { 1057 while (seq < log_next_seq) {
1063 struct log *msg = log_from_idx(idx); 1058 struct printk_log *msg = log_from_idx(idx);
1064 1059
1065 len += msg_print_text(msg, prev, true, NULL, 0); 1060 len += msg_print_text(msg, prev, true, NULL, 0);
1066 prev = msg->flags; 1061 prev = msg->flags;
@@ -1073,7 +1068,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
1073 idx = clear_idx; 1068 idx = clear_idx;
1074 prev = 0; 1069 prev = 0;
1075 while (len > size && seq < log_next_seq) { 1070 while (len > size && seq < log_next_seq) {
1076 struct log *msg = log_from_idx(idx); 1071 struct printk_log *msg = log_from_idx(idx);
1077 1072
1078 len -= msg_print_text(msg, prev, true, NULL, 0); 1073 len -= msg_print_text(msg, prev, true, NULL, 0);
1079 prev = msg->flags; 1074 prev = msg->flags;
@@ -1087,7 +1082,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
1087 len = 0; 1082 len = 0;
1088 prev = 0; 1083 prev = 0;
1089 while (len >= 0 && seq < next_seq) { 1084 while (len >= 0 && seq < next_seq) {
1090 struct log *msg = log_from_idx(idx); 1085 struct printk_log *msg = log_from_idx(idx);
1091 int textlen; 1086 int textlen;
1092 1087
1093 textlen = msg_print_text(msg, prev, true, text, 1088 textlen = msg_print_text(msg, prev, true, text,
@@ -1233,7 +1228,7 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
1233 1228
1234 error = 0; 1229 error = 0;
1235 while (seq < log_next_seq) { 1230 while (seq < log_next_seq) {
1236 struct log *msg = log_from_idx(idx); 1231 struct printk_log *msg = log_from_idx(idx);
1237 1232
1238 error += msg_print_text(msg, prev, true, NULL, 0); 1233 error += msg_print_text(msg, prev, true, NULL, 0);
1239 idx = log_next(idx); 1234 idx = log_next(idx);
@@ -1719,10 +1714,10 @@ static struct cont {
1719 u8 level; 1714 u8 level;
1720 bool flushed:1; 1715 bool flushed:1;
1721} cont; 1716} cont;
1722static struct log *log_from_idx(u32 idx) { return NULL; } 1717static struct printk_log *log_from_idx(u32 idx) { return NULL; }
1723static u32 log_next(u32 idx) { return 0; } 1718static u32 log_next(u32 idx) { return 0; }
1724static void call_console_drivers(int level, const char *text, size_t len) {} 1719static void call_console_drivers(int level, const char *text, size_t len) {}
1725static size_t msg_print_text(const struct log *msg, enum log_flags prev, 1720static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
1726 bool syslog, char *buf, size_t size) { return 0; } 1721 bool syslog, char *buf, size_t size) { return 0; }
1727static size_t cont_print_text(char *text, size_t size) { return 0; } 1722static size_t cont_print_text(char *text, size_t size) { return 0; }
1728 1723
@@ -1761,23 +1756,23 @@ static int __add_preferred_console(char *name, int idx, char *options,
1761 * See if this tty is not yet registered, and 1756 * See if this tty is not yet registered, and
1762 * if we have a slot free. 1757 * if we have a slot free.
1763 */ 1758 */
1764 for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++) 1759 for (i = 0, c = console_cmdline;
1765 if (strcmp(console_cmdline[i].name, name) == 0 && 1760 i < MAX_CMDLINECONSOLES && c->name[0];
1766 console_cmdline[i].index == idx) { 1761 i++, c++) {
1767 if (!brl_options) 1762 if (strcmp(c->name, name) == 0 && c->index == idx) {
1768 selected_console = i; 1763 if (!brl_options)
1769 return 0; 1764 selected_console = i;
1765 return 0;
1770 } 1766 }
1767 }
1771 if (i == MAX_CMDLINECONSOLES) 1768 if (i == MAX_CMDLINECONSOLES)
1772 return -E2BIG; 1769 return -E2BIG;
1773 if (!brl_options) 1770 if (!brl_options)
1774 selected_console = i; 1771 selected_console = i;
1775 c = &console_cmdline[i];
1776 strlcpy(c->name, name, sizeof(c->name)); 1772 strlcpy(c->name, name, sizeof(c->name));
1777 c->options = options; 1773 c->options = options;
1778#ifdef CONFIG_A11Y_BRAILLE_CONSOLE 1774 braille_set_options(c, brl_options);
1779 c->brl_options = brl_options; 1775
1780#endif
1781 c->index = idx; 1776 c->index = idx;
1782 return 0; 1777 return 0;
1783} 1778}
@@ -1790,20 +1785,8 @@ static int __init console_setup(char *str)
1790 char *s, *options, *brl_options = NULL; 1785 char *s, *options, *brl_options = NULL;
1791 int idx; 1786 int idx;
1792 1787
1793#ifdef CONFIG_A11Y_BRAILLE_CONSOLE 1788 if (_braille_console_setup(&str, &brl_options))
1794 if (!memcmp(str, "brl,", 4)) { 1789 return 1;
1795 brl_options = "";
1796 str += 4;
1797 } else if (!memcmp(str, "brl=", 4)) {
1798 brl_options = str + 4;
1799 str = strchr(brl_options, ',');
1800 if (!str) {
1801 printk(KERN_ERR "need port name after brl=\n");
1802 return 1;
1803 }
1804 *(str++) = 0;
1805 }
1806#endif
1807 1790
1808 /* 1791 /*
1809 * Decode str into name, index, options. 1792 * Decode str into name, index, options.
@@ -1858,15 +1841,15 @@ int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, cha
1858 struct console_cmdline *c; 1841 struct console_cmdline *c;
1859 int i; 1842 int i;
1860 1843
1861 for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++) 1844 for (i = 0, c = console_cmdline;
1862 if (strcmp(console_cmdline[i].name, name) == 0 && 1845 i < MAX_CMDLINECONSOLES && c->name[0];
1863 console_cmdline[i].index == idx) { 1846 i++, c++)
1864 c = &console_cmdline[i]; 1847 if (strcmp(c->name, name) == 0 && c->index == idx) {
1865 strlcpy(c->name, name_new, sizeof(c->name)); 1848 strlcpy(c->name, name_new, sizeof(c->name));
1866 c->name[sizeof(c->name) - 1] = 0; 1849 c->name[sizeof(c->name) - 1] = 0;
1867 c->options = options; 1850 c->options = options;
1868 c->index = idx_new; 1851 c->index = idx_new;
1869 return i; 1852 return i;
1870 } 1853 }
1871 /* not found */ 1854 /* not found */
1872 return -1; 1855 return -1;
@@ -2046,7 +2029,7 @@ void console_unlock(void)
2046 console_cont_flush(text, sizeof(text)); 2029 console_cont_flush(text, sizeof(text));
2047again: 2030again:
2048 for (;;) { 2031 for (;;) {
2049 struct log *msg; 2032 struct printk_log *msg;
2050 size_t len; 2033 size_t len;
2051 int level; 2034 int level;
2052 2035
@@ -2241,6 +2224,7 @@ void register_console(struct console *newcon)
2241 int i; 2224 int i;
2242 unsigned long flags; 2225 unsigned long flags;
2243 struct console *bcon = NULL; 2226 struct console *bcon = NULL;
2227 struct console_cmdline *c;
2244 2228
2245 /* 2229 /*
2246 * before we register a new CON_BOOT console, make sure we don't 2230 * before we register a new CON_BOOT console, make sure we don't
@@ -2288,30 +2272,25 @@ void register_console(struct console *newcon)
2288 * See if this console matches one we selected on 2272 * See if this console matches one we selected on
2289 * the command line. 2273 * the command line.
2290 */ 2274 */
2291 for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; 2275 for (i = 0, c = console_cmdline;
2292 i++) { 2276 i < MAX_CMDLINECONSOLES && c->name[0];
2293 if (strcmp(console_cmdline[i].name, newcon->name) != 0) 2277 i++, c++) {
2278 if (strcmp(c->name, newcon->name) != 0)
2294 continue; 2279 continue;
2295 if (newcon->index >= 0 && 2280 if (newcon->index >= 0 &&
2296 newcon->index != console_cmdline[i].index) 2281 newcon->index != c->index)
2297 continue; 2282 continue;
2298 if (newcon->index < 0) 2283 if (newcon->index < 0)
2299 newcon->index = console_cmdline[i].index; 2284 newcon->index = c->index;
2300#ifdef CONFIG_A11Y_BRAILLE_CONSOLE 2285
2301 if (console_cmdline[i].brl_options) { 2286 if (_braille_register_console(newcon, c))
2302 newcon->flags |= CON_BRL;
2303 braille_register_console(newcon,
2304 console_cmdline[i].index,
2305 console_cmdline[i].options,
2306 console_cmdline[i].brl_options);
2307 return; 2287 return;
2308 } 2288
2309#endif
2310 if (newcon->setup && 2289 if (newcon->setup &&
2311 newcon->setup(newcon, console_cmdline[i].options) != 0) 2290 newcon->setup(newcon, console_cmdline[i].options) != 0)
2312 break; 2291 break;
2313 newcon->flags |= CON_ENABLED; 2292 newcon->flags |= CON_ENABLED;
2314 newcon->index = console_cmdline[i].index; 2293 newcon->index = c->index;
2315 if (i == selected_console) { 2294 if (i == selected_console) {
2316 newcon->flags |= CON_CONSDEV; 2295 newcon->flags |= CON_CONSDEV;
2317 preferred_console = selected_console; 2296 preferred_console = selected_console;
@@ -2394,13 +2373,13 @@ EXPORT_SYMBOL(register_console);
2394int unregister_console(struct console *console) 2373int unregister_console(struct console *console)
2395{ 2374{
2396 struct console *a, *b; 2375 struct console *a, *b;
2397 int res = 1; 2376 int res;
2398 2377
2399#ifdef CONFIG_A11Y_BRAILLE_CONSOLE 2378 res = _braille_unregister_console(console);
2400 if (console->flags & CON_BRL) 2379 if (res)
2401 return braille_unregister_console(console); 2380 return res;
2402#endif
2403 2381
2382 res = 1;
2404 console_lock(); 2383 console_lock();
2405 if (console_drivers == console) { 2384 if (console_drivers == console) {
2406 console_drivers=console->next; 2385 console_drivers=console->next;
@@ -2666,7 +2645,7 @@ void kmsg_dump(enum kmsg_dump_reason reason)
2666bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, 2645bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
2667 char *line, size_t size, size_t *len) 2646 char *line, size_t size, size_t *len)
2668{ 2647{
2669 struct log *msg; 2648 struct printk_log *msg;
2670 size_t l = 0; 2649 size_t l = 0;
2671 bool ret = false; 2650 bool ret = false;
2672 2651
@@ -2778,7 +2757,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
2778 idx = dumper->cur_idx; 2757 idx = dumper->cur_idx;
2779 prev = 0; 2758 prev = 0;
2780 while (seq < dumper->next_seq) { 2759 while (seq < dumper->next_seq) {
2781 struct log *msg = log_from_idx(idx); 2760 struct printk_log *msg = log_from_idx(idx);
2782 2761
2783 l += msg_print_text(msg, prev, true, NULL, 0); 2762 l += msg_print_text(msg, prev, true, NULL, 0);
2784 idx = log_next(idx); 2763 idx = log_next(idx);
@@ -2791,7 +2770,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
2791 idx = dumper->cur_idx; 2770 idx = dumper->cur_idx;
2792 prev = 0; 2771 prev = 0;
2793 while (l > size && seq < dumper->next_seq) { 2772 while (l > size && seq < dumper->next_seq) {
2794 struct log *msg = log_from_idx(idx); 2773 struct printk_log *msg = log_from_idx(idx);
2795 2774
2796 l -= msg_print_text(msg, prev, true, NULL, 0); 2775 l -= msg_print_text(msg, prev, true, NULL, 0);
2797 idx = log_next(idx); 2776 idx = log_next(idx);
@@ -2806,7 +2785,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
2806 l = 0; 2785 l = 0;
2807 prev = 0; 2786 prev = 0;
2808 while (seq < dumper->next_seq) { 2787 while (seq < dumper->next_seq) {
2809 struct log *msg = log_from_idx(idx); 2788 struct printk_log *msg = log_from_idx(idx);
2810 2789
2811 l += msg_print_text(msg, prev, syslog, buf + l, size - l); 2790 l += msg_print_text(msg, prev, syslog, buf + l, size - l);
2812 idx = log_next(idx); 2791 idx = log_next(idx);
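
Taken together, the printk.c hunks replace the inline #ifdef CONFIG_A11Y_BRAILLE_CONSOLE blocks with _braille_*() helpers, so the call sites compile the same way whether or not braille support is built in. A sketch of the stub side of that pattern, inferred from the call sites visible above (the real declarations live in a separate braille header added by this series, which may differ in detail):

    #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
    /* real implementations, backed by the braille console driver */
    int _braille_console_setup(char **str, char **brl_options);
    int _braille_register_console(struct console *console,
                                  struct console_cmdline *c);
    int _braille_unregister_console(struct console *console);
    void braille_set_options(struct console_cmdline *c, char *brl_options);
    #else
    /* no-op stubs: return 0 ("not handled") so printk.c falls through */
    static inline int _braille_console_setup(char **str, char **brl_options)
    {
            return 0;
    }
    static inline int _braille_register_console(struct console *console,
                                                struct console_cmdline *c)
    {
            return 0;
    }
    static inline int _braille_unregister_console(struct console *console)
    {
            return 0;
    }
    static inline void braille_set_options(struct console_cmdline *c,
                                           char *brl_options)
    {
    }
    #endif
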
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 4041f5747e73..a146ee327f6a 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -469,7 +469,6 @@ static int ptrace_detach(struct task_struct *child, unsigned int data)
469 /* Architecture-specific hardware disable .. */ 469 /* Architecture-specific hardware disable .. */
470 ptrace_disable(child); 470 ptrace_disable(child);
471 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 471 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
472 flush_ptrace_hw_breakpoint(child);
473 472
474 write_lock_irq(&tasklist_lock); 473 write_lock_irq(&tasklist_lock);
475 /* 474 /*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b7c32cb7bfeb..05c39f030314 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -933,6 +933,8 @@ static int effective_prio(struct task_struct *p)
933/** 933/**
934 * task_curr - is this task currently executing on a CPU? 934 * task_curr - is this task currently executing on a CPU?
935 * @p: the task in question. 935 * @p: the task in question.
936 *
937 * Return: 1 if the task is currently executing. 0 otherwise.
936 */ 938 */
937inline int task_curr(const struct task_struct *p) 939inline int task_curr(const struct task_struct *p)
938{ 940{
@@ -1482,7 +1484,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
1482 * the simpler "current->state = TASK_RUNNING" to mark yourself 1484 * the simpler "current->state = TASK_RUNNING" to mark yourself
1483 * runnable without the overhead of this. 1485 * runnable without the overhead of this.
1484 * 1486 *
1485 * Returns %true if @p was woken up, %false if it was already running 1487 * Return: %true if @p was woken up, %false if it was already running.
1486 * or @state didn't match @p's state. 1488 * or @state didn't match @p's state.
1487 */ 1489 */
1488static int 1490static int
@@ -1491,7 +1493,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1491 unsigned long flags; 1493 unsigned long flags;
1492 int cpu, success = 0; 1494 int cpu, success = 0;
1493 1495
1494 smp_wmb(); 1496 /*
1497 * If we are going to wake up a thread waiting for CONDITION we
1498 * need to ensure that CONDITION=1 done by the caller can not be
1499 * reordered with p->state check below. This pairs with mb() in
1500 * set_current_state() the waiting thread does.
1501 */
1502 smp_mb__before_spinlock();
1495 raw_spin_lock_irqsave(&p->pi_lock, flags); 1503 raw_spin_lock_irqsave(&p->pi_lock, flags);
1496 if (!(p->state & state)) 1504 if (!(p->state & state))
1497 goto out; 1505 goto out;
@@ -1577,8 +1585,9 @@ out:
1577 * @p: The process to be woken up. 1585 * @p: The process to be woken up.
1578 * 1586 *
1579 * Attempt to wake up the nominated process and move it to the set of runnable 1587 * Attempt to wake up the nominated process and move it to the set of runnable
1580 * processes. Returns 1 if the process was woken up, 0 if it was already 1588 * processes.
1581 * running. 1589 *
1590 * Return: 1 if the process was woken up, 0 if it was already running.
1582 * 1591 *
1583 * It may be assumed that this function implies a write memory barrier before 1592 * It may be assumed that this function implies a write memory barrier before
1584 * changing the task state if and only if any tasks are woken up. 1593 * changing the task state if and only if any tasks are woken up.
@@ -2191,6 +2200,8 @@ void scheduler_tick(void)
2191 * This makes sure that uptime, CFS vruntime, load 2200 * This makes sure that uptime, CFS vruntime, load
2192 * balancing, etc... continue to move forward, even 2201 * balancing, etc... continue to move forward, even
2193 * with a very low granularity. 2202 * with a very low granularity.
2203 *
2204 * Return: Maximum deferment in nanoseconds.
2194 */ 2205 */
2195u64 scheduler_tick_max_deferment(void) 2206u64 scheduler_tick_max_deferment(void)
2196{ 2207{
@@ -2394,6 +2405,12 @@ need_resched:
2394 if (sched_feat(HRTICK)) 2405 if (sched_feat(HRTICK))
2395 hrtick_clear(rq); 2406 hrtick_clear(rq);
2396 2407
2408 /*
2409 * Make sure that signal_pending_state()->signal_pending() below
2410 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
2411 * done by the caller to avoid the race with signal_wake_up().
2412 */
2413 smp_mb__before_spinlock();
2397 raw_spin_lock_irq(&rq->lock); 2414 raw_spin_lock_irq(&rq->lock);
2398 2415
2399 switch_count = &prev->nivcsw; 2416 switch_count = &prev->nivcsw;
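
Both smp_mb__before_spinlock() additions (try_to_wake_up() above and __schedule() here) close the same class of race: a plain spin_lock() is only an ACQUIRE barrier, so a store issued before it may be reordered past loads inside the critical section. A minimal waiter/waker sketch of the pattern being protected (illustrative only, not from the patch):

    static int condition;                       /* data the waiter sleeps on */

    static void waiter(void)
    {
            set_current_state(TASK_INTERRUPTIBLE);  /* implies a full barrier */
            if (!condition)
                    schedule();                 /* checks signal_pending_state() */
            __set_current_state(TASK_RUNNING);
    }

    static void waker(struct task_struct *p)
    {
            condition = 1;          /* must be visible before ...            */
            wake_up_process(p);     /* ... try_to_wake_up() reads p->state   */
    }

If the condition store could slip past the p->state check inside the lock, the waker might see the waiter still running and skip the wakeup, while the waiter, not yet seeing condition = 1, goes to sleep for good.
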
@@ -2796,8 +2813,8 @@ EXPORT_SYMBOL(wait_for_completion);
2796 * specified timeout to expire. The timeout is in jiffies. It is not 2813 * specified timeout to expire. The timeout is in jiffies. It is not
2797 * interruptible. 2814 * interruptible.
2798 * 2815 *
2799 * The return value is 0 if timed out, and positive (at least 1, or number of 2816 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
2800 * jiffies left till timeout) if completed. 2817 * till timeout) if completed.
2801 */ 2818 */
2802unsigned long __sched 2819unsigned long __sched
2803wait_for_completion_timeout(struct completion *x, unsigned long timeout) 2820wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -2829,8 +2846,8 @@ EXPORT_SYMBOL(wait_for_completion_io);
2829 * specified timeout to expire. The timeout is in jiffies. It is not 2846 * specified timeout to expire. The timeout is in jiffies. It is not
2830 * interruptible. The caller is accounted as waiting for IO. 2847 * interruptible. The caller is accounted as waiting for IO.
2831 * 2848 *
2832 * The return value is 0 if timed out, and positive (at least 1, or number of 2849 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
2833 * jiffies left till timeout) if completed. 2850 * till timeout) if completed.
2834 */ 2851 */
2835unsigned long __sched 2852unsigned long __sched
2836wait_for_completion_io_timeout(struct completion *x, unsigned long timeout) 2853wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
@@ -2846,7 +2863,7 @@ EXPORT_SYMBOL(wait_for_completion_io_timeout);
2846 * This waits for completion of a specific task to be signaled. It is 2863 * This waits for completion of a specific task to be signaled. It is
2847 * interruptible. 2864 * interruptible.
2848 * 2865 *
2849 * The return value is -ERESTARTSYS if interrupted, 0 if completed. 2866 * Return: -ERESTARTSYS if interrupted, 0 if completed.
2850 */ 2867 */
2851int __sched wait_for_completion_interruptible(struct completion *x) 2868int __sched wait_for_completion_interruptible(struct completion *x)
2852{ 2869{
@@ -2865,8 +2882,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
2865 * This waits for either a completion of a specific task to be signaled or for a 2882 * This waits for either a completion of a specific task to be signaled or for a
2866 * specified timeout to expire. It is interruptible. The timeout is in jiffies. 2883 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
2867 * 2884 *
2868 * The return value is -ERESTARTSYS if interrupted, 0 if timed out, 2885 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
2869 * positive (at least 1, or number of jiffies left till timeout) if completed. 2886 * or number of jiffies left till timeout) if completed.
2870 */ 2887 */
2871long __sched 2888long __sched
2872wait_for_completion_interruptible_timeout(struct completion *x, 2889wait_for_completion_interruptible_timeout(struct completion *x,
@@ -2883,7 +2900,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
2883 * This waits to be signaled for completion of a specific task. It can be 2900 * This waits to be signaled for completion of a specific task. It can be
2884 * interrupted by a kill signal. 2901 * interrupted by a kill signal.
2885 * 2902 *
2886 * The return value is -ERESTARTSYS if interrupted, 0 if completed. 2903 * Return: -ERESTARTSYS if interrupted, 0 if completed.
2887 */ 2904 */
2888int __sched wait_for_completion_killable(struct completion *x) 2905int __sched wait_for_completion_killable(struct completion *x)
2889{ 2906{
@@ -2903,8 +2920,8 @@ EXPORT_SYMBOL(wait_for_completion_killable);
2903 * signaled or for a specified timeout to expire. It can be 2920 * signaled or for a specified timeout to expire. It can be
2904 * interrupted by a kill signal. The timeout is in jiffies. 2921 * interrupted by a kill signal. The timeout is in jiffies.
2905 * 2922 *
2906 * The return value is -ERESTARTSYS if interrupted, 0 if timed out, 2923 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
2907 * positive (at least 1, or number of jiffies left till timeout) if completed. 2924 * or number of jiffies left till timeout) if completed.
2908 */ 2925 */
2909long __sched 2926long __sched
2910wait_for_completion_killable_timeout(struct completion *x, 2927wait_for_completion_killable_timeout(struct completion *x,
@@ -2918,7 +2935,7 @@ EXPORT_SYMBOL(wait_for_completion_killable_timeout);
2918 * try_wait_for_completion - try to decrement a completion without blocking 2935 * try_wait_for_completion - try to decrement a completion without blocking
2919 * @x: completion structure 2936 * @x: completion structure
2920 * 2937 *
2921 * Returns: 0 if a decrement cannot be done without blocking 2938 * Return: 0 if a decrement cannot be done without blocking
2922 * 1 if a decrement succeeded. 2939 * 1 if a decrement succeeded.
2923 * 2940 *
2924 * If a completion is being used as a counting completion, 2941 * If a completion is being used as a counting completion,
@@ -2945,7 +2962,7 @@ EXPORT_SYMBOL(try_wait_for_completion);
2945 * completion_done - Test to see if a completion has any waiters 2962 * completion_done - Test to see if a completion has any waiters
2946 * @x: completion structure 2963 * @x: completion structure
2947 * 2964 *
2948 * Returns: 0 if there are waiters (wait_for_completion() in progress) 2965 * Return: 0 if there are waiters (wait_for_completion() in progress)
2949 * 1 if there are no waiters. 2966 * 1 if there are no waiters.
2950 * 2967 *
2951 */ 2968 */
@@ -3182,7 +3199,7 @@ SYSCALL_DEFINE1(nice, int, increment)
3182 * task_prio - return the priority value of a given task. 3199 * task_prio - return the priority value of a given task.
3183 * @p: the task in question. 3200 * @p: the task in question.
3184 * 3201 *
3185 * This is the priority value as seen by users in /proc. 3202 * Return: The priority value as seen by users in /proc.
3186 * RT tasks are offset by -200. Normal tasks are centered 3203 * RT tasks are offset by -200. Normal tasks are centered
3187 * around 0, value goes from -16 to +15. 3204 * around 0, value goes from -16 to +15.
3188 */ 3205 */
@@ -3194,6 +3211,8 @@ int task_prio(const struct task_struct *p)
3194/** 3211/**
3195 * task_nice - return the nice value of a given task. 3212 * task_nice - return the nice value of a given task.
3196 * @p: the task in question. 3213 * @p: the task in question.
3214 *
3215 * Return: The nice value [ -20 ... 0 ... 19 ].
3197 */ 3216 */
3198int task_nice(const struct task_struct *p) 3217int task_nice(const struct task_struct *p)
3199{ 3218{
@@ -3204,6 +3223,8 @@ EXPORT_SYMBOL(task_nice);
3204/** 3223/**
3205 * idle_cpu - is a given cpu idle currently? 3224 * idle_cpu - is a given cpu idle currently?
3206 * @cpu: the processor in question. 3225 * @cpu: the processor in question.
3226 *
3227 * Return: 1 if the CPU is currently idle. 0 otherwise.
3207 */ 3228 */
3208int idle_cpu(int cpu) 3229int idle_cpu(int cpu)
3209{ 3230{
@@ -3226,6 +3247,8 @@ int idle_cpu(int cpu)
3226/** 3247/**
3227 * idle_task - return the idle task for a given cpu. 3248 * idle_task - return the idle task for a given cpu.
3228 * @cpu: the processor in question. 3249 * @cpu: the processor in question.
3250 *
3251 * Return: The idle task for the cpu @cpu.
3229 */ 3252 */
3230struct task_struct *idle_task(int cpu) 3253struct task_struct *idle_task(int cpu)
3231{ 3254{
@@ -3235,6 +3258,8 @@ struct task_struct *idle_task(int cpu)
3235/** 3258/**
3236 * find_process_by_pid - find a process with a matching PID value. 3259 * find_process_by_pid - find a process with a matching PID value.
3237 * @pid: the pid in question. 3260 * @pid: the pid in question.
3261 *
 3262 * Return: The task of @pid, if found. %NULL otherwise.
 3262 * Return: The task of @pid, if found. %NULL otherwise.
3238 */ 3263 */
3239static struct task_struct *find_process_by_pid(pid_t pid) 3264static struct task_struct *find_process_by_pid(pid_t pid)
3240{ 3265{
@@ -3432,6 +3457,8 @@ recheck:
3432 * @policy: new policy. 3457 * @policy: new policy.
3433 * @param: structure containing the new RT priority. 3458 * @param: structure containing the new RT priority.
3434 * 3459 *
3460 * Return: 0 on success. An error code otherwise.
3461 *
3435 * NOTE that the task may be already dead. 3462 * NOTE that the task may be already dead.
3436 */ 3463 */
3437int sched_setscheduler(struct task_struct *p, int policy, 3464int sched_setscheduler(struct task_struct *p, int policy,
@@ -3451,6 +3478,8 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
3451 * current context has permission. For example, this is needed in 3478 * current context has permission. For example, this is needed in
3452 * stop_machine(): we create temporary high priority worker threads, 3479 * stop_machine(): we create temporary high priority worker threads,
3453 * but our caller might not have that capability. 3480 * but our caller might not have that capability.
3481 *
3482 * Return: 0 on success. An error code otherwise.
3454 */ 3483 */
3455int sched_setscheduler_nocheck(struct task_struct *p, int policy, 3484int sched_setscheduler_nocheck(struct task_struct *p, int policy,
3456 const struct sched_param *param) 3485 const struct sched_param *param)
@@ -3485,6 +3514,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
3485 * @pid: the pid in question. 3514 * @pid: the pid in question.
3486 * @policy: new policy. 3515 * @policy: new policy.
3487 * @param: structure containing the new RT priority. 3516 * @param: structure containing the new RT priority.
3517 *
3518 * Return: 0 on success. An error code otherwise.
3488 */ 3519 */
3489SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, 3520SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
3490 struct sched_param __user *, param) 3521 struct sched_param __user *, param)
@@ -3500,6 +3531,8 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
3500 * sys_sched_setparam - set/change the RT priority of a thread 3531 * sys_sched_setparam - set/change the RT priority of a thread
3501 * @pid: the pid in question. 3532 * @pid: the pid in question.
3502 * @param: structure containing the new RT priority. 3533 * @param: structure containing the new RT priority.
3534 *
3535 * Return: 0 on success. An error code otherwise.
3503 */ 3536 */
3504SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 3537SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
3505{ 3538{
@@ -3509,6 +3542,9 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
3509/** 3542/**
3510 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 3543 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
3511 * @pid: the pid in question. 3544 * @pid: the pid in question.
3545 *
3546 * Return: On success, the policy of the thread. Otherwise, a negative error
3547 * code.
3512 */ 3548 */
3513SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 3549SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
3514{ 3550{
@@ -3535,6 +3571,9 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
3535 * sys_sched_getparam - get the RT priority of a thread 3571 * sys_sched_getparam - get the RT priority of a thread
3536 * @pid: the pid in question. 3572 * @pid: the pid in question.
3537 * @param: structure containing the RT priority. 3573 * @param: structure containing the RT priority.
3574 *
3575 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
3576 * code.
3538 */ 3577 */
3539SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 3578SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
3540{ 3579{
@@ -3659,6 +3698,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
3659 * @pid: pid of the process 3698 * @pid: pid of the process
3660 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 3699 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3661 * @user_mask_ptr: user-space pointer to the new cpu mask 3700 * @user_mask_ptr: user-space pointer to the new cpu mask
3701 *
3702 * Return: 0 on success. An error code otherwise.
3662 */ 3703 */
3663SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 3704SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
3664 unsigned long __user *, user_mask_ptr) 3705 unsigned long __user *, user_mask_ptr)
@@ -3710,6 +3751,8 @@ out_unlock:
3710 * @pid: pid of the process 3751 * @pid: pid of the process
3711 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 3752 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3712 * @user_mask_ptr: user-space pointer to hold the current cpu mask 3753 * @user_mask_ptr: user-space pointer to hold the current cpu mask
3754 *
3755 * Return: 0 on success. An error code otherwise.
3713 */ 3756 */
3714SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 3757SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
3715 unsigned long __user *, user_mask_ptr) 3758 unsigned long __user *, user_mask_ptr)
@@ -3744,6 +3787,8 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
3744 * 3787 *
3745 * This function yields the current CPU to other tasks. If there are no 3788 * This function yields the current CPU to other tasks. If there are no
3746 * other threads running on this CPU then this function will return. 3789 * other threads running on this CPU then this function will return.
3790 *
3791 * Return: 0.
3747 */ 3792 */
3748SYSCALL_DEFINE0(sched_yield) 3793SYSCALL_DEFINE0(sched_yield)
3749{ 3794{
@@ -3869,7 +3914,7 @@ EXPORT_SYMBOL(yield);
3869 * It's the caller's job to ensure that the target task struct 3914 * It's the caller's job to ensure that the target task struct
3870 * can't go away on us before we can do any checks. 3915 * can't go away on us before we can do any checks.
3871 * 3916 *
3872 * Returns: 3917 * Return:
3873 * true (>0) if we indeed boosted the target task. 3918 * true (>0) if we indeed boosted the target task.
3874 * false (0) if we failed to boost the target. 3919 * false (0) if we failed to boost the target.
3875 * -ESRCH if there's no task to yield to. 3920 * -ESRCH if there's no task to yield to.
@@ -3972,8 +4017,9 @@ long __sched io_schedule_timeout(long timeout)
3972 * sys_sched_get_priority_max - return maximum RT priority. 4017 * sys_sched_get_priority_max - return maximum RT priority.
3973 * @policy: scheduling class. 4018 * @policy: scheduling class.
3974 * 4019 *
3975 * this syscall returns the maximum rt_priority that can be used 4020 * Return: On success, this syscall returns the maximum
3976 * by a given scheduling class. 4021 * rt_priority that can be used by a given scheduling class.
4022 * On failure, a negative error code is returned.
3977 */ 4023 */
3978SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 4024SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
3979{ 4025{
@@ -3997,8 +4043,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
3997 * sys_sched_get_priority_min - return minimum RT priority. 4043 * sys_sched_get_priority_min - return minimum RT priority.
3998 * @policy: scheduling class. 4044 * @policy: scheduling class.
3999 * 4045 *
4000 * this syscall returns the minimum rt_priority that can be used 4046 * Return: On success, this syscall returns the minimum
4001 * by a given scheduling class. 4047 * rt_priority that can be used by a given scheduling class.
4048 * On failure, a negative error code is returned.
4002 */ 4049 */
4003SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 4050SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
4004{ 4051{
@@ -4024,6 +4071,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
4024 * 4071 *
4025 * this syscall writes the default timeslice value of a given process 4072 * this syscall writes the default timeslice value of a given process
4026 * into the user-space timespec buffer. A value of '0' means infinity. 4073 * into the user-space timespec buffer. A value of '0' means infinity.
4074 *
4075 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
4076 * an error code.
4027 */ 4077 */
4028SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 4078SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
4029 struct timespec __user *, interval) 4079 struct timespec __user *, interval)
@@ -6632,6 +6682,8 @@ void normalize_rt_tasks(void)
6632 * @cpu: the processor in question. 6682 * @cpu: the processor in question.
6633 * 6683 *
6634 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6684 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6685 *
6686 * Return: The current task for @cpu.
6635 */ 6687 */
6636struct task_struct *curr_task(int cpu) 6688struct task_struct *curr_task(int cpu)
6637{ 6689{
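
The bulk of the remaining sched/core.c changes are mechanical: free-form "Returns ..." prose becomes a structured "Return:" section, which the kernel-doc tooling recognizes as a distinct block. The resulting shape, shown on a hypothetical helper:

    /**
     * my_helper - one-line summary (hypothetical example)
     * @arg: meaning of the argument.
     *
     * Optional longer description.
     *
     * Return: 0 on success. A negative error code otherwise.
     */
    static int my_helper(int arg)
    {
            return arg >= 0 ? 0 : -EINVAL;
    }
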
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 1095e878a46f..8b836b376d91 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -62,7 +62,7 @@ static int convert_prio(int prio)
62 * any discrepancies created by racing against the uncertainty of the current 62 * any discrepancies created by racing against the uncertainty of the current
63 * priority configuration. 63 * priority configuration.
64 * 64 *
65 * Returns: (int)bool - CPUs were found 65 * Return: (int)bool - CPUs were found
66 */ 66 */
67int cpupri_find(struct cpupri *cp, struct task_struct *p, 67int cpupri_find(struct cpupri *cp, struct task_struct *p,
68 struct cpumask *lowest_mask) 68 struct cpumask *lowest_mask)
@@ -203,7 +203,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
203 * cpupri_init - initialize the cpupri structure 203 * cpupri_init - initialize the cpupri structure
204 * @cp: The cpupri context 204 * @cp: The cpupri context
205 * 205 *
206 * Returns: -ENOMEM if memory fails. 206 * Return: -ENOMEM on memory allocation failure.
207 */ 207 */
208int cpupri_init(struct cpupri *cp) 208int cpupri_init(struct cpupri *cp)
209{ 209{
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bb456f44b7b1..68f1609ca149 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -851,7 +851,7 @@ void task_numa_fault(int node, int pages, bool migrated)
851{ 851{
852 struct task_struct *p = current; 852 struct task_struct *p = current;
853 853
854 if (!sched_feat_numa(NUMA)) 854 if (!numabalancing_enabled)
855 return; 855 return;
856 856
857 /* FIXME: Allocate task-specific structure for placement policy here */ 857 /* FIXME: Allocate task-specific structure for placement policy here */
@@ -2032,6 +2032,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
2032 */ 2032 */
2033 update_entity_load_avg(curr, 1); 2033 update_entity_load_avg(curr, 1);
2034 update_cfs_rq_blocked_load(cfs_rq, 1); 2034 update_cfs_rq_blocked_load(cfs_rq, 1);
2035 update_cfs_shares(cfs_rq);
2035 2036
2036#ifdef CONFIG_SCHED_HRTICK 2037#ifdef CONFIG_SCHED_HRTICK
2037 /* 2038 /*
@@ -4280,6 +4281,8 @@ struct sg_lb_stats {
4280 * get_sd_load_idx - Obtain the load index for a given sched domain. 4281 * get_sd_load_idx - Obtain the load index for a given sched domain.
4281 * @sd: The sched_domain whose load_idx is to be obtained. 4282 * @sd: The sched_domain whose load_idx is to be obtained.
4282 * @idle: The Idle status of the CPU for whose sd load_icx is obtained. 4283 * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
4284 *
4285 * Return: The load index.
4283 */ 4286 */
4284static inline int get_sd_load_idx(struct sched_domain *sd, 4287static inline int get_sd_load_idx(struct sched_domain *sd,
4285 enum cpu_idle_type idle) 4288 enum cpu_idle_type idle)
@@ -4574,6 +4577,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
4574 * 4577 *
4575 * Determine if @sg is a busier group than the previously selected 4578 * Determine if @sg is a busier group than the previously selected
4576 * busiest group. 4579 * busiest group.
4580 *
4581 * Return: %true if @sg is a busier group than the previously selected
4582 * busiest group. %false otherwise.
4577 */ 4583 */
4578static bool update_sd_pick_busiest(struct lb_env *env, 4584static bool update_sd_pick_busiest(struct lb_env *env,
4579 struct sd_lb_stats *sds, 4585 struct sd_lb_stats *sds,
@@ -4691,7 +4697,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
4691 * assuming lower CPU number will be equivalent to lower a SMT thread 4697 * assuming lower CPU number will be equivalent to lower a SMT thread
4692 * number. 4698 * number.
4693 * 4699 *
4694 * Returns 1 when packing is required and a task should be moved to 4700 * Return: 1 when packing is required and a task should be moved to
4695 * this CPU. The amount of the imbalance is returned in *imbalance. 4701 * this CPU. The amount of the imbalance is returned in *imbalance.
4696 * 4702 *
4697 * @env: The load balancing environment. 4703 * @env: The load balancing environment.
@@ -4869,7 +4875,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
4869 * @balance: Pointer to a variable indicating if this_cpu 4875 * @balance: Pointer to a variable indicating if this_cpu
4870 * is the appropriate cpu to perform load balancing at this_level. 4876 * is the appropriate cpu to perform load balancing at this_level.
4871 * 4877 *
4872 * Returns: - the busiest group if imbalance exists. 4878 * Return: - The busiest group if imbalance exists.
4873 * - If no imbalance and user has opted for power-savings balance, 4879 * - If no imbalance and user has opted for power-savings balance,
4874 * return the least loaded group whose CPUs can be 4880 * return the least loaded group whose CPUs can be
4875 * put to idle by rebalancing its tasks onto our group. 4881 * put to idle by rebalancing its tasks onto our group.
@@ -5786,7 +5792,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
5786 entity_tick(cfs_rq, se, queued); 5792 entity_tick(cfs_rq, se, queued);
5787 } 5793 }
5788 5794
5789 if (sched_feat_numa(NUMA)) 5795 if (numabalancing_enabled)
5790 task_tick_numa(rq, curr); 5796 task_tick_numa(rq, curr);
5791 5797
5792 update_rq_runnable_avg(rq, 1); 5798 update_rq_runnable_avg(rq, 1);
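
Both fair.c gates swap the scheduler feature test for numabalancing_enabled, a plain runtime boolean, so the NUMA-balancing hooks in the fault and tick paths follow one switch regardless of how the feature-bit machinery is configured (the switch's plumbing, e.g. its sysctl, is assumed here and not shown in these hunks):

    extern bool numabalancing_enabled;      /* assumed: runtime switch */

    if (!numabalancing_enabled)
            return;                         /* skip NUMA placement work */
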
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ac09d98490aa..07f6fc468e17 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2346,7 +2346,11 @@ static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp,
2346 int write, void *data) 2346 int write, void *data)
2347{ 2347{
2348 if (write) { 2348 if (write) {
2349 *valp = msecs_to_jiffies(*negp ? -*lvalp : *lvalp); 2349 unsigned long jif = msecs_to_jiffies(*negp ? -*lvalp : *lvalp);
2350
2351 if (jif > INT_MAX)
2352 return 1;
2353 *valp = (int)jif;
2350 } else { 2354 } else {
2351 int val = *valp; 2355 int val = *valp;
2352 unsigned long lval; 2356 unsigned long lval;
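
The do_proc_dointvec_ms_jiffies_conv() hunk guards a long-to-int narrowing: msecs_to_jiffies() returns unsigned long, so on 64-bit kernels a large enough write would previously be truncated when stored into the int-sized sysctl slot. The same guard in isolation (a sketch; the conv-callback plumbing around it is assumed):

    /* nonzero return makes the write fail instead of storing a
     * truncated value, matching the convention in the hunk above */
    static int store_ms_as_jiffies(int *valp, bool neg, unsigned long lval)
    {
            unsigned long jif = msecs_to_jiffies(neg ? -lval : lval);

            if (jif > INT_MAX)
                    return 1;       /* refuse rather than truncate */
            *valp = (int)jif;
            return 0;
    }
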
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index a326f27d7f09..0b479a6a22bb 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -121,7 +121,7 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
121 BUG_ON(bits > 32); 121 BUG_ON(bits > 32);
122 WARN_ON(!irqs_disabled()); 122 WARN_ON(!irqs_disabled());
123 read_sched_clock = read; 123 read_sched_clock = read;
124 sched_clock_mask = (1 << bits) - 1; 124 sched_clock_mask = (1ULL << bits) - 1;
125 cd.rate = rate; 125 cd.rate = rate;
126 126
127 /* calculate the mult/shift to convert counter ticks to ns. */ 127 /* calculate the mult/shift to convert counter ticks to ns. */
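
The sched_clock mask fix is a classic shift-width bug: setup_sched_clock() explicitly allows bits == 32 (BUG_ON(bits > 32)), but shifting a 32-bit int by its full width is undefined behaviour and in practice yields a zero mask. Promoting the constant to 64 bits fixes it:

    int bits = 32;                  /* legal: BUG_ON(bits > 32) permits it */
    u64 mask;

    /* mask = (1 << bits) - 1;        old: int shifted by 32, undefined */
    mask = (1ULL << bits) - 1;      /* new: 64-bit shift, mask = 0xffffffff */
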
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e80183f4a6c4..e8a1516cc0a3 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -182,7 +182,8 @@ static bool can_stop_full_tick(void)
182 * Don't allow the user to think they can get 182 * Don't allow the user to think they can get
183 * full NO_HZ with this machine. 183 * full NO_HZ with this machine.
184 */ 184 */
185 WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock"); 185 WARN_ONCE(have_nohz_full_mask,
186 "NO_HZ FULL will not work with unstable sched clock");
186 return false; 187 return false;
187 } 188 }
188#endif 189#endif
@@ -343,8 +344,6 @@ static int tick_nohz_init_all(void)
343 344
344void __init tick_nohz_init(void) 345void __init tick_nohz_init(void)
345{ 346{
346 int cpu;
347
348 if (!have_nohz_full_mask) { 347 if (!have_nohz_full_mask) {
349 if (tick_nohz_init_all() < 0) 348 if (tick_nohz_init_all() < 0)
350 return; 349 return;
@@ -827,13 +826,10 @@ void tick_nohz_irq_exit(void)
827{ 826{
828 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); 827 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
829 828
830 if (ts->inidle) { 829 if (ts->inidle)
831 /* Cancel the timer because CPU already waken up from the C-states*/
832 menu_hrtimer_cancel();
833 __tick_nohz_idle_enter(ts); 830 __tick_nohz_idle_enter(ts);
834 } else { 831 else
835 tick_nohz_full_stop_tick(ts); 832 tick_nohz_full_stop_tick(ts);
836 }
837} 833}
838 834
839/** 835/**
@@ -931,8 +927,6 @@ void tick_nohz_idle_exit(void)
931 927
932 ts->inidle = 0; 928 ts->inidle = 0;
933 929
934 /* Cancel the timer because CPU already waken up from the C-states*/
935 menu_hrtimer_cancel();
936 if (ts->idle_active || ts->tick_stopped) 930 if (ts->idle_active || ts->tick_stopped)
937 now = ktime_get(); 931 now = ktime_get();
938 932
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 67708f46baae..a6d098c6df3f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1441,12 +1441,22 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
1441 * the hashes are freed with call_rcu_sched(). 1441 * the hashes are freed with call_rcu_sched().
1442 */ 1442 */
1443static int 1443static int
1444ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) 1444ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1445{ 1445{
1446 struct ftrace_hash *filter_hash; 1446 struct ftrace_hash *filter_hash;
1447 struct ftrace_hash *notrace_hash; 1447 struct ftrace_hash *notrace_hash;
1448 int ret; 1448 int ret;
1449 1449
1450#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1451 /*
1452 * There's a small race when adding ops that the ftrace handler
1453 * that wants regs, may be called without them. We can not
1454 * allow that handler to be called if regs is NULL.
1455 */
1456 if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1457 return 0;
1458#endif
1459
1450 filter_hash = rcu_dereference_raw_notrace(ops->filter_hash); 1460 filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
1451 notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash); 1461 notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
1452 1462
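
Passing regs into ftrace_ops_test() lets the dispatch loops later in this file skip, during the brief update window, any ops registered with FTRACE_OPS_FL_SAVE_REGS when no pt_regs are available, since such a handler is entitled to dereference regs. An illustrative handler showing what would break (the signature matches the op->func() calls in this file; the body is a stand-in):

    static void my_regs_handler(unsigned long ip, unsigned long parent_ip,
                                struct ftrace_ops *op, struct pt_regs *regs)
    {
            /* a SAVE_REGS handler may assume regs is valid ... */
            unsigned long pc = instruction_pointer(regs);  /* would oops on NULL */
            (void)pc;
    }
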
@@ -2159,12 +2169,57 @@ static cycle_t ftrace_update_time;
2159static unsigned long ftrace_update_cnt; 2169static unsigned long ftrace_update_cnt;
2160unsigned long ftrace_update_tot_cnt; 2170unsigned long ftrace_update_tot_cnt;
2161 2171
2162static int ops_traces_mod(struct ftrace_ops *ops) 2172static inline int ops_traces_mod(struct ftrace_ops *ops)
2163{ 2173{
2164 struct ftrace_hash *hash; 2174 /*
2175 * Filter_hash being empty will default to trace module.
2176 * But notrace hash requires a test of individual module functions.
2177 */
2178 return ftrace_hash_empty(ops->filter_hash) &&
2179 ftrace_hash_empty(ops->notrace_hash);
2180}
2181
2182/*
2183 * Check if the current ops references the record.
2184 *
2185 * If the ops traces all functions, then it was already accounted for.
2186 * If the ops does not trace the current record function, skip it.
2187 * If the ops ignores the function via notrace filter, skip it.
2188 */
2189static inline bool
2190ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2191{
2192 /* If ops isn't enabled, ignore it */
2193 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2194 return 0;
2195
2196 /* If ops traces all mods, we already accounted for it */
2197 if (ops_traces_mod(ops))
2198 return 0;
2199
2200 /* The function must be in the filter */
2201 if (!ftrace_hash_empty(ops->filter_hash) &&
2202 !ftrace_lookup_ip(ops->filter_hash, rec->ip))
2203 return 0;
2204
2205 /* If in notrace hash, we ignore it too */
2206 if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
2207 return 0;
2165 2208
2166 hash = ops->filter_hash; 2209 return 1;
2167 return ftrace_hash_empty(hash); 2210}
2211
2212static int referenced_filters(struct dyn_ftrace *rec)
2213{
2214 struct ftrace_ops *ops;
2215 int cnt = 0;
2216
2217 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
2218 if (ops_references_rec(ops, rec))
2219 cnt++;
2220 }
2221
2222 return cnt;
2168} 2223}
2169 2224
2170static int ftrace_update_code(struct module *mod) 2225static int ftrace_update_code(struct module *mod)
@@ -2173,6 +2228,7 @@ static int ftrace_update_code(struct module *mod)
2173 struct dyn_ftrace *p; 2228 struct dyn_ftrace *p;
2174 cycle_t start, stop; 2229 cycle_t start, stop;
2175 unsigned long ref = 0; 2230 unsigned long ref = 0;
2231 bool test = false;
2176 int i; 2232 int i;
2177 2233
2178 /* 2234 /*
@@ -2186,9 +2242,12 @@ static int ftrace_update_code(struct module *mod)
2186 2242
2187 for (ops = ftrace_ops_list; 2243 for (ops = ftrace_ops_list;
2188 ops != &ftrace_list_end; ops = ops->next) { 2244 ops != &ftrace_list_end; ops = ops->next) {
2189 if (ops->flags & FTRACE_OPS_FL_ENABLED && 2245 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
2190 ops_traces_mod(ops)) 2246 if (ops_traces_mod(ops))
2191 ref++; 2247 ref++;
2248 else
2249 test = true;
2250 }
2192 } 2251 }
2193 } 2252 }
2194 2253
@@ -2198,12 +2257,16 @@ static int ftrace_update_code(struct module *mod)
2198 for (pg = ftrace_new_pgs; pg; pg = pg->next) { 2257 for (pg = ftrace_new_pgs; pg; pg = pg->next) {
2199 2258
2200 for (i = 0; i < pg->index; i++) { 2259 for (i = 0; i < pg->index; i++) {
2260 int cnt = ref;
2261
2201 /* If something went wrong, bail without enabling anything */ 2262 /* If something went wrong, bail without enabling anything */
2202 if (unlikely(ftrace_disabled)) 2263 if (unlikely(ftrace_disabled))
2203 return -1; 2264 return -1;
2204 2265
2205 p = &pg->records[i]; 2266 p = &pg->records[i];
2206 p->flags = ref; 2267 if (test)
2268 cnt += referenced_filters(p);
2269 p->flags = cnt;
2207 2270
2208 /* 2271 /*
2209 * Do the initial record conversion from mcount jump 2272 * Do the initial record conversion from mcount jump
@@ -2223,7 +2286,7 @@ static int ftrace_update_code(struct module *mod)
2223 * conversion puts the module to the correct state, thus 2286 * conversion puts the module to the correct state, thus
2224 * passing the ftrace_make_call check. 2287 * passing the ftrace_make_call check.
2225 */ 2288 */
2226 if (ftrace_start_up && ref) { 2289 if (ftrace_start_up && cnt) {
2227 int failed = __ftrace_replace_code(p, 1); 2290 int failed = __ftrace_replace_code(p, 1);
2228 if (failed) 2291 if (failed)
2229 ftrace_bug(failed, p->ip); 2292 ftrace_bug(failed, p->ip);
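
The net effect of the ftrace_update_code() rework: ops that trace everything contribute a flat base count (ref), while enabled ops with filters are checked per record via referenced_filters(). A small worked example under an assumed initial state, with two enabled ops at module load:

    /*
     * opsA: empty filter_hash and notrace_hash -> ops_traces_mod() == 1
     *       => ref = 1 (counts toward every new record)
     * opsB: filter_hash contains only foo()    -> test = true
     *
     * For each new record p:
     *       cnt = ref + (test ? referenced_filters(p) : 0);
     *       p->flags = cnt;
     *
     * so the record for foo() gets flags = 2, every other record gets 1,
     * and only records with a nonzero count are converted while ftrace
     * is live (the "ftrace_start_up && cnt" check above).
     */
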
@@ -3374,6 +3437,12 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3374 return add_hash_entry(hash, ip); 3437 return add_hash_entry(hash, ip);
3375} 3438}
3376 3439
3440static void ftrace_ops_update_code(struct ftrace_ops *ops)
3441{
3442 if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
3443 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3444}
3445
3377static int 3446static int
3378ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, 3447ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3379 unsigned long ip, int remove, int reset, int enable) 3448 unsigned long ip, int remove, int reset, int enable)
@@ -3416,9 +3485,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3416 3485
3417 mutex_lock(&ftrace_lock); 3486 mutex_lock(&ftrace_lock);
3418 ret = ftrace_hash_move(ops, enable, orig_hash, hash); 3487 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3419 if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED 3488 if (!ret)
3420 && ftrace_enabled) 3489 ftrace_ops_update_code(ops);
3421 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3422 3490
3423 mutex_unlock(&ftrace_lock); 3491 mutex_unlock(&ftrace_lock);
3424 3492
@@ -3645,9 +3713,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
3645 mutex_lock(&ftrace_lock); 3713 mutex_lock(&ftrace_lock);
3646 ret = ftrace_hash_move(iter->ops, filter_hash, 3714 ret = ftrace_hash_move(iter->ops, filter_hash,
3647 orig_hash, iter->hash); 3715 orig_hash, iter->hash);
3648 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED) 3716 if (!ret)
3649 && ftrace_enabled) 3717 ftrace_ops_update_code(iter->ops);
3650 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3651 3718
3652 mutex_unlock(&ftrace_lock); 3719 mutex_unlock(&ftrace_lock);
3653 } 3720 }
@@ -4218,7 +4285,7 @@ static inline void ftrace_startup_enable(int command) { }
4218# define ftrace_shutdown_sysctl() do { } while (0) 4285# define ftrace_shutdown_sysctl() do { } while (0)
4219 4286
4220static inline int 4287static inline int
4221ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) 4288ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
4222{ 4289{
4223 return 1; 4290 return 1;
4224} 4291}
@@ -4241,7 +4308,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4241 do_for_each_ftrace_op(op, ftrace_control_list) { 4308 do_for_each_ftrace_op(op, ftrace_control_list) {
4242 if (!(op->flags & FTRACE_OPS_FL_STUB) && 4309 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4243 !ftrace_function_local_disabled(op) && 4310 !ftrace_function_local_disabled(op) &&
4244 ftrace_ops_test(op, ip)) 4311 ftrace_ops_test(op, ip, regs))
4245 op->func(ip, parent_ip, op, regs); 4312 op->func(ip, parent_ip, op, regs);
4246 } while_for_each_ftrace_op(op); 4313 } while_for_each_ftrace_op(op);
4247 trace_recursion_clear(TRACE_CONTROL_BIT); 4314 trace_recursion_clear(TRACE_CONTROL_BIT);
@@ -4274,7 +4341,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4274 */ 4341 */
4275 preempt_disable_notrace(); 4342 preempt_disable_notrace();
4276 do_for_each_ftrace_op(op, ftrace_ops_list) { 4343 do_for_each_ftrace_op(op, ftrace_ops_list) {
4277 if (ftrace_ops_test(op, ip)) 4344 if (ftrace_ops_test(op, ip, regs))
4278 op->func(ip, parent_ip, op, regs); 4345 op->func(ip, parent_ip, op, regs);
4279 } while_for_each_ftrace_op(op); 4346 } while_for_each_ftrace_op(op);
4280 preempt_enable_notrace(); 4347 preempt_enable_notrace();
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index e444ff88f0a4..cc2f66f68dc5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -36,11 +36,11 @@ int ring_buffer_print_entry_header(struct trace_seq *s)
36{ 36{
37 int ret; 37 int ret;
38 38
39 ret = trace_seq_printf(s, "# compressed entry header\n"); 39 ret = trace_seq_puts(s, "# compressed entry header\n");
40 ret = trace_seq_printf(s, "\ttype_len : 5 bits\n"); 40 ret = trace_seq_puts(s, "\ttype_len : 5 bits\n");
41 ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n"); 41 ret = trace_seq_puts(s, "\ttime_delta : 27 bits\n");
42 ret = trace_seq_printf(s, "\tarray : 32 bits\n"); 42 ret = trace_seq_puts(s, "\tarray : 32 bits\n");
43 ret = trace_seq_printf(s, "\n"); 43 ret = trace_seq_putc(s, '\n');
44 ret = trace_seq_printf(s, "\tpadding : type == %d\n", 44 ret = trace_seq_printf(s, "\tpadding : type == %d\n",
45 RINGBUF_TYPE_PADDING); 45 RINGBUF_TYPE_PADDING);
46 ret = trace_seq_printf(s, "\ttime_extend : type == %d\n", 46 ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
@@ -1066,7 +1066,7 @@ static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
1066} 1066}
1067 1067
1068/** 1068/**
1069 * check_pages - integrity check of buffer pages 1069 * rb_check_pages - integrity check of buffer pages
1070 * @cpu_buffer: CPU buffer with pages to test 1070 * @cpu_buffer: CPU buffer with pages to test
1071 * 1071 *
1072 * As a safety measure we check to make sure the data pages have not 1072 * As a safety measure we check to make sure the data pages have not
@@ -1258,7 +1258,7 @@ static int rb_cpu_notify(struct notifier_block *self,
1258#endif 1258#endif
1259 1259
1260/** 1260/**
1261 * ring_buffer_alloc - allocate a new ring_buffer 1261 * __ring_buffer_alloc - allocate a new ring_buffer
1262 * @size: the size in bytes per cpu that is needed. 1262 * @size: the size in bytes per cpu that is needed.
1263 * @flags: attributes to set for the ring buffer. 1263 * @flags: attributes to set for the ring buffer.
1264 * 1264 *
@@ -1607,6 +1607,7 @@ static void update_pages_handler(struct work_struct *work)
1607 * ring_buffer_resize - resize the ring buffer 1607 * ring_buffer_resize - resize the ring buffer
1608 * @buffer: the buffer to resize. 1608 * @buffer: the buffer to resize.
1609 * @size: the new size. 1609 * @size: the new size.
1610 * @cpu_id: the cpu buffer to resize
1610 * 1611 *
1611 * Minimum size is 2 * BUF_PAGE_SIZE. 1612 * Minimum size is 2 * BUF_PAGE_SIZE.
1612 * 1613 *
@@ -3956,11 +3957,11 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
3956 * expected. 3957 * expected.
3957 * 3958 *
3958 * After a sequence of ring_buffer_read_prepare calls, the user is 3959 * After a sequence of ring_buffer_read_prepare calls, the user is
3959 * expected to make at least one call to ring_buffer_prepare_sync. 3960 * expected to make at least one call to ring_buffer_read_prepare_sync.
3960 * Afterwards, ring_buffer_read_start is invoked to get things going 3961 * Afterwards, ring_buffer_read_start is invoked to get things going
3961 * for real. 3962 * for real.
3962 * 3963 *
3963 * This overall must be paired with ring_buffer_finish. 3964 * This overall must be paired with ring_buffer_read_finish.
3964 */ 3965 */
3965struct ring_buffer_iter * 3966struct ring_buffer_iter *
3966ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu) 3967ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
@@ -4009,7 +4010,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4009 * an intervening ring_buffer_read_prepare_sync must have been 4010 * an intervening ring_buffer_read_prepare_sync must have been
4010 * performed. 4011 * performed.
4011 * 4012 *
4012 * Must be paired with ring_buffer_finish. 4013 * Must be paired with ring_buffer_read_finish.
4013 */ 4014 */
4014void 4015void
4015ring_buffer_read_start(struct ring_buffer_iter *iter) 4016ring_buffer_read_start(struct ring_buffer_iter *iter)
@@ -4031,7 +4032,7 @@ ring_buffer_read_start(struct ring_buffer_iter *iter)
4031EXPORT_SYMBOL_GPL(ring_buffer_read_start); 4032EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4032 4033
4033/** 4034/**
4034 * ring_buffer_finish - finish reading the iterator of the buffer 4035 * ring_buffer_read_finish - finish reading the iterator of the buffer
 4035 * @iter: The iterator retrieved by ring_buffer_read_prepare 4036 * @iter: The iterator retrieved by ring_buffer_read_prepare
4036 * 4037 *
4037 * This re-enables the recording to the buffer, and frees the 4038 * This re-enables the recording to the buffer, and frees the
@@ -4346,6 +4347,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
4346/** 4347/**
4347 * ring_buffer_alloc_read_page - allocate a page to read from buffer 4348 * ring_buffer_alloc_read_page - allocate a page to read from buffer
4348 * @buffer: the buffer to allocate for. 4349 * @buffer: the buffer to allocate for.
4350 * @cpu: the cpu buffer to allocate.
4349 * 4351 *
4350 * This function is used in conjunction with ring_buffer_read_page. 4352 * This function is used in conjunction with ring_buffer_read_page.
4351 * When reading a full page from the ring buffer, these functions 4353 * When reading a full page from the ring buffer, these functions
@@ -4403,7 +4405,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4403 * to swap with a page in the ring buffer. 4405 * to swap with a page in the ring buffer.
4404 * 4406 *
4405 * for example: 4407 * for example:
4406 * rpage = ring_buffer_alloc_read_page(buffer); 4408 * rpage = ring_buffer_alloc_read_page(buffer, cpu);
4407 * if (!rpage) 4409 * if (!rpage)
4408 * return error; 4410 * return error;
4409 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); 4411 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
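
The trace_seq conversions at the top of ring_buffer.c are a common micro-cleanup: a fixed string has no format to expand, so puts/putc skip the vsnprintf machinery entirely and avoid misparsing any literal '%'. The substitution, shown on the first line of the hunk:

    trace_seq_printf(s, "# compressed entry header\n"); /* before: parses format */
    trace_seq_puts(s, "# compressed entry header\n");   /* after: plain copy */
    trace_seq_putc(s, '\n');                            /* single character */
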
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0cd500bffd9b..496f94d57698 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -243,20 +243,25 @@ int filter_current_check_discard(struct ring_buffer *buffer,
243} 243}
244EXPORT_SYMBOL_GPL(filter_current_check_discard); 244EXPORT_SYMBOL_GPL(filter_current_check_discard);
245 245
246cycle_t ftrace_now(int cpu) 246cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
247{ 247{
248 u64 ts; 248 u64 ts;
249 249
250 /* Early boot up does not have a buffer yet */ 250 /* Early boot up does not have a buffer yet */
251 if (!global_trace.trace_buffer.buffer) 251 if (!buf->buffer)
252 return trace_clock_local(); 252 return trace_clock_local();
253 253
254 ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu); 254 ts = ring_buffer_time_stamp(buf->buffer, cpu);
255 ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts); 255 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
256 256
257 return ts; 257 return ts;
258} 258}
259 259
260cycle_t ftrace_now(int cpu)
261{
262 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
263}
264
260/** 265/**
261 * tracing_is_enabled - Show if global_trace has been disabled 266 * tracing_is_enabled - Show if global_trace has been disabled
262 * 267 *
@@ -1211,7 +1216,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
1211 /* Make sure all commits have finished */ 1216 /* Make sure all commits have finished */
1212 synchronize_sched(); 1217 synchronize_sched();
1213 1218
1214 buf->time_start = ftrace_now(buf->cpu); 1219 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1215 1220
1216 for_each_online_cpu(cpu) 1221 for_each_online_cpu(cpu)
1217 ring_buffer_reset_cpu(buffer, cpu); 1222 ring_buffer_reset_cpu(buffer, cpu);
@@ -1219,23 +1224,17 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
1219 ring_buffer_record_enable(buffer); 1224 ring_buffer_record_enable(buffer);
1220} 1225}
1221 1226
1222void tracing_reset_current(int cpu) 1227/* Must have trace_types_lock held */
1223{
1224 tracing_reset(&global_trace.trace_buffer, cpu);
1225}
1226
1227void tracing_reset_all_online_cpus(void) 1228void tracing_reset_all_online_cpus(void)
1228{ 1229{
1229 struct trace_array *tr; 1230 struct trace_array *tr;
1230 1231
1231 mutex_lock(&trace_types_lock);
1232 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 1232 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1233 tracing_reset_online_cpus(&tr->trace_buffer); 1233 tracing_reset_online_cpus(&tr->trace_buffer);
1234#ifdef CONFIG_TRACER_MAX_TRACE 1234#ifdef CONFIG_TRACER_MAX_TRACE
1235 tracing_reset_online_cpus(&tr->max_buffer); 1235 tracing_reset_online_cpus(&tr->max_buffer);
1236#endif 1236#endif
1237 } 1237 }
1238 mutex_unlock(&trace_types_lock);
1239} 1238}
1240 1239
1241#define SAVED_CMDLINES 128 1240#define SAVED_CMDLINES 128
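
With tracing_reset_current() gone and the lock hoisted out of the loop, tracing_reset_all_online_cpus() now states its locking contract only by comment: every caller must already hold trace_types_lock. The caller side implied by that comment:

    mutex_lock(&trace_types_lock);
    tracing_reset_all_online_cpus();        /* locking is the caller's job now */
    mutex_unlock(&trace_types_lock);
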
@@ -2843,6 +2842,17 @@ static int s_show(struct seq_file *m, void *v)
2843 return 0; 2842 return 0;
2844} 2843}
2845 2844
2845/*
2846 * Should be used after trace_array_get(), trace_types_lock
2847 * ensures that i_cdev was already initialized.
2848 */
2849static inline int tracing_get_cpu(struct inode *inode)
2850{
2851 if (inode->i_cdev) /* See trace_create_cpu_file() */
2852 return (long)inode->i_cdev - 1;
2853 return RING_BUFFER_ALL_CPUS;
2854}
2855
2846static const struct seq_operations tracer_seq_ops = { 2856static const struct seq_operations tracer_seq_ops = {
2847 .start = s_start, 2857 .start = s_start,
2848 .next = s_next, 2858 .next = s_next,
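
tracing_get_cpu() decodes a per-cpu file's CPU number straight from the inode: i_cdev holds cpu + 1 so that the default NULL naturally means RING_BUFFER_ALL_CPUS. The matching encode side, inferred from the decoder and from the trace_create_cpu_file() reference in the comment (this helper body is an assumption, not quoted from the patch):

    /* assumed encoder, paired with tracing_get_cpu() above */
    static void tracing_set_cpu(struct inode *inode, int cpu)
    {
            inode->i_cdev = (void *)(long)(cpu + 1);   /* NULL stays "all CPUs" */
    }
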
@@ -2851,9 +2861,9 @@ static const struct seq_operations tracer_seq_ops = {
2851}; 2861};
2852 2862
2853static struct trace_iterator * 2863static struct trace_iterator *
2854__tracing_open(struct trace_array *tr, struct trace_cpu *tc, 2864__tracing_open(struct inode *inode, struct file *file, bool snapshot)
2855 struct inode *inode, struct file *file, bool snapshot)
2856{ 2865{
2866 struct trace_array *tr = inode->i_private;
2857 struct trace_iterator *iter; 2867 struct trace_iterator *iter;
2858 int cpu; 2868 int cpu;
2859 2869
@@ -2894,8 +2904,8 @@ __tracing_open(struct trace_array *tr, struct trace_cpu *tc,
2894 iter->trace_buffer = &tr->trace_buffer; 2904 iter->trace_buffer = &tr->trace_buffer;
2895 iter->snapshot = snapshot; 2905 iter->snapshot = snapshot;
2896 iter->pos = -1; 2906 iter->pos = -1;
2907 iter->cpu_file = tracing_get_cpu(inode);
2897 mutex_init(&iter->mutex); 2908 mutex_init(&iter->mutex);
2898 iter->cpu_file = tc->cpu;
2899 2909
2900 /* Notify the tracer early; before we stop tracing. */ 2910 /* Notify the tracer early; before we stop tracing. */
2901 if (iter->trace && iter->trace->open) 2911 if (iter->trace && iter->trace->open)
@@ -2971,45 +2981,22 @@ static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
2971 filp->private_data = inode->i_private; 2981 filp->private_data = inode->i_private;
2972 2982
2973 return 0; 2983 return 0;
2974
2975}
2976
2977static int tracing_open_generic_tc(struct inode *inode, struct file *filp)
2978{
2979 struct trace_cpu *tc = inode->i_private;
2980 struct trace_array *tr = tc->tr;
2981
2982 if (tracing_disabled)
2983 return -ENODEV;
2984
2985 if (trace_array_get(tr) < 0)
2986 return -ENODEV;
2987
2988 filp->private_data = inode->i_private;
2989
2990 return 0;
2991
2992} 2984}
2993 2985
2994static int tracing_release(struct inode *inode, struct file *file) 2986static int tracing_release(struct inode *inode, struct file *file)
2995{ 2987{
2988 struct trace_array *tr = inode->i_private;
2996 struct seq_file *m = file->private_data; 2989 struct seq_file *m = file->private_data;
2997 struct trace_iterator *iter; 2990 struct trace_iterator *iter;
2998 struct trace_array *tr;
2999 int cpu; 2991 int cpu;
3000 2992
3001 /* Writes do not use seq_file, need to grab tr from inode */
3002 if (!(file->f_mode & FMODE_READ)) { 2993 if (!(file->f_mode & FMODE_READ)) {
3003 struct trace_cpu *tc = inode->i_private; 2994 trace_array_put(tr);
3004
3005 trace_array_put(tc->tr);
3006 return 0; 2995 return 0;
3007 } 2996 }
3008 2997
2998 /* Writes do not use seq_file */
3009 iter = m->private; 2999 iter = m->private;
3010 tr = iter->tr;
3011 trace_array_put(tr);
3012
3013 mutex_lock(&trace_types_lock); 3000 mutex_lock(&trace_types_lock);
3014 3001
3015 for_each_tracing_cpu(cpu) { 3002 for_each_tracing_cpu(cpu) {
@@ -3023,6 +3010,9 @@ static int tracing_release(struct inode *inode, struct file *file)
3023 if (!iter->snapshot) 3010 if (!iter->snapshot)
3024 /* reenable tracing if it was previously enabled */ 3011 /* reenable tracing if it was previously enabled */
3025 tracing_start_tr(tr); 3012 tracing_start_tr(tr);
3013
3014 __trace_array_put(tr);
3015
3026 mutex_unlock(&trace_types_lock); 3016 mutex_unlock(&trace_types_lock);
3027 3017
3028 mutex_destroy(&iter->mutex); 3018 mutex_destroy(&iter->mutex);
@@ -3042,15 +3032,6 @@ static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3042 return 0; 3032 return 0;
3043} 3033}
3044 3034
3045static int tracing_release_generic_tc(struct inode *inode, struct file *file)
3046{
3047 struct trace_cpu *tc = inode->i_private;
3048 struct trace_array *tr = tc->tr;
3049
3050 trace_array_put(tr);
3051 return 0;
3052}
3053
3054static int tracing_single_release_tr(struct inode *inode, struct file *file) 3035static int tracing_single_release_tr(struct inode *inode, struct file *file)
3055{ 3036{
3056 struct trace_array *tr = inode->i_private; 3037 struct trace_array *tr = inode->i_private;
@@ -3062,8 +3043,7 @@ static int tracing_single_release_tr(struct inode *inode, struct file *file)
3062 3043
3063static int tracing_open(struct inode *inode, struct file *file) 3044static int tracing_open(struct inode *inode, struct file *file)
3064{ 3045{
3065 struct trace_cpu *tc = inode->i_private; 3046 struct trace_array *tr = inode->i_private;
3066 struct trace_array *tr = tc->tr;
3067 struct trace_iterator *iter; 3047 struct trace_iterator *iter;
3068 int ret = 0; 3048 int ret = 0;
3069 3049
@@ -3071,16 +3051,17 @@ static int tracing_open(struct inode *inode, struct file *file)
3071 return -ENODEV; 3051 return -ENODEV;
3072 3052
3073 /* If this file was open for write, then erase contents */ 3053 /* If this file was open for write, then erase contents */
3074 if ((file->f_mode & FMODE_WRITE) && 3054 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3075 (file->f_flags & O_TRUNC)) { 3055 int cpu = tracing_get_cpu(inode);
3076 if (tc->cpu == RING_BUFFER_ALL_CPUS) 3056
3057 if (cpu == RING_BUFFER_ALL_CPUS)
3077 tracing_reset_online_cpus(&tr->trace_buffer); 3058 tracing_reset_online_cpus(&tr->trace_buffer);
3078 else 3059 else
3079 tracing_reset(&tr->trace_buffer, tc->cpu); 3060 tracing_reset(&tr->trace_buffer, cpu);
3080 } 3061 }
3081 3062
3082 if (file->f_mode & FMODE_READ) { 3063 if (file->f_mode & FMODE_READ) {
3083 iter = __tracing_open(tr, tc, inode, file, false); 3064 iter = __tracing_open(inode, file, false);
3084 if (IS_ERR(iter)) 3065 if (IS_ERR(iter))
3085 ret = PTR_ERR(iter); 3066 ret = PTR_ERR(iter);
3086 else if (trace_flags & TRACE_ITER_LATENCY_FMT) 3067 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
@@ -3447,6 +3428,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3447static int tracing_trace_options_open(struct inode *inode, struct file *file) 3428static int tracing_trace_options_open(struct inode *inode, struct file *file)
3448{ 3429{
3449 struct trace_array *tr = inode->i_private; 3430 struct trace_array *tr = inode->i_private;
3431 int ret;
3450 3432
3451 if (tracing_disabled) 3433 if (tracing_disabled)
3452 return -ENODEV; 3434 return -ENODEV;
@@ -3454,7 +3436,11 @@ static int tracing_trace_options_open(struct inode *inode, struct file *file)
3454 if (trace_array_get(tr) < 0) 3436 if (trace_array_get(tr) < 0)
3455 return -ENODEV; 3437 return -ENODEV;
3456 3438
3457 return single_open(file, tracing_trace_options_show, inode->i_private); 3439 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3440 if (ret < 0)
3441 trace_array_put(tr);
3442
3443 return ret;
3458} 3444}
3459 3445
3460static const struct file_operations tracing_iter_fops = { 3446static const struct file_operations tracing_iter_fops = {
@@ -3537,14 +3523,14 @@ static const char readme_msg[] =
3537 "\n snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n" 3523 "\n snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n"
3538 "\t\t\t Read the contents for more information\n" 3524 "\t\t\t Read the contents for more information\n"
3539#endif 3525#endif
3540#ifdef CONFIG_STACKTRACE 3526#ifdef CONFIG_STACK_TRACER
3541 " stack_trace\t\t- Shows the max stack trace when active\n" 3527 " stack_trace\t\t- Shows the max stack trace when active\n"
3542 " stack_max_size\t- Shows current max stack size that was traced\n" 3528 " stack_max_size\t- Shows current max stack size that was traced\n"
3543 "\t\t\t Write into this file to reset the max size (trigger a new trace)\n" 3529 "\t\t\t Write into this file to reset the max size (trigger a new trace)\n"
3544#ifdef CONFIG_DYNAMIC_FTRACE 3530#ifdef CONFIG_DYNAMIC_FTRACE
3545 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n" 3531 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n"
3546#endif 3532#endif
3547#endif /* CONFIG_STACKTRACE */ 3533#endif /* CONFIG_STACK_TRACER */
3548; 3534;
3549 3535
3550static ssize_t 3536static ssize_t
@@ -3941,8 +3927,7 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3941 3927
3942static int tracing_open_pipe(struct inode *inode, struct file *filp) 3928static int tracing_open_pipe(struct inode *inode, struct file *filp)
3943{ 3929{
3944 struct trace_cpu *tc = inode->i_private; 3930 struct trace_array *tr = inode->i_private;
3945 struct trace_array *tr = tc->tr;
3946 struct trace_iterator *iter; 3931 struct trace_iterator *iter;
3947 int ret = 0; 3932 int ret = 0;
3948 3933
@@ -3958,6 +3943,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
3958 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 3943 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3959 if (!iter) { 3944 if (!iter) {
3960 ret = -ENOMEM; 3945 ret = -ENOMEM;
3946 __trace_array_put(tr);
3961 goto out; 3947 goto out;
3962 } 3948 }
3963 3949
@@ -3987,9 +3973,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
3987 if (trace_clocks[tr->clock_id].in_ns) 3973 if (trace_clocks[tr->clock_id].in_ns)
3988 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 3974 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3989 3975
3990 iter->cpu_file = tc->cpu; 3976 iter->tr = tr;
3991 iter->tr = tc->tr; 3977 iter->trace_buffer = &tr->trace_buffer;
3992 iter->trace_buffer = &tc->tr->trace_buffer; 3978 iter->cpu_file = tracing_get_cpu(inode);
3993 mutex_init(&iter->mutex); 3979 mutex_init(&iter->mutex);
3994 filp->private_data = iter; 3980 filp->private_data = iter;
3995 3981
@@ -4012,8 +3998,7 @@ fail:
4012static int tracing_release_pipe(struct inode *inode, struct file *file) 3998static int tracing_release_pipe(struct inode *inode, struct file *file)
4013{ 3999{
4014 struct trace_iterator *iter = file->private_data; 4000 struct trace_iterator *iter = file->private_data;
4015 struct trace_cpu *tc = inode->i_private; 4001 struct trace_array *tr = inode->i_private;
4016 struct trace_array *tr = tc->tr;
4017 4002
4018 mutex_lock(&trace_types_lock); 4003 mutex_lock(&trace_types_lock);
4019 4004
@@ -4166,6 +4151,7 @@ waitagain:
4166 memset(&iter->seq, 0, 4151 memset(&iter->seq, 0,
4167 sizeof(struct trace_iterator) - 4152 sizeof(struct trace_iterator) -
4168 offsetof(struct trace_iterator, seq)); 4153 offsetof(struct trace_iterator, seq));
4154 cpumask_clear(iter->started);
4169 iter->pos = -1; 4155 iter->pos = -1;
4170 4156
4171 trace_event_read_lock(); 4157 trace_event_read_lock();
@@ -4366,15 +4352,16 @@ static ssize_t
4366tracing_entries_read(struct file *filp, char __user *ubuf, 4352tracing_entries_read(struct file *filp, char __user *ubuf,
4367 size_t cnt, loff_t *ppos) 4353 size_t cnt, loff_t *ppos)
4368{ 4354{
4369 struct trace_cpu *tc = filp->private_data; 4355 struct inode *inode = file_inode(filp);
4370 struct trace_array *tr = tc->tr; 4356 struct trace_array *tr = inode->i_private;
4357 int cpu = tracing_get_cpu(inode);
4371 char buf[64]; 4358 char buf[64];
4372 int r = 0; 4359 int r = 0;
4373 ssize_t ret; 4360 ssize_t ret;
4374 4361
4375 mutex_lock(&trace_types_lock); 4362 mutex_lock(&trace_types_lock);
4376 4363
4377 if (tc->cpu == RING_BUFFER_ALL_CPUS) { 4364 if (cpu == RING_BUFFER_ALL_CPUS) {
4378 int cpu, buf_size_same; 4365 int cpu, buf_size_same;
4379 unsigned long size; 4366 unsigned long size;
4380 4367
@@ -4401,7 +4388,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
4401 } else 4388 } else
4402 r = sprintf(buf, "X\n"); 4389 r = sprintf(buf, "X\n");
4403 } else 4390 } else
4404 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10); 4391 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
4405 4392
4406 mutex_unlock(&trace_types_lock); 4393 mutex_unlock(&trace_types_lock);
4407 4394
@@ -4413,7 +4400,8 @@ static ssize_t
4413tracing_entries_write(struct file *filp, const char __user *ubuf, 4400tracing_entries_write(struct file *filp, const char __user *ubuf,
4414 size_t cnt, loff_t *ppos) 4401 size_t cnt, loff_t *ppos)
4415{ 4402{
4416 struct trace_cpu *tc = filp->private_data; 4403 struct inode *inode = file_inode(filp);
4404 struct trace_array *tr = inode->i_private;
4417 unsigned long val; 4405 unsigned long val;
4418 int ret; 4406 int ret;
4419 4407
@@ -4427,8 +4415,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
4427 4415
4428 /* value is in KB */ 4416 /* value is in KB */
4429 val <<= 10; 4417 val <<= 10;
4430 4418 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4431 ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
4432 if (ret < 0) 4419 if (ret < 0)
4433 return ret; 4420 return ret;
4434 4421
@@ -4482,7 +4469,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
4482 4469
4483 /* disable tracing ? */ 4470 /* disable tracing ? */
4484 if (trace_flags & TRACE_ITER_STOP_ON_FREE) 4471 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4485 tracing_off(); 4472 tracer_tracing_off(tr);
4486 /* resize the ring buffer to 0 */ 4473 /* resize the ring buffer to 0 */
4487 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); 4474 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4488 4475
@@ -4647,12 +4634,12 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4647 * New clock may not be consistent with the previous clock. 4634 * New clock may not be consistent with the previous clock.
4648 * Reset the buffer so that it doesn't have incomparable timestamps. 4635 * Reset the buffer so that it doesn't have incomparable timestamps.
4649 */ 4636 */
4650 tracing_reset_online_cpus(&global_trace.trace_buffer); 4637 tracing_reset_online_cpus(&tr->trace_buffer);
4651 4638
4652#ifdef CONFIG_TRACER_MAX_TRACE 4639#ifdef CONFIG_TRACER_MAX_TRACE
4653 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer) 4640 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4654 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); 4641 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4655 tracing_reset_online_cpus(&global_trace.max_buffer); 4642 tracing_reset_online_cpus(&tr->max_buffer);
4656#endif 4643#endif
4657 4644
4658 mutex_unlock(&trace_types_lock); 4645 mutex_unlock(&trace_types_lock);
@@ -4689,8 +4676,7 @@ struct ftrace_buffer_info {
4689#ifdef CONFIG_TRACER_SNAPSHOT 4676#ifdef CONFIG_TRACER_SNAPSHOT
4690static int tracing_snapshot_open(struct inode *inode, struct file *file) 4677static int tracing_snapshot_open(struct inode *inode, struct file *file)
4691{ 4678{
4692 struct trace_cpu *tc = inode->i_private; 4679 struct trace_array *tr = inode->i_private;
4693 struct trace_array *tr = tc->tr;
4694 struct trace_iterator *iter; 4680 struct trace_iterator *iter;
4695 struct seq_file *m; 4681 struct seq_file *m;
4696 int ret = 0; 4682 int ret = 0;
@@ -4699,26 +4685,29 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
4699 return -ENODEV; 4685 return -ENODEV;
4700 4686
4701 if (file->f_mode & FMODE_READ) { 4687 if (file->f_mode & FMODE_READ) {
4702 iter = __tracing_open(tr, tc, inode, file, true); 4688 iter = __tracing_open(inode, file, true);
4703 if (IS_ERR(iter)) 4689 if (IS_ERR(iter))
4704 ret = PTR_ERR(iter); 4690 ret = PTR_ERR(iter);
4705 } else { 4691 } else {
4706 /* Writes still need the seq_file to hold the private data */ 4692 /* Writes still need the seq_file to hold the private data */
4693 ret = -ENOMEM;
4707 m = kzalloc(sizeof(*m), GFP_KERNEL); 4694 m = kzalloc(sizeof(*m), GFP_KERNEL);
4708 if (!m) 4695 if (!m)
4709 return -ENOMEM; 4696 goto out;
4710 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 4697 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4711 if (!iter) { 4698 if (!iter) {
4712 kfree(m); 4699 kfree(m);
4713 return -ENOMEM; 4700 goto out;
4714 } 4701 }
4702 ret = 0;
4703
4715 iter->tr = tr; 4704 iter->tr = tr;
4716 iter->trace_buffer = &tc->tr->max_buffer; 4705 iter->trace_buffer = &tr->max_buffer;
4717 iter->cpu_file = tc->cpu; 4706 iter->cpu_file = tracing_get_cpu(inode);
4718 m->private = iter; 4707 m->private = iter;
4719 file->private_data = m; 4708 file->private_data = m;
4720 } 4709 }
4721 4710out:
4722 if (ret < 0) 4711 if (ret < 0)
4723 trace_array_put(tr); 4712 trace_array_put(tr);
4724 4713
@@ -4873,11 +4862,11 @@ static const struct file_operations tracing_pipe_fops = {
4873}; 4862};
4874 4863
4875static const struct file_operations tracing_entries_fops = { 4864static const struct file_operations tracing_entries_fops = {
4876 .open = tracing_open_generic_tc, 4865 .open = tracing_open_generic_tr,
4877 .read = tracing_entries_read, 4866 .read = tracing_entries_read,
4878 .write = tracing_entries_write, 4867 .write = tracing_entries_write,
4879 .llseek = generic_file_llseek, 4868 .llseek = generic_file_llseek,
4880 .release = tracing_release_generic_tc, 4869 .release = tracing_release_generic_tr,
4881}; 4870};
4882 4871
4883static const struct file_operations tracing_total_entries_fops = { 4872static const struct file_operations tracing_total_entries_fops = {
@@ -4929,8 +4918,7 @@ static const struct file_operations snapshot_raw_fops = {
4929 4918
4930static int tracing_buffers_open(struct inode *inode, struct file *filp) 4919static int tracing_buffers_open(struct inode *inode, struct file *filp)
4931{ 4920{
4932 struct trace_cpu *tc = inode->i_private; 4921 struct trace_array *tr = inode->i_private;
4933 struct trace_array *tr = tc->tr;
4934 struct ftrace_buffer_info *info; 4922 struct ftrace_buffer_info *info;
4935 int ret; 4923 int ret;
4936 4924
@@ -4948,10 +4936,8 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
4948 4936
4949 mutex_lock(&trace_types_lock); 4937 mutex_lock(&trace_types_lock);
4950 4938
4951 tr->ref++;
4952
4953 info->iter.tr = tr; 4939 info->iter.tr = tr;
4954 info->iter.cpu_file = tc->cpu; 4940 info->iter.cpu_file = tracing_get_cpu(inode);
4955 info->iter.trace = tr->current_trace; 4941 info->iter.trace = tr->current_trace;
4956 info->iter.trace_buffer = &tr->trace_buffer; 4942 info->iter.trace_buffer = &tr->trace_buffer;
4957 info->spare = NULL; 4943 info->spare = NULL;
@@ -5268,14 +5254,14 @@ static ssize_t
5268tracing_stats_read(struct file *filp, char __user *ubuf, 5254tracing_stats_read(struct file *filp, char __user *ubuf,
5269 size_t count, loff_t *ppos) 5255 size_t count, loff_t *ppos)
5270{ 5256{
5271 struct trace_cpu *tc = filp->private_data; 5257 struct inode *inode = file_inode(filp);
5272 struct trace_array *tr = tc->tr; 5258 struct trace_array *tr = inode->i_private;
5273 struct trace_buffer *trace_buf = &tr->trace_buffer; 5259 struct trace_buffer *trace_buf = &tr->trace_buffer;
5260 int cpu = tracing_get_cpu(inode);
5274 struct trace_seq *s; 5261 struct trace_seq *s;
5275 unsigned long cnt; 5262 unsigned long cnt;
5276 unsigned long long t; 5263 unsigned long long t;
5277 unsigned long usec_rem; 5264 unsigned long usec_rem;
5278 int cpu = tc->cpu;
5279 5265
5280 s = kmalloc(sizeof(*s), GFP_KERNEL); 5266 s = kmalloc(sizeof(*s), GFP_KERNEL);
5281 if (!s) 5267 if (!s)
@@ -5328,9 +5314,10 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
5328} 5314}
5329 5315
5330static const struct file_operations tracing_stats_fops = { 5316static const struct file_operations tracing_stats_fops = {
5331 .open = tracing_open_generic, 5317 .open = tracing_open_generic_tr,
5332 .read = tracing_stats_read, 5318 .read = tracing_stats_read,
5333 .llseek = generic_file_llseek, 5319 .llseek = generic_file_llseek,
5320 .release = tracing_release_generic_tr,
5334}; 5321};
5335 5322
5336#ifdef CONFIG_DYNAMIC_FTRACE 5323#ifdef CONFIG_DYNAMIC_FTRACE
@@ -5519,10 +5506,20 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5519 return tr->percpu_dir; 5506 return tr->percpu_dir;
5520} 5507}
5521 5508
5509static struct dentry *
5510trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5511 void *data, long cpu, const struct file_operations *fops)
5512{
5513 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5514
5515 if (ret) /* See tracing_get_cpu() */
5516 ret->d_inode->i_cdev = (void *)(cpu + 1);
5517 return ret;
5518}
5519
5522static void 5520static void
5523tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) 5521tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
5524{ 5522{
5525 struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
5526 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); 5523 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5527 struct dentry *d_cpu; 5524 struct dentry *d_cpu;
5528 char cpu_dir[30]; /* 30 characters should be more than enough */ 5525 char cpu_dir[30]; /* 30 characters should be more than enough */
@@ -5538,28 +5535,28 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
5538 } 5535 }
5539 5536
5540 /* per cpu trace_pipe */ 5537 /* per cpu trace_pipe */
5541 trace_create_file("trace_pipe", 0444, d_cpu, 5538 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
5542 (void *)&data->trace_cpu, &tracing_pipe_fops); 5539 tr, cpu, &tracing_pipe_fops);
5543 5540
5544 /* per cpu trace */ 5541 /* per cpu trace */
5545 trace_create_file("trace", 0644, d_cpu, 5542 trace_create_cpu_file("trace", 0644, d_cpu,
5546 (void *)&data->trace_cpu, &tracing_fops); 5543 tr, cpu, &tracing_fops);
5547 5544
5548 trace_create_file("trace_pipe_raw", 0444, d_cpu, 5545 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
5549 (void *)&data->trace_cpu, &tracing_buffers_fops); 5546 tr, cpu, &tracing_buffers_fops);
5550 5547
5551 trace_create_file("stats", 0444, d_cpu, 5548 trace_create_cpu_file("stats", 0444, d_cpu,
5552 (void *)&data->trace_cpu, &tracing_stats_fops); 5549 tr, cpu, &tracing_stats_fops);
5553 5550
5554 trace_create_file("buffer_size_kb", 0444, d_cpu, 5551 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
5555 (void *)&data->trace_cpu, &tracing_entries_fops); 5552 tr, cpu, &tracing_entries_fops);
5556 5553
5557#ifdef CONFIG_TRACER_SNAPSHOT 5554#ifdef CONFIG_TRACER_SNAPSHOT
5558 trace_create_file("snapshot", 0644, d_cpu, 5555 trace_create_cpu_file("snapshot", 0644, d_cpu,
5559 (void *)&data->trace_cpu, &snapshot_fops); 5556 tr, cpu, &snapshot_fops);
5560 5557
5561 trace_create_file("snapshot_raw", 0444, d_cpu, 5558 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
5562 (void *)&data->trace_cpu, &snapshot_raw_fops); 5559 tr, cpu, &snapshot_raw_fops);
5563#endif 5560#endif
5564} 5561}
5565 5562
@@ -5868,17 +5865,6 @@ struct dentry *trace_instance_dir;
5868static void 5865static void
5869init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer); 5866init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
5870 5867
5871static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
5872{
5873 int cpu;
5874
5875 for_each_tracing_cpu(cpu) {
5876 memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
5877 per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
5878 per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
5879 }
5880}
5881
5882static int 5868static int
5883allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) 5869allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
5884{ 5870{
@@ -5896,8 +5882,6 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
5896 return -ENOMEM; 5882 return -ENOMEM;
5897 } 5883 }
5898 5884
5899 init_trace_buffers(tr, buf);
5900
5901 /* Allocate the first page for all buffers */ 5885 /* Allocate the first page for all buffers */
5902 set_buffer_entries(&tr->trace_buffer, 5886 set_buffer_entries(&tr->trace_buffer,
5903 ring_buffer_size(tr->trace_buffer.buffer, 0)); 5887 ring_buffer_size(tr->trace_buffer.buffer, 0));
@@ -5964,17 +5948,15 @@ static int new_instance_create(const char *name)
5964 if (allocate_trace_buffers(tr, trace_buf_size) < 0) 5948 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
5965 goto out_free_tr; 5949 goto out_free_tr;
5966 5950
5967 /* Holder for file callbacks */
5968 tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
5969 tr->trace_cpu.tr = tr;
5970
5971 tr->dir = debugfs_create_dir(name, trace_instance_dir); 5951 tr->dir = debugfs_create_dir(name, trace_instance_dir);
5972 if (!tr->dir) 5952 if (!tr->dir)
5973 goto out_free_tr; 5953 goto out_free_tr;
5974 5954
5975 ret = event_trace_add_tracer(tr->dir, tr); 5955 ret = event_trace_add_tracer(tr->dir, tr);
5976 if (ret) 5956 if (ret) {
5957 debugfs_remove_recursive(tr->dir);
5977 goto out_free_tr; 5958 goto out_free_tr;
5959 }
5978 5960
5979 init_tracer_debugfs(tr, tr->dir); 5961 init_tracer_debugfs(tr, tr->dir);
5980 5962
@@ -6120,13 +6102,13 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6120 tr, &tracing_iter_fops); 6102 tr, &tracing_iter_fops);
6121 6103
6122 trace_create_file("trace", 0644, d_tracer, 6104 trace_create_file("trace", 0644, d_tracer,
6123 (void *)&tr->trace_cpu, &tracing_fops); 6105 tr, &tracing_fops);
6124 6106
6125 trace_create_file("trace_pipe", 0444, d_tracer, 6107 trace_create_file("trace_pipe", 0444, d_tracer,
6126 (void *)&tr->trace_cpu, &tracing_pipe_fops); 6108 tr, &tracing_pipe_fops);
6127 6109
6128 trace_create_file("buffer_size_kb", 0644, d_tracer, 6110 trace_create_file("buffer_size_kb", 0644, d_tracer,
6129 (void *)&tr->trace_cpu, &tracing_entries_fops); 6111 tr, &tracing_entries_fops);
6130 6112
6131 trace_create_file("buffer_total_size_kb", 0444, d_tracer, 6113 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6132 tr, &tracing_total_entries_fops); 6114 tr, &tracing_total_entries_fops);
@@ -6141,11 +6123,11 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6141 &trace_clock_fops); 6123 &trace_clock_fops);
6142 6124
6143 trace_create_file("tracing_on", 0644, d_tracer, 6125 trace_create_file("tracing_on", 0644, d_tracer,
6144 tr, &rb_simple_fops); 6126 tr, &rb_simple_fops);
6145 6127
6146#ifdef CONFIG_TRACER_SNAPSHOT 6128#ifdef CONFIG_TRACER_SNAPSHOT
6147 trace_create_file("snapshot", 0644, d_tracer, 6129 trace_create_file("snapshot", 0644, d_tracer,
6148 (void *)&tr->trace_cpu, &snapshot_fops); 6130 tr, &snapshot_fops);
6149#endif 6131#endif
6150 6132
6151 for_each_tracing_cpu(cpu) 6133 for_each_tracing_cpu(cpu)
@@ -6439,10 +6421,6 @@ __init static int tracer_alloc_buffers(void)
6439 6421
6440 global_trace.flags = TRACE_ARRAY_FL_GLOBAL; 6422 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6441 6423
6442 /* Holder for file callbacks */
6443 global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
6444 global_trace.trace_cpu.tr = &global_trace;
6445
6446 INIT_LIST_HEAD(&global_trace.systems); 6424 INIT_LIST_HEAD(&global_trace.systems);
6447 INIT_LIST_HEAD(&global_trace.events); 6425 INIT_LIST_HEAD(&global_trace.events);
6448 list_add(&global_trace.list, &ftrace_trace_arrays); 6426 list_add(&global_trace.list, &ftrace_trace_arrays);
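
The trace.c hunks above retire the struct trace_cpu container: instead of pointing each per-cpu debugfs file at a trace_cpu holder, trace_create_cpu_file() encodes the CPU number directly in the inode by stashing cpu + 1 in i_cdev, and tracing_get_cpu() decodes it, treating a NULL i_cdev as RING_BUFFER_ALL_CPUS. A minimal userspace sketch of that encode/decode pair (struct fake_inode and both helper names are illustrative stand-ins, not kernel API):

#include <stdio.h>

#define RING_BUFFER_ALL_CPUS -1

/* Hypothetical stand-in for the kernel's struct inode; only the
 * i_cdev slot matters for this trick. */
struct fake_inode {
	void *i_cdev;
};

/* Encode: store cpu + 1 so that cpu 0 stays distinguishable from a
 * NULL pointer (which means "no specific CPU"). */
static void set_cpu_file(struct fake_inode *inode, long cpu)
{
	inode->i_cdev = (void *)(cpu + 1);
}

/* Decode: a NULL i_cdev falls back to "all CPUs". */
static int get_cpu_file(struct fake_inode *inode)
{
	if (inode->i_cdev)
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}

int main(void)
{
	struct fake_inode per_cpu = { 0 }, top_level = { 0 };

	set_cpu_file(&per_cpu, 0);
	printf("per-cpu file -> cpu %d\n", get_cpu_file(&per_cpu));   /* 0 */
	printf("top-level file -> %d\n", get_cpu_file(&top_level));   /* -1 */
	return 0;
}

The +1 offset is the whole point: without it, a per-cpu file for CPU 0 would store NULL in i_cdev and be indistinguishable from a file that was never tagged with a CPU at all.
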
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 4a4f6e1828b6..afaae41b0a02 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -130,19 +130,12 @@ enum trace_flag_type {
 
 struct trace_array;
 
-struct trace_cpu {
-	struct trace_array	*tr;
-	struct dentry		*dir;
-	int			cpu;
-};
-
 /*
  * The CPU trace array - it consists of thousands of trace entries
  * plus some other descriptor data: (for example which task started
  * the trace, etc.)
  */
 struct trace_array_cpu {
-	struct trace_cpu	trace_cpu;
 	atomic_t		disabled;
 	void			*buffer_page;	/* ring buffer spare */
 
@@ -196,7 +189,6 @@ struct trace_array {
 	bool			allocated_snapshot;
 #endif
 	int			buffer_disabled;
-	struct trace_cpu	trace_cpu;	/* place holder */
 #ifdef CONFIG_FTRACE_SYSCALLS
 	int			sys_refcount_enter;
 	int			sys_refcount_exit;
@@ -214,7 +206,6 @@ struct trace_array {
 	struct dentry		*event_dir;
 	struct list_head	systems;
 	struct list_head	events;
-	struct task_struct	*waiter;
 	int			ref;
 };
 
@@ -680,6 +671,15 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace,
 					       struct trace_array *tr);
 extern int trace_selftest_startup_branch(struct tracer *trace,
 					 struct trace_array *tr);
+/*
+ * Tracer data references selftest functions that only occur
+ * on boot up. These can be __init functions. Thus, when selftests
+ * are enabled, then the tracers need to reference __init functions.
+ */
+#define __tracer_data		__refdata
+#else
+/* Tracers are seldom changed. Optimize when selftests are disabled. */
+#define __tracer_data		__read_mostly
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
 extern void *head_page(struct trace_array_cpu *data);
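
The new __tracer_data annotation in trace.h picks __refdata when boot-time selftests are compiled in (so a tracer may legally reference __init functions) and __read_mostly otherwise. A hedged userspace sketch of the same conditional-attribute pattern; the SELFTEST macro and section name below are illustrative only:

/* Pick a storage attribute at compile time, mirroring __tracer_data. */
#ifdef SELFTEST
# define tracer_data			/* expands to nothing: no section demotion */
#else
# define tracer_data __attribute__((section(".data.read_mostly")))
#endif

static int tracer_data answer = 42;

int main(void)
{
	return answer == 42 ? 0 : 1;
}
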
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 84b1e045faba..80c36bcf66e8 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -236,6 +236,10 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 
 	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
 
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+		      "perf buffer not large enough"))
+		return NULL;
+
 	pc = preempt_count();
 
 	*rctxp = perf_swevent_get_recursion_context();
@@ -266,6 +270,10 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
 	struct pt_regs regs;
 	int rctx;
 
+	head = this_cpu_ptr(event_function.perf_events);
+	if (hlist_empty(head))
+		return;
+
 #define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
 		    sizeof(u64)) - sizeof(u32))
 
@@ -279,8 +287,6 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
 
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
-
-	head = this_cpu_ptr(event_function.perf_events);
 	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
 			      1, &regs, head, NULL);
 
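
Two defensive changes above: perf_trace_buf_prepare() now rejects oversized requests with WARN_ONCE() instead of silently overrunning the buffer, and perf_ftrace_function_call() checks the per-cpu event list before doing any buffer setup, so the common no-listener case pays nothing. A simplified sketch of the check-before-allocate reordering (all types and helpers below are toy stand-ins, not the kernel's):

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct toy_hlist_head { void *first; };

static bool toy_hlist_empty(const struct toy_hlist_head *h)
{
	return h->first == NULL;
}

static void *toy_buf_prepare(size_t size) { return malloc(size); }
static void toy_buf_submit(void *buf) { free(buf); }

/* Test the cheap "is anyone listening?" condition first, then pay
 * for buffer setup; this mirrors the reordering in the hunk above. */
static void toy_function_call(struct toy_hlist_head *head)
{
	void *entry;

	if (toy_hlist_empty(head))
		return;			/* early exit: no buffer work at all */

	entry = toy_buf_prepare(64);
	if (!entry)
		return;
	toy_buf_submit(entry);
}

int main(void)
{
	struct toy_hlist_head empty = { NULL };

	toy_function_call(&empty);	/* returns before any allocation */
	return 0;
}
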
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 7d854290bf81..29a7ebcfb426 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -409,33 +409,42 @@ static void put_system(struct ftrace_subsystem_dir *dir)
 	mutex_unlock(&event_mutex);
 }
 
-/*
- * Open and update trace_array ref count.
- * Must have the current trace_array passed to it.
- */
-static int tracing_open_generic_file(struct inode *inode, struct file *filp)
+static void remove_subsystem(struct ftrace_subsystem_dir *dir)
 {
-	struct ftrace_event_file *file = inode->i_private;
-	struct trace_array *tr = file->tr;
-	int ret;
+	if (!dir)
+		return;
 
-	if (trace_array_get(tr) < 0)
-		return -ENODEV;
+	if (!--dir->nr_events) {
+		debugfs_remove_recursive(dir->entry);
+		list_del(&dir->list);
+		__put_system_dir(dir);
+	}
+}
 
-	ret = tracing_open_generic(inode, filp);
-	if (ret < 0)
-		trace_array_put(tr);
-	return ret;
+static void *event_file_data(struct file *filp)
+{
+	return ACCESS_ONCE(file_inode(filp)->i_private);
 }
 
-static int tracing_release_generic_file(struct inode *inode, struct file *filp)
+static void remove_event_file_dir(struct ftrace_event_file *file)
 {
-	struct ftrace_event_file *file = inode->i_private;
-	struct trace_array *tr = file->tr;
+	struct dentry *dir = file->dir;
+	struct dentry *child;
 
-	trace_array_put(tr);
+	if (dir) {
+		spin_lock(&dir->d_lock);	/* probably unneeded */
+		list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
+			if (child->d_inode)	/* probably unneeded */
+				child->d_inode->i_private = NULL;
+		}
+		spin_unlock(&dir->d_lock);
 
-	return 0;
+		debugfs_remove_recursive(dir);
+	}
+
+	list_del(&file->list);
+	remove_subsystem(file->system);
+	kmem_cache_free(file_cachep, file);
 }
 
 /*
@@ -679,15 +688,25 @@ static ssize_t
 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 		  loff_t *ppos)
 {
-	struct ftrace_event_file *file = filp->private_data;
+	struct ftrace_event_file *file;
+	unsigned long flags;
 	char buf[4] = "0";
 
-	if (file->flags & FTRACE_EVENT_FL_ENABLED &&
-	    !(file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
+	mutex_lock(&event_mutex);
+	file = event_file_data(filp);
+	if (likely(file))
+		flags = file->flags;
+	mutex_unlock(&event_mutex);
+
+	if (!file)
+		return -ENODEV;
+
+	if (flags & FTRACE_EVENT_FL_ENABLED &&
+	    !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
 		strcpy(buf, "1");
 
-	if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
-	    file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+	if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
+	    flags & FTRACE_EVENT_FL_SOFT_MODE)
 		strcat(buf, "*");
 
 	strcat(buf, "\n");
@@ -699,13 +718,10 @@ static ssize_t
 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		   loff_t *ppos)
 {
-	struct ftrace_event_file *file = filp->private_data;
+	struct ftrace_event_file *file;
 	unsigned long val;
 	int ret;
 
-	if (!file)
-		return -EINVAL;
-
 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 	if (ret)
 		return ret;
@@ -717,8 +733,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	switch (val) {
 	case 0:
 	case 1:
+		ret = -ENODEV;
 		mutex_lock(&event_mutex);
-		ret = ftrace_event_enable_disable(file, val);
+		file = event_file_data(filp);
+		if (likely(file))
+			ret = ftrace_event_enable_disable(file, val);
 		mutex_unlock(&event_mutex);
 		break;
 
@@ -825,65 +844,39 @@ enum {
 
 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct ftrace_event_call *call = m->private;
-	struct ftrace_event_field *field;
+	struct ftrace_event_call *call = event_file_data(m->private);
 	struct list_head *common_head = &ftrace_common_fields;
 	struct list_head *head = trace_get_fields(call);
+	struct list_head *node = v;
 
 	(*pos)++;
 
 	switch ((unsigned long)v) {
 	case FORMAT_HEADER:
-		if (unlikely(list_empty(common_head)))
-			return NULL;
-
-		field = list_entry(common_head->prev,
-				   struct ftrace_event_field, link);
-		return field;
+		node = common_head;
+		break;
 
 	case FORMAT_FIELD_SEPERATOR:
-		if (unlikely(list_empty(head)))
-			return NULL;
-
-		field = list_entry(head->prev, struct ftrace_event_field, link);
-		return field;
+		node = head;
+		break;
 
 	case FORMAT_PRINTFMT:
 		/* all done */
 		return NULL;
 	}
 
-	field = v;
-	if (field->link.prev == common_head)
+	node = node->prev;
+	if (node == common_head)
 		return (void *)FORMAT_FIELD_SEPERATOR;
-	else if (field->link.prev == head)
+	else if (node == head)
 		return (void *)FORMAT_PRINTFMT;
-
-	field = list_entry(field->link.prev, struct ftrace_event_field, link);
-
-	return field;
-}
-
-static void *f_start(struct seq_file *m, loff_t *pos)
-{
-	loff_t l = 0;
-	void *p;
-
-	/* Start by showing the header */
-	if (!*pos)
-		return (void *)FORMAT_HEADER;
-
-	p = (void *)FORMAT_HEADER;
-	do {
-		p = f_next(m, p, &l);
-	} while (p && l < *pos);
-
-	return p;
+	else
+		return node;
 }
 
 static int f_show(struct seq_file *m, void *v)
 {
-	struct ftrace_event_call *call = m->private;
+	struct ftrace_event_call *call = event_file_data(m->private);
 	struct ftrace_event_field *field;
 	const char *array_descriptor;
 
@@ -904,8 +897,7 @@ static int f_show(struct seq_file *m, void *v)
 		return 0;
 	}
 
-	field = v;
-
+	field = list_entry(v, struct ftrace_event_field, link);
 	/*
 	 * Smartly shows the array type(except dynamic array).
 	 * Normal:
@@ -932,8 +924,25 @@ static int f_show(struct seq_file *m, void *v)
 	return 0;
 }
 
+static void *f_start(struct seq_file *m, loff_t *pos)
+{
+	void *p = (void *)FORMAT_HEADER;
+	loff_t l = 0;
+
+	/* ->stop() is called even if ->start() fails */
+	mutex_lock(&event_mutex);
+	if (!event_file_data(m->private))
+		return ERR_PTR(-ENODEV);
+
+	while (l < *pos && p)
+		p = f_next(m, p, &l);
+
+	return p;
+}
+
 static void f_stop(struct seq_file *m, void *p)
 {
+	mutex_unlock(&event_mutex);
 }
 
 static const struct seq_operations trace_format_seq_ops = {
@@ -945,7 +954,6 @@ static const struct seq_operations trace_format_seq_ops = {
 
 static int trace_format_open(struct inode *inode, struct file *file)
 {
-	struct ftrace_event_call *call = inode->i_private;
 	struct seq_file *m;
 	int ret;
 
@@ -954,7 +962,7 @@ static int trace_format_open(struct inode *inode, struct file *file)
 		return ret;
 
 	m = file->private_data;
-	m->private = call;
+	m->private = file;
 
 	return 0;
 }
@@ -962,45 +970,47 @@ static int trace_format_open(struct inode *inode, struct file *file)
 static ssize_t
 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 {
-	struct ftrace_event_call *call = filp->private_data;
-	struct trace_seq *s;
-	int r;
+	int id = (long)event_file_data(filp);
+	char buf[32];
+	int len;
 
 	if (*ppos)
 		return 0;
 
-	s = kmalloc(sizeof(*s), GFP_KERNEL);
-	if (!s)
-		return -ENOMEM;
+	if (unlikely(!id))
+		return -ENODEV;
 
-	trace_seq_init(s);
-	trace_seq_printf(s, "%d\n", call->event.type);
+	len = sprintf(buf, "%d\n", id);
 
-	r = simple_read_from_buffer(ubuf, cnt, ppos,
-				    s->buffer, s->len);
-	kfree(s);
-	return r;
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
 }
 
 static ssize_t
 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 		  loff_t *ppos)
 {
-	struct ftrace_event_call *call = filp->private_data;
+	struct ftrace_event_call *call;
 	struct trace_seq *s;
-	int r;
+	int r = -ENODEV;
 
 	if (*ppos)
 		return 0;
 
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
+
 	if (!s)
 		return -ENOMEM;
 
 	trace_seq_init(s);
 
-	print_event_filter(call, s);
-	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+	mutex_lock(&event_mutex);
+	call = event_file_data(filp);
+	if (call)
+		print_event_filter(call, s);
+	mutex_unlock(&event_mutex);
+
+	if (call)
+		r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
 
 	kfree(s);
 
@@ -1011,9 +1021,9 @@ static ssize_t
 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		   loff_t *ppos)
 {
-	struct ftrace_event_call *call = filp->private_data;
+	struct ftrace_event_call *call;
 	char *buf;
-	int err;
+	int err = -ENODEV;
 
 	if (cnt >= PAGE_SIZE)
 		return -EINVAL;
@@ -1028,7 +1038,12 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	}
 	buf[cnt] = '\0';
 
-	err = apply_event_filter(call, buf);
+	mutex_lock(&event_mutex);
+	call = event_file_data(filp);
+	if (call)
+		err = apply_event_filter(call, buf);
+	mutex_unlock(&event_mutex);
+
 	free_page((unsigned long) buf);
 	if (err < 0)
 		return err;
@@ -1218,6 +1233,7 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 
 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
 static int ftrace_event_set_open(struct inode *inode, struct file *file);
+static int ftrace_event_release(struct inode *inode, struct file *file);
 
 static const struct seq_operations show_event_seq_ops = {
 	.start = t_start,
@@ -1245,14 +1261,13 @@ static const struct file_operations ftrace_set_event_fops = {
 	.read = seq_read,
 	.write = ftrace_event_write,
 	.llseek = seq_lseek,
-	.release = seq_release,
+	.release = ftrace_event_release,
 };
 
 static const struct file_operations ftrace_enable_fops = {
-	.open = tracing_open_generic_file,
+	.open = tracing_open_generic,
 	.read = event_enable_read,
 	.write = event_enable_write,
-	.release = tracing_release_generic_file,
 	.llseek = default_llseek,
 };
 
@@ -1264,7 +1279,6 @@ static const struct file_operations ftrace_event_format_fops = {
 };
 
 static const struct file_operations ftrace_event_id_fops = {
-	.open = tracing_open_generic,
 	.read = event_id_read,
 	.llseek = default_llseek,
 };
@@ -1323,6 +1337,15 @@ ftrace_event_open(struct inode *inode, struct file *file,
 	return ret;
 }
 
+static int ftrace_event_release(struct inode *inode, struct file *file)
+{
+	struct trace_array *tr = inode->i_private;
+
+	trace_array_put(tr);
+
+	return seq_release(inode, file);
+}
+
 static int
 ftrace_event_avail_open(struct inode *inode, struct file *file)
 {
@@ -1336,12 +1359,19 @@ ftrace_event_set_open(struct inode *inode, struct file *file)
 {
 	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
 	struct trace_array *tr = inode->i_private;
+	int ret;
+
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
 
 	if ((file->f_mode & FMODE_WRITE) &&
 	    (file->f_flags & O_TRUNC))
 		ftrace_clear_events(tr);
 
-	return ftrace_event_open(inode, file, seq_ops);
+	ret = ftrace_event_open(inode, file, seq_ops);
+	if (ret < 0)
+		trace_array_put(tr);
+	return ret;
 }
 
 static struct event_subsystem *
@@ -1496,8 +1526,8 @@ event_create_dir(
 
 #ifdef CONFIG_PERF_EVENTS
 	if (call->event.type && call->class->reg)
-		trace_create_file("id", 0444, file->dir, call,
-				  id);
+		trace_create_file("id", 0444, file->dir,
				  (void *)(long)call->event.type, id);
 #endif
 
 	/*
@@ -1522,33 +1552,16 @@ event_create_dir(
 	return 0;
 }
 
-static void remove_subsystem(struct ftrace_subsystem_dir *dir)
-{
-	if (!dir)
-		return;
-
-	if (!--dir->nr_events) {
-		debugfs_remove_recursive(dir->entry);
-		list_del(&dir->list);
-		__put_system_dir(dir);
-	}
-}
-
 static void remove_event_from_tracers(struct ftrace_event_call *call)
 {
 	struct ftrace_event_file *file;
 	struct trace_array *tr;
 
 	do_for_each_event_file_safe(tr, file) {
-
 		if (file->event_call != call)
 			continue;
 
-		list_del(&file->list);
-		debugfs_remove_recursive(file->dir);
-		remove_subsystem(file->system);
-		kmem_cache_free(file_cachep, file);
-
+		remove_event_file_dir(file);
 		/*
 		 * The do_for_each_event_file_safe() is
 		 * a double loop. After finding the call for this
@@ -1700,16 +1713,53 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
 	destroy_preds(call);
 }
 
+static int probe_remove_event_call(struct ftrace_event_call *call)
+{
+	struct trace_array *tr;
+	struct ftrace_event_file *file;
+
+#ifdef CONFIG_PERF_EVENTS
+	if (call->perf_refcount)
+		return -EBUSY;
+#endif
+	do_for_each_event_file(tr, file) {
+		if (file->event_call != call)
+			continue;
+		/*
+		 * We can't rely on ftrace_event_enable_disable(enable => 0)
+		 * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress
+		 * TRACE_REG_UNREGISTER.
+		 */
+		if (file->flags & FTRACE_EVENT_FL_ENABLED)
+			return -EBUSY;
+		/*
+		 * The do_for_each_event_file_safe() is
+		 * a double loop. After finding the call for this
+		 * trace_array, we use break to jump to the next
+		 * trace_array.
+		 */
+		break;
+	} while_for_each_event_file();
+
+	__trace_remove_event_call(call);
+
+	return 0;
+}
+
 /* Remove an event_call */
-void trace_remove_event_call(struct ftrace_event_call *call)
+int trace_remove_event_call(struct ftrace_event_call *call)
 {
+	int ret;
+
 	mutex_lock(&trace_types_lock);
 	mutex_lock(&event_mutex);
 	down_write(&trace_event_sem);
-	__trace_remove_event_call(call);
+	ret = probe_remove_event_call(call);
 	up_write(&trace_event_sem);
 	mutex_unlock(&event_mutex);
 	mutex_unlock(&trace_types_lock);
+
+	return ret;
 }
 
 #define for_each_event(event, start, end)			\
@@ -2278,12 +2328,8 @@ __trace_remove_event_dirs(struct trace_array *tr)
 {
 	struct ftrace_event_file *file, *next;
 
-	list_for_each_entry_safe(file, next, &tr->events, list) {
-		list_del(&file->list);
-		debugfs_remove_recursive(file->dir);
-		remove_subsystem(file->system);
-		kmem_cache_free(file_cachep, file);
-	}
+	list_for_each_entry_safe(file, next, &tr->events, list)
+		remove_event_file_dir(file);
 }
 
 static void
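
The common thread in the trace_events.c hunks: an opened "enable", "id", "format" or "filter" file can outlive its event directory, so i_private can no longer be trusted at read/write time. remove_event_file_dir() NULLs i_private on every child inode under event_mutex, and every file operation re-reads it through event_file_data() under the same mutex, failing with -ENODEV once the event is gone. A userspace sketch of that invalidation handshake (all names below, and the bare -19 standing in for -ENODEV, are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;

struct event { int enabled; };

/* Plays the role of inode->i_private for one opened file. */
static struct event *i_private = &(struct event){ .enabled = 1 };

static int event_read(void)
{
	struct event *ev;
	int enabled = 0;

	pthread_mutex_lock(&event_mutex);
	ev = i_private;			/* may have been invalidated */
	if (ev)
		enabled = ev->enabled;
	pthread_mutex_unlock(&event_mutex);

	if (!ev)
		return -19;		/* event directory is gone */
	return enabled;
}

static void event_remove(void)
{
	pthread_mutex_lock(&event_mutex);
	i_private = NULL;		/* stale opens now see NULL */
	pthread_mutex_unlock(&event_mutex);
}

int main(void)
{
	printf("before removal: %d\n", event_read());	/* 1 */
	event_remove();
	printf("after removal:  %d\n", event_read());	/* -19 */
	return 0;
}
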
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 0d883dc057d6..97daa8cf958d 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -637,17 +637,15 @@ static void append_filter_err(struct filter_parse_state *ps,
 	free_page((unsigned long) buf);
 }
 
+/* caller must hold event_mutex */
 void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
 {
-	struct event_filter *filter;
+	struct event_filter *filter = call->filter;
 
-	mutex_lock(&event_mutex);
-	filter = call->filter;
 	if (filter && filter->filter_string)
 		trace_seq_printf(s, "%s\n", filter->filter_string);
 	else
-		trace_seq_printf(s, "none\n");
-	mutex_unlock(&event_mutex);
+		trace_seq_puts(s, "none\n");
 }
 
 void print_subsystem_event_filter(struct event_subsystem *system,
@@ -660,7 +658,7 @@ void print_subsystem_event_filter(struct event_subsystem *system,
 	if (filter && filter->filter_string)
 		trace_seq_printf(s, "%s\n", filter->filter_string);
 	else
-		trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
+		trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
 	mutex_unlock(&event_mutex);
 }
 
@@ -1841,23 +1839,22 @@ static int create_system_filter(struct event_subsystem *system,
 	return err;
 }
 
+/* caller must hold event_mutex */
 int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
 {
 	struct event_filter *filter;
-	int err = 0;
-
-	mutex_lock(&event_mutex);
+	int err;
 
 	if (!strcmp(strstrip(filter_string), "0")) {
 		filter_disable(call);
 		filter = call->filter;
 		if (!filter)
-			goto out_unlock;
+			return 0;
 		RCU_INIT_POINTER(call->filter, NULL);
 		/* Make sure the filter is not being used */
 		synchronize_sched();
 		__free_filter(filter);
-		goto out_unlock;
+		return 0;
 	}
 
 	err = create_filter(call, filter_string, true, &filter);
@@ -1884,8 +1881,6 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
 			__free_filter(tmp);
 		}
 	}
-out_unlock:
-	mutex_unlock(&event_mutex);
 
 	return err;
 }
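
With the callers in trace_events.c now taking event_mutex around event_file_data() anyway, apply_event_filter() and print_event_filter() drop their own locking and instead document "caller must hold event_mutex". A toy sketch of moving a lock from callee to call site (names below are illustrative):

#include <pthread.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static const char *filter_string;

/* Caller must hold event_mutex, mirroring the new convention above. */
static void apply_filter_locked(const char *s)
{
	filter_string = s;
}

static void filter_write(const char *s)
{
	pthread_mutex_lock(&event_mutex);	/* lock taken at the call site */
	apply_filter_locked(s);
	pthread_mutex_unlock(&event_mutex);
}

int main(void)
{
	filter_write("common_pid != 0");
	return filter_string ? 0 : 1;
}

Pushing the lock out removes a double-acquire hazard for callers that already hold event_mutex and keeps the locked region to a single, obvious span.
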
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index b863f93b30f3..38fe1483c508 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -199,7 +199,7 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
 	return 0;
 }
 
-static struct tracer function_trace __read_mostly =
+static struct tracer function_trace __tracer_data =
 {
 	.name		= "function",
 	.init		= function_trace_init,
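
The trace_functions_graph.c hunks that follow replace trace_seq_printf() with trace_seq_puts() or trace_seq_putc() wherever the format string is a constant: a straight copy needs no %-directive parsing. A toy trace_seq illustrating why the fixed-string variants are cheaper (this struct and both helpers are illustrative, not the kernel's implementation):

#include <stdio.h>
#include <string.h>

struct toy_seq { char buf[256]; unsigned int len; };

/* Fixed string: a bounds check plus memcpy, no printf machinery. */
static int toy_seq_puts(struct toy_seq *s, const char *str)
{
	unsigned int n = strlen(str);

	if (s->len + n >= sizeof(s->buf))
		return 0;		/* buffer full: "partial line" */
	memcpy(s->buf + s->len, str, n);
	s->len += n;
	return 1;
}

/* Single character: cheaper still. */
static int toy_seq_putc(struct toy_seq *s, char c)
{
	if (s->len + 1 >= sizeof(s->buf))
		return 0;
	s->buf[s->len++] = c;
	return 1;
}

int main(void)
{
	struct toy_seq s = { .len = 0 };

	toy_seq_puts(&s, "==========>");
	toy_seq_putc(&s, '\n');
	fwrite(s.buf, 1, s.len, stdout);
	return 0;
}
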
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 8388bc99f2ee..b5c09242683d 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -446,7 +446,7 @@ print_graph_proc(struct trace_seq *s, pid_t pid)
446 446
447 /* First spaces to align center */ 447 /* First spaces to align center */
448 for (i = 0; i < spaces / 2; i++) { 448 for (i = 0; i < spaces / 2; i++) {
449 ret = trace_seq_printf(s, " "); 449 ret = trace_seq_putc(s, ' ');
450 if (!ret) 450 if (!ret)
451 return TRACE_TYPE_PARTIAL_LINE; 451 return TRACE_TYPE_PARTIAL_LINE;
452 } 452 }
@@ -457,7 +457,7 @@ print_graph_proc(struct trace_seq *s, pid_t pid)
457 457
458 /* Last spaces to align center */ 458 /* Last spaces to align center */
459 for (i = 0; i < spaces - (spaces / 2); i++) { 459 for (i = 0; i < spaces - (spaces / 2); i++) {
460 ret = trace_seq_printf(s, " "); 460 ret = trace_seq_putc(s, ' ');
461 if (!ret) 461 if (!ret)
462 return TRACE_TYPE_PARTIAL_LINE; 462 return TRACE_TYPE_PARTIAL_LINE;
463 } 463 }
@@ -503,7 +503,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
503 ------------------------------------------ 503 ------------------------------------------
504 504
505 */ 505 */
506 ret = trace_seq_printf(s, 506 ret = trace_seq_puts(s,
507 " ------------------------------------------\n"); 507 " ------------------------------------------\n");
508 if (!ret) 508 if (!ret)
509 return TRACE_TYPE_PARTIAL_LINE; 509 return TRACE_TYPE_PARTIAL_LINE;
@@ -516,7 +516,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
516 if (ret == TRACE_TYPE_PARTIAL_LINE) 516 if (ret == TRACE_TYPE_PARTIAL_LINE)
517 return TRACE_TYPE_PARTIAL_LINE; 517 return TRACE_TYPE_PARTIAL_LINE;
518 518
519 ret = trace_seq_printf(s, " => "); 519 ret = trace_seq_puts(s, " => ");
520 if (!ret) 520 if (!ret)
521 return TRACE_TYPE_PARTIAL_LINE; 521 return TRACE_TYPE_PARTIAL_LINE;
522 522
@@ -524,7 +524,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
524 if (ret == TRACE_TYPE_PARTIAL_LINE) 524 if (ret == TRACE_TYPE_PARTIAL_LINE)
525 return TRACE_TYPE_PARTIAL_LINE; 525 return TRACE_TYPE_PARTIAL_LINE;
526 526
527 ret = trace_seq_printf(s, 527 ret = trace_seq_puts(s,
528 "\n ------------------------------------------\n\n"); 528 "\n ------------------------------------------\n\n");
529 if (!ret) 529 if (!ret)
530 return TRACE_TYPE_PARTIAL_LINE; 530 return TRACE_TYPE_PARTIAL_LINE;
@@ -645,7 +645,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
645 ret = print_graph_proc(s, pid); 645 ret = print_graph_proc(s, pid);
646 if (ret == TRACE_TYPE_PARTIAL_LINE) 646 if (ret == TRACE_TYPE_PARTIAL_LINE)
647 return TRACE_TYPE_PARTIAL_LINE; 647 return TRACE_TYPE_PARTIAL_LINE;
648 ret = trace_seq_printf(s, " | "); 648 ret = trace_seq_puts(s, " | ");
649 if (!ret) 649 if (!ret)
650 return TRACE_TYPE_PARTIAL_LINE; 650 return TRACE_TYPE_PARTIAL_LINE;
651 } 651 }
@@ -657,9 +657,9 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
657 return ret; 657 return ret;
658 658
659 if (type == TRACE_GRAPH_ENT) 659 if (type == TRACE_GRAPH_ENT)
660 ret = trace_seq_printf(s, "==========>"); 660 ret = trace_seq_puts(s, "==========>");
661 else 661 else
662 ret = trace_seq_printf(s, "<=========="); 662 ret = trace_seq_puts(s, "<==========");
663 663
664 if (!ret) 664 if (!ret)
665 return TRACE_TYPE_PARTIAL_LINE; 665 return TRACE_TYPE_PARTIAL_LINE;
@@ -668,7 +668,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
668 if (ret != TRACE_TYPE_HANDLED) 668 if (ret != TRACE_TYPE_HANDLED)
669 return ret; 669 return ret;
670 670
671 ret = trace_seq_printf(s, "\n"); 671 ret = trace_seq_putc(s, '\n');
672 672
673 if (!ret) 673 if (!ret)
674 return TRACE_TYPE_PARTIAL_LINE; 674 return TRACE_TYPE_PARTIAL_LINE;
@@ -705,13 +705,13 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
705 len += strlen(nsecs_str); 705 len += strlen(nsecs_str);
706 } 706 }
707 707
708 ret = trace_seq_printf(s, " us "); 708 ret = trace_seq_puts(s, " us ");
709 if (!ret) 709 if (!ret)
710 return TRACE_TYPE_PARTIAL_LINE; 710 return TRACE_TYPE_PARTIAL_LINE;
711 711
712 /* Print remaining spaces to fit the row's width */ 712 /* Print remaining spaces to fit the row's width */
713 for (i = len; i < 7; i++) { 713 for (i = len; i < 7; i++) {
714 ret = trace_seq_printf(s, " "); 714 ret = trace_seq_putc(s, ' ');
715 if (!ret) 715 if (!ret)
716 return TRACE_TYPE_PARTIAL_LINE; 716 return TRACE_TYPE_PARTIAL_LINE;
717 } 717 }
@@ -731,13 +731,13 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s,
731 /* No real adata, just filling the column with spaces */ 731 /* No real adata, just filling the column with spaces */
732 switch (duration) { 732 switch (duration) {
733 case DURATION_FILL_FULL: 733 case DURATION_FILL_FULL:
734 ret = trace_seq_printf(s, " | "); 734 ret = trace_seq_puts(s, " | ");
735 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; 735 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
736 case DURATION_FILL_START: 736 case DURATION_FILL_START:
737 ret = trace_seq_printf(s, " "); 737 ret = trace_seq_puts(s, " ");
738 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; 738 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
739 case DURATION_FILL_END: 739 case DURATION_FILL_END:
740 ret = trace_seq_printf(s, " |"); 740 ret = trace_seq_puts(s, " |");
741 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; 741 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
742 } 742 }
743 743
@@ -745,10 +745,10 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s,
745 if (flags & TRACE_GRAPH_PRINT_OVERHEAD) { 745 if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
746 /* Duration exceeded 100 msecs */ 746 /* Duration exceeded 100 msecs */
747 if (duration > 100000ULL) 747 if (duration > 100000ULL)
748 ret = trace_seq_printf(s, "! "); 748 ret = trace_seq_puts(s, "! ");
749 /* Duration exceeded 10 msecs */ 749 /* Duration exceeded 10 msecs */
750 else if (duration > 10000ULL) 750 else if (duration > 10000ULL)
751 ret = trace_seq_printf(s, "+ "); 751 ret = trace_seq_puts(s, "+ ");
752 } 752 }
753 753
754 /* 754 /*
@@ -757,7 +757,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s,
757 * to fill out the space. 757 * to fill out the space.
758 */ 758 */
759 if (ret == -1) 759 if (ret == -1)
760 ret = trace_seq_printf(s, " "); 760 ret = trace_seq_puts(s, " ");
761 761
 762 /* Catch any failure that happened above */ 762 /* Catch any failure that happened above */
763 if (!ret) 763 if (!ret)
@@ -767,7 +767,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s,
767 if (ret != TRACE_TYPE_HANDLED) 767 if (ret != TRACE_TYPE_HANDLED)
768 return ret; 768 return ret;
769 769
770 ret = trace_seq_printf(s, "| "); 770 ret = trace_seq_puts(s, "| ");
771 if (!ret) 771 if (!ret)
772 return TRACE_TYPE_PARTIAL_LINE; 772 return TRACE_TYPE_PARTIAL_LINE;
773 773
@@ -817,7 +817,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
817 817
818 /* Function */ 818 /* Function */
819 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { 819 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
820 ret = trace_seq_printf(s, " "); 820 ret = trace_seq_putc(s, ' ');
821 if (!ret) 821 if (!ret)
822 return TRACE_TYPE_PARTIAL_LINE; 822 return TRACE_TYPE_PARTIAL_LINE;
823 } 823 }
@@ -858,7 +858,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
858 858
859 /* Function */ 859 /* Function */
860 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { 860 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
861 ret = trace_seq_printf(s, " "); 861 ret = trace_seq_putc(s, ' ');
862 if (!ret) 862 if (!ret)
863 return TRACE_TYPE_PARTIAL_LINE; 863 return TRACE_TYPE_PARTIAL_LINE;
864 } 864 }
@@ -917,7 +917,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
917 if (ret == TRACE_TYPE_PARTIAL_LINE) 917 if (ret == TRACE_TYPE_PARTIAL_LINE)
918 return TRACE_TYPE_PARTIAL_LINE; 918 return TRACE_TYPE_PARTIAL_LINE;
919 919
920 ret = trace_seq_printf(s, " | "); 920 ret = trace_seq_puts(s, " | ");
921 if (!ret) 921 if (!ret)
922 return TRACE_TYPE_PARTIAL_LINE; 922 return TRACE_TYPE_PARTIAL_LINE;
923 } 923 }
@@ -1117,7 +1117,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
1117 1117
1118 /* Closing brace */ 1118 /* Closing brace */
1119 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { 1119 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
1120 ret = trace_seq_printf(s, " "); 1120 ret = trace_seq_putc(s, ' ');
1121 if (!ret) 1121 if (!ret)
1122 return TRACE_TYPE_PARTIAL_LINE; 1122 return TRACE_TYPE_PARTIAL_LINE;
1123 } 1123 }
@@ -1129,7 +1129,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
1129 * belongs to, write out the function name. 1129 * belongs to, write out the function name.
1130 */ 1130 */
1131 if (func_match) { 1131 if (func_match) {
1132 ret = trace_seq_printf(s, "}\n"); 1132 ret = trace_seq_puts(s, "}\n");
1133 if (!ret) 1133 if (!ret)
1134 return TRACE_TYPE_PARTIAL_LINE; 1134 return TRACE_TYPE_PARTIAL_LINE;
1135 } else { 1135 } else {
@@ -1179,13 +1179,13 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1179 /* Indentation */ 1179 /* Indentation */
1180 if (depth > 0) 1180 if (depth > 0)
1181 for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { 1181 for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
1182 ret = trace_seq_printf(s, " "); 1182 ret = trace_seq_putc(s, ' ');
1183 if (!ret) 1183 if (!ret)
1184 return TRACE_TYPE_PARTIAL_LINE; 1184 return TRACE_TYPE_PARTIAL_LINE;
1185 } 1185 }
1186 1186
1187 /* The comment */ 1187 /* The comment */
1188 ret = trace_seq_printf(s, "/* "); 1188 ret = trace_seq_puts(s, "/* ");
1189 if (!ret) 1189 if (!ret)
1190 return TRACE_TYPE_PARTIAL_LINE; 1190 return TRACE_TYPE_PARTIAL_LINE;
1191 1191
@@ -1216,7 +1216,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1216 s->len--; 1216 s->len--;
1217 } 1217 }
1218 1218
1219 ret = trace_seq_printf(s, " */\n"); 1219 ret = trace_seq_puts(s, " */\n");
1220 if (!ret) 1220 if (!ret)
1221 return TRACE_TYPE_PARTIAL_LINE; 1221 return TRACE_TYPE_PARTIAL_LINE;
1222 1222
@@ -1448,7 +1448,7 @@ static struct trace_event graph_trace_ret_event = {
1448 .funcs = &graph_functions 1448 .funcs = &graph_functions
1449}; 1449};
1450 1450
1451static struct tracer graph_trace __read_mostly = { 1451static struct tracer graph_trace __tracer_data = {
1452 .name = "function_graph", 1452 .name = "function_graph",
1453 .open = graph_trace_open, 1453 .open = graph_trace_open,
1454 .pipe_open = graph_trace_open, 1454 .pipe_open = graph_trace_open,
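The conversion above swaps trace_seq_printf() for trace_seq_puts()/trace_seq_putc() wherever the argument is a constant string or a single character, skipping vsnprintf() format parsing for output that needs none. A minimal userspace sketch of the idea (seq_buf, seq_puts and seq_putc here are illustrative stand-ins, not the kernel's trace_seq API):

    /*
     * Illustration only, not the kernel's trace_seq implementation.
     * A fixed-size sequence buffer where puts()/putc() copy bytes
     * directly instead of going through format-string parsing.
     */
    #include <stdio.h>
    #include <string.h>

    #define SEQ_SIZE 4096

    struct seq_buf {
        char buffer[SEQ_SIZE];
        unsigned int len;
    };

    /* Returns 1 on success, 0 if the string would overflow (partial line). */
    static int seq_puts(struct seq_buf *s, const char *str)
    {
        unsigned int len = strlen(str);

        if (s->len + len > SEQ_SIZE)
            return 0;
        memcpy(s->buffer + s->len, str, len);
        s->len += len;
        return 1;
    }

    static int seq_putc(struct seq_buf *s, char c)
    {
        if (s->len + 1 > SEQ_SIZE)
            return 0;
        s->buffer[s->len++] = c;
        return 1;
    }

    int main(void)
    {
        struct seq_buf s = { .len = 0 };

        seq_puts(&s, " => ");   /* constant string: no format parsing */
        seq_putc(&s, '\n');     /* single character: no copy loop at all */
        fwrite(s.buffer, 1, s.len, stdout);
        return 0;
    }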
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 7ed6976493c8..243f6834d026 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -95,7 +95,7 @@ static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
95} 95}
96 96
97static int register_probe_event(struct trace_probe *tp); 97static int register_probe_event(struct trace_probe *tp);
98static void unregister_probe_event(struct trace_probe *tp); 98static int unregister_probe_event(struct trace_probe *tp);
99 99
100static DEFINE_MUTEX(probe_lock); 100static DEFINE_MUTEX(probe_lock);
101static LIST_HEAD(probe_list); 101static LIST_HEAD(probe_list);
@@ -243,11 +243,11 @@ find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
243static int 243static int
244disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) 244disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
245{ 245{
246 struct event_file_link *link = NULL;
247 int wait = 0;
246 int ret = 0; 248 int ret = 0;
247 249
248 if (file) { 250 if (file) {
249 struct event_file_link *link;
250
251 link = find_event_file_link(tp, file); 251 link = find_event_file_link(tp, file);
252 if (!link) { 252 if (!link) {
253 ret = -EINVAL; 253 ret = -EINVAL;
@@ -255,10 +255,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
255 } 255 }
256 256
257 list_del_rcu(&link->list); 257 list_del_rcu(&link->list);
258 /* synchronize with kprobe_trace_func/kretprobe_trace_func */ 258 wait = 1;
259 synchronize_sched();
260 kfree(link);
261
262 if (!list_empty(&tp->files)) 259 if (!list_empty(&tp->files))
263 goto out; 260 goto out;
264 261
@@ -271,8 +268,22 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
271 disable_kretprobe(&tp->rp); 268 disable_kretprobe(&tp->rp);
272 else 269 else
273 disable_kprobe(&tp->rp.kp); 270 disable_kprobe(&tp->rp.kp);
271 wait = 1;
274 } 272 }
275 out: 273 out:
274 if (wait) {
275 /*
276 * Synchronize with kprobe_trace_func/kretprobe_trace_func
 277 * to ensure the probe is disabled (all running handlers
 278 * have finished). This is needed not only for kfree() but
 279 * also for the caller: trace_remove_event_call() relies on
 280 * it before releasing event_call related objects, which are
 281 * accessed in kprobe_trace_func/kretprobe_trace_func.
282 */
283 synchronize_sched();
284 kfree(link); /* Ignored if link == NULL */
285 }
286
276 return ret; 287 return ret;
277} 288}
278 289
@@ -340,9 +351,12 @@ static int unregister_trace_probe(struct trace_probe *tp)
340 if (trace_probe_is_enabled(tp)) 351 if (trace_probe_is_enabled(tp))
341 return -EBUSY; 352 return -EBUSY;
342 353
354 /* Will fail if probe is being used by ftrace or perf */
355 if (unregister_probe_event(tp))
356 return -EBUSY;
357
343 __unregister_trace_probe(tp); 358 __unregister_trace_probe(tp);
344 list_del(&tp->list); 359 list_del(&tp->list);
345 unregister_probe_event(tp);
346 360
347 return 0; 361 return 0;
348} 362}
@@ -621,7 +635,9 @@ static int release_all_trace_probes(void)
621 /* TODO: Use batch unregistration */ 635 /* TODO: Use batch unregistration */
622 while (!list_empty(&probe_list)) { 636 while (!list_empty(&probe_list)) {
623 tp = list_entry(probe_list.next, struct trace_probe, list); 637 tp = list_entry(probe_list.next, struct trace_probe, list);
624 unregister_trace_probe(tp); 638 ret = unregister_trace_probe(tp);
639 if (ret)
640 goto end;
625 free_trace_probe(tp); 641 free_trace_probe(tp);
626 } 642 }
627 643
@@ -1087,9 +1103,6 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
1087 __size = sizeof(*entry) + tp->size + dsize; 1103 __size = sizeof(*entry) + tp->size + dsize;
1088 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 1104 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1089 size -= sizeof(u32); 1105 size -= sizeof(u32);
1090 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1091 "profile buffer not large enough"))
1092 return;
1093 1106
1094 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); 1107 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1095 if (!entry) 1108 if (!entry)
@@ -1120,9 +1133,6 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
1120 __size = sizeof(*entry) + tp->size + dsize; 1133 __size = sizeof(*entry) + tp->size + dsize;
1121 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 1134 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1122 size -= sizeof(u32); 1135 size -= sizeof(u32);
1123 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1124 "profile buffer not large enough"))
1125 return;
1126 1136
1127 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); 1137 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1128 if (!entry) 1138 if (!entry)
@@ -1242,11 +1252,15 @@ static int register_probe_event(struct trace_probe *tp)
1242 return ret; 1252 return ret;
1243} 1253}
1244 1254
1245static void unregister_probe_event(struct trace_probe *tp) 1255static int unregister_probe_event(struct trace_probe *tp)
1246{ 1256{
1257 int ret;
1258
1247 /* tp->event is unregistered in trace_remove_event_call() */ 1259 /* tp->event is unregistered in trace_remove_event_call() */
1248 trace_remove_event_call(&tp->call); 1260 ret = trace_remove_event_call(&tp->call);
1249 kfree(tp->call.print_fmt); 1261 if (!ret)
1262 kfree(tp->call.print_fmt);
1263 return ret;
1250} 1264}
1251 1265
1252/* Make a debugfs interface for controlling probe points */ 1266/* Make a debugfs interface for controlling probe points */
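unregister_probe_event() now returns the result of trace_remove_event_call(), and unregister_trace_probe() attempts that fallible step before touching anything else, so a probe still used by ftrace or perf is left fully intact. A toy model of that check-before-teardown ordering (struct probe and both helpers are invented for illustration):

    /*
     * Hedged sketch of the new ordering, not the kernel API: the
     * revocable step runs first, so a busy event leaves the probe
     * untouched instead of half torn down.
     */
    #include <errno.h>

    struct probe { int refcount; int registered; };

    static int remove_event_call(struct probe *p)
    {
        if (p->refcount)            /* still used by ftrace or perf */
            return -EBUSY;
        p->registered = 0;
        return 0;
    }

    static int unregister_probe(struct probe *p)
    {
        int ret = remove_event_call(p);  /* may fail: do it before teardown */

        if (ret)
            return ret;                  /* probe intact, caller can retry */
        /* only now unhook and release the probe itself */
        return 0;
    }

    int main(void)
    {
        struct probe busy = { .refcount = 1, .registered = 1 };

        return unregister_probe(&busy) == -EBUSY ? 0 : 1;
    }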
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index a5e8f4878bfa..b3dcfb2f0fef 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -90,7 +90,7 @@ static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
90 if (drv) 90 if (drv)
91 ret += trace_seq_printf(s, " %s\n", drv->name); 91 ret += trace_seq_printf(s, " %s\n", drv->name);
92 else 92 else
93 ret += trace_seq_printf(s, " \n"); 93 ret += trace_seq_puts(s, " \n");
94 return ret; 94 return ret;
95} 95}
96 96
@@ -107,7 +107,7 @@ static void mmio_pipe_open(struct trace_iterator *iter)
107 struct header_iter *hiter; 107 struct header_iter *hiter;
108 struct trace_seq *s = &iter->seq; 108 struct trace_seq *s = &iter->seq;
109 109
110 trace_seq_printf(s, "VERSION 20070824\n"); 110 trace_seq_puts(s, "VERSION 20070824\n");
111 111
112 hiter = kzalloc(sizeof(*hiter), GFP_KERNEL); 112 hiter = kzalloc(sizeof(*hiter), GFP_KERNEL);
113 if (!hiter) 113 if (!hiter)
@@ -209,7 +209,7 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
209 (rw->value >> 0) & 0xff, rw->pc, 0); 209 (rw->value >> 0) & 0xff, rw->pc, 0);
210 break; 210 break;
211 default: 211 default:
212 ret = trace_seq_printf(s, "rw what?\n"); 212 ret = trace_seq_puts(s, "rw what?\n");
213 break; 213 break;
214 } 214 }
215 if (ret) 215 if (ret)
@@ -245,7 +245,7 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter)
245 secs, usec_rem, m->map_id, 0UL, 0); 245 secs, usec_rem, m->map_id, 0UL, 0);
246 break; 246 break;
247 default: 247 default:
248 ret = trace_seq_printf(s, "map what?\n"); 248 ret = trace_seq_puts(s, "map what?\n");
249 break; 249 break;
250 } 250 }
251 if (ret) 251 if (ret)
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index bb922d9ee51b..34e7cbac0c9c 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -78,7 +78,7 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
78 78
79 trace_assign_type(field, entry); 79 trace_assign_type(field, entry);
80 80
81 ret = trace_seq_printf(s, "%s", field->buf); 81 ret = trace_seq_puts(s, field->buf);
82 if (!ret) 82 if (!ret)
83 return TRACE_TYPE_PARTIAL_LINE; 83 return TRACE_TYPE_PARTIAL_LINE;
84 84
@@ -558,14 +558,14 @@ seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
558 if (ret) 558 if (ret)
559 ret = trace_seq_puts(s, "??"); 559 ret = trace_seq_puts(s, "??");
560 if (ret) 560 if (ret)
561 ret = trace_seq_puts(s, "\n"); 561 ret = trace_seq_putc(s, '\n');
562 continue; 562 continue;
563 } 563 }
564 if (!ret) 564 if (!ret)
565 break; 565 break;
566 if (ret) 566 if (ret)
567 ret = seq_print_user_ip(s, mm, ip, sym_flags); 567 ret = seq_print_user_ip(s, mm, ip, sym_flags);
568 ret = trace_seq_puts(s, "\n"); 568 ret = trace_seq_putc(s, '\n');
569 } 569 }
570 570
571 if (mm) 571 if (mm)
@@ -579,7 +579,7 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
579 int ret; 579 int ret;
580 580
581 if (!ip) 581 if (!ip)
582 return trace_seq_printf(s, "0"); 582 return trace_seq_putc(s, '0');
583 583
584 if (sym_flags & TRACE_ITER_SYM_OFFSET) 584 if (sym_flags & TRACE_ITER_SYM_OFFSET)
585 ret = seq_print_sym_offset(s, "%s", ip); 585 ret = seq_print_sym_offset(s, "%s", ip);
@@ -964,14 +964,14 @@ static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
964 goto partial; 964 goto partial;
965 965
966 if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) { 966 if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
967 if (!trace_seq_printf(s, " <-")) 967 if (!trace_seq_puts(s, " <-"))
968 goto partial; 968 goto partial;
969 if (!seq_print_ip_sym(s, 969 if (!seq_print_ip_sym(s,
970 field->parent_ip, 970 field->parent_ip,
971 flags)) 971 flags))
972 goto partial; 972 goto partial;
973 } 973 }
974 if (!trace_seq_printf(s, "\n")) 974 if (!trace_seq_putc(s, '\n'))
975 goto partial; 975 goto partial;
976 976
977 return TRACE_TYPE_HANDLED; 977 return TRACE_TYPE_HANDLED;
@@ -1210,7 +1210,7 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
1210 1210
1211 if (!seq_print_ip_sym(s, *p, flags)) 1211 if (!seq_print_ip_sym(s, *p, flags))
1212 goto partial; 1212 goto partial;
1213 if (!trace_seq_puts(s, "\n")) 1213 if (!trace_seq_putc(s, '\n'))
1214 goto partial; 1214 goto partial;
1215 } 1215 }
1216 1216
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 322e16461072..8fd03657bc7d 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -175,7 +175,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags,
175 entry = syscall_nr_to_meta(syscall); 175 entry = syscall_nr_to_meta(syscall);
176 176
177 if (!entry) { 177 if (!entry) {
178 trace_seq_printf(s, "\n"); 178 trace_seq_putc(s, '\n');
179 return TRACE_TYPE_HANDLED; 179 return TRACE_TYPE_HANDLED;
180 } 180 }
181 181
@@ -566,15 +566,15 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
566 if (!sys_data) 566 if (!sys_data)
567 return; 567 return;
568 568
569 head = this_cpu_ptr(sys_data->enter_event->perf_events);
570 if (hlist_empty(head))
571 return;
572
569 /* get the size after alignment with the u32 buffer size field */ 573 /* get the size after alignment with the u32 buffer size field */
570 size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec); 574 size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
571 size = ALIGN(size + sizeof(u32), sizeof(u64)); 575 size = ALIGN(size + sizeof(u32), sizeof(u64));
572 size -= sizeof(u32); 576 size -= sizeof(u32);
573 577
574 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
575 "perf buffer not large enough"))
576 return;
577
578 rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, 578 rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
579 sys_data->enter_event->event.type, regs, &rctx); 579 sys_data->enter_event->event.type, regs, &rctx);
580 if (!rec) 580 if (!rec)
@@ -583,8 +583,6 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
583 rec->nr = syscall_nr; 583 rec->nr = syscall_nr;
584 syscall_get_arguments(current, regs, 0, sys_data->nb_args, 584 syscall_get_arguments(current, regs, 0, sys_data->nb_args,
585 (unsigned long *)&rec->args); 585 (unsigned long *)&rec->args);
586
587 head = this_cpu_ptr(sys_data->enter_event->perf_events);
588 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); 586 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
589} 587}
590 588
@@ -642,18 +640,14 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
642 if (!sys_data) 640 if (!sys_data)
643 return; 641 return;
644 642
643 head = this_cpu_ptr(sys_data->exit_event->perf_events);
644 if (hlist_empty(head))
645 return;
646
645 /* We can probably do that at build time */ 647 /* We can probably do that at build time */
646 size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64)); 648 size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
647 size -= sizeof(u32); 649 size -= sizeof(u32);
648 650
649 /*
650 * Impossible, but be paranoid with the future
651 * How to put this check outside runtime?
652 */
653 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
654 "exit event has grown above perf buffer size"))
655 return;
656
657 rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, 651 rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
658 sys_data->exit_event->event.type, regs, &rctx); 652 sys_data->exit_event->event.type, regs, &rctx);
659 if (!rec) 653 if (!rec)
@@ -661,8 +655,6 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
661 655
662 rec->nr = syscall_nr; 656 rec->nr = syscall_nr;
663 rec->ret = syscall_get_return_value(current, regs); 657 rec->ret = syscall_get_return_value(current, regs);
664
665 head = this_cpu_ptr(sys_data->exit_event->perf_events);
666 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); 658 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
667} 659}
668 660
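Both perf_syscall_enter() and perf_syscall_exit() above now test the per-CPU hlist of perf events before sizing and preparing a buffer, so the common no-listener case returns before doing any work; the WARN_ONCE() size check also drops out of this hot path. The reordering as a generic pattern (illustrative code only, not the perf API):

    /*
     * Sketch of the reordering: check for listeners first, so the
     * no-listener case pays nothing.
     */
    #include <stdio.h>

    static int have_listeners;

    static void record_event(int value)
    {
        char buf[64];

        if (!have_listeners)        /* moved to the top by the patch */
            return;
        snprintf(buf, sizeof(buf), "event=%d", value);  /* costly prep */
        printf("%s\n", buf);
    }

    int main(void)
    {
        record_event(1);            /* dropped: no listeners */
        have_listeners = 1;
        record_event(2);            /* recorded */
        return 0;
    }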
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index d5d0cd368a56..272261b5f94f 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -70,7 +70,7 @@ struct trace_uprobe {
70 (sizeof(struct probe_arg) * (n))) 70 (sizeof(struct probe_arg) * (n)))
71 71
72static int register_uprobe_event(struct trace_uprobe *tu); 72static int register_uprobe_event(struct trace_uprobe *tu);
73static void unregister_uprobe_event(struct trace_uprobe *tu); 73static int unregister_uprobe_event(struct trace_uprobe *tu);
74 74
75static DEFINE_MUTEX(uprobe_lock); 75static DEFINE_MUTEX(uprobe_lock);
76static LIST_HEAD(uprobe_list); 76static LIST_HEAD(uprobe_list);
@@ -164,11 +164,17 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou
164} 164}
165 165
166/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */ 166/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
167static void unregister_trace_uprobe(struct trace_uprobe *tu) 167static int unregister_trace_uprobe(struct trace_uprobe *tu)
168{ 168{
169 int ret;
170
171 ret = unregister_uprobe_event(tu);
172 if (ret)
173 return ret;
174
169 list_del(&tu->list); 175 list_del(&tu->list);
170 unregister_uprobe_event(tu);
171 free_trace_uprobe(tu); 176 free_trace_uprobe(tu);
177 return 0;
172} 178}
173 179
174/* Register a trace_uprobe and probe_event */ 180/* Register a trace_uprobe and probe_event */
@@ -181,9 +187,12 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
181 187
182 /* register as an event */ 188 /* register as an event */
183 old_tp = find_probe_event(tu->call.name, tu->call.class->system); 189 old_tp = find_probe_event(tu->call.name, tu->call.class->system);
184 if (old_tp) 190 if (old_tp) {
185 /* delete old event */ 191 /* delete old event */
186 unregister_trace_uprobe(old_tp); 192 ret = unregister_trace_uprobe(old_tp);
193 if (ret)
194 goto end;
195 }
187 196
188 ret = register_uprobe_event(tu); 197 ret = register_uprobe_event(tu);
189 if (ret) { 198 if (ret) {
@@ -256,6 +265,8 @@ static int create_trace_uprobe(int argc, char **argv)
256 group = UPROBE_EVENT_SYSTEM; 265 group = UPROBE_EVENT_SYSTEM;
257 266
258 if (is_delete) { 267 if (is_delete) {
268 int ret;
269
259 if (!event) { 270 if (!event) {
260 pr_info("Delete command needs an event name.\n"); 271 pr_info("Delete command needs an event name.\n");
261 return -EINVAL; 272 return -EINVAL;
@@ -269,9 +280,9 @@ static int create_trace_uprobe(int argc, char **argv)
269 return -ENOENT; 280 return -ENOENT;
270 } 281 }
271 /* delete an event */ 282 /* delete an event */
272 unregister_trace_uprobe(tu); 283 ret = unregister_trace_uprobe(tu);
273 mutex_unlock(&uprobe_lock); 284 mutex_unlock(&uprobe_lock);
274 return 0; 285 return ret;
275 } 286 }
276 287
277 if (argc < 2) { 288 if (argc < 2) {
@@ -408,16 +419,20 @@ fail_address_parse:
408 return ret; 419 return ret;
409} 420}
410 421
411static void cleanup_all_probes(void) 422static int cleanup_all_probes(void)
412{ 423{
413 struct trace_uprobe *tu; 424 struct trace_uprobe *tu;
425 int ret = 0;
414 426
415 mutex_lock(&uprobe_lock); 427 mutex_lock(&uprobe_lock);
416 while (!list_empty(&uprobe_list)) { 428 while (!list_empty(&uprobe_list)) {
417 tu = list_entry(uprobe_list.next, struct trace_uprobe, list); 429 tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
418 unregister_trace_uprobe(tu); 430 ret = unregister_trace_uprobe(tu);
431 if (ret)
432 break;
419 } 433 }
420 mutex_unlock(&uprobe_lock); 434 mutex_unlock(&uprobe_lock);
435 return ret;
421} 436}
422 437
423/* Probes listing interfaces */ 438/* Probes listing interfaces */
@@ -462,8 +477,13 @@ static const struct seq_operations probes_seq_op = {
462 477
463static int probes_open(struct inode *inode, struct file *file) 478static int probes_open(struct inode *inode, struct file *file)
464{ 479{
465 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) 480 int ret;
466 cleanup_all_probes(); 481
482 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
483 ret = cleanup_all_probes();
484 if (ret)
485 return ret;
486 }
467 487
468 return seq_open(file, &probes_seq_op); 488 return seq_open(file, &probes_seq_op);
469} 489}
@@ -818,8 +838,6 @@ static void uprobe_perf_print(struct trace_uprobe *tu,
818 838
819 size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); 839 size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
820 size = ALIGN(size + tu->size + sizeof(u32), sizeof(u64)) - sizeof(u32); 840 size = ALIGN(size + tu->size + sizeof(u32), sizeof(u64)) - sizeof(u32);
821 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
822 return;
823 841
824 preempt_disable(); 842 preempt_disable();
825 head = this_cpu_ptr(call->perf_events); 843 head = this_cpu_ptr(call->perf_events);
@@ -970,12 +988,17 @@ static int register_uprobe_event(struct trace_uprobe *tu)
970 return ret; 988 return ret;
971} 989}
972 990
973static void unregister_uprobe_event(struct trace_uprobe *tu) 991static int unregister_uprobe_event(struct trace_uprobe *tu)
974{ 992{
993 int ret;
994
975 /* tu->event is unregistered in trace_remove_event_call() */ 995 /* tu->event is unregistered in trace_remove_event_call() */
976 trace_remove_event_call(&tu->call); 996 ret = trace_remove_event_call(&tu->call);
997 if (ret)
998 return ret;
977 kfree(tu->call.print_fmt); 999 kfree(tu->call.print_fmt);
978 tu->call.print_fmt = NULL; 1000 tu->call.print_fmt = NULL;
1001 return 0;
979} 1002}
980 1003
 981/* Make a trace interface for controlling probe points */ 1004/* Make a trace interface for controlling probe points */
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index d8c30db06c5b..9064b919a406 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -62,6 +62,9 @@ int create_user_ns(struct cred *new)
62 kgid_t group = new->egid; 62 kgid_t group = new->egid;
63 int ret; 63 int ret;
64 64
65 if (parent_ns->level > 32)
66 return -EUSERS;
67
65 /* 68 /*
66 * Verify that we can not violate the policy of which files 69 * Verify that we can not violate the policy of which files
67 * may be accessed that is specified by the root directory, 70 * may be accessed that is specified by the root directory,
@@ -92,6 +95,7 @@ int create_user_ns(struct cred *new)
92 atomic_set(&ns->count, 1); 95 atomic_set(&ns->count, 1);
93 /* Leave the new->user_ns reference with the new user namespace. */ 96 /* Leave the new->user_ns reference with the new user namespace. */
94 ns->parent = parent_ns; 97 ns->parent = parent_ns;
98 ns->level = parent_ns->level + 1;
95 ns->owner = owner; 99 ns->owner = owner;
96 ns->group = group; 100 ns->group = group;
97 101
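create_user_ns() above caps namespace nesting by recording ns->level = parent_ns->level + 1 and refusing with -EUSERS once the parent is already more than 32 deep. A self-contained model of that depth limit (struct user_ns here is a stand-in, not the kernel's):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct user_ns {
        int level;
        struct user_ns *parent;
    };

    static struct user_ns *create_ns(struct user_ns *parent, int *err)
    {
        struct user_ns *ns;

        if (parent && parent->level > 32) {
            *err = -EUSERS;             /* nesting too deep */
            return NULL;
        }
        ns = calloc(1, sizeof(*ns));
        if (!ns) {
            *err = -ENOMEM;
            return NULL;
        }
        ns->parent = parent;
        ns->level = parent ? parent->level + 1 : 0;
        *err = 0;
        return ns;
    }

    int main(void)
    {
        struct user_ns *ns = NULL;
        int err = 0, attempts = 0;

        do {
            ns = create_ns(ns, &err);   /* sketch: namespaces never freed */
            attempts++;
        } while (!err);
        printf("creation failed with EUSERS after %d namespaces\n",
               attempts - 1);
        return 0;
    }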
@@ -105,16 +109,21 @@ int create_user_ns(struct cred *new)
105int unshare_userns(unsigned long unshare_flags, struct cred **new_cred) 109int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
106{ 110{
107 struct cred *cred; 111 struct cred *cred;
112 int err = -ENOMEM;
108 113
109 if (!(unshare_flags & CLONE_NEWUSER)) 114 if (!(unshare_flags & CLONE_NEWUSER))
110 return 0; 115 return 0;
111 116
112 cred = prepare_creds(); 117 cred = prepare_creds();
113 if (!cred) 118 if (cred) {
114 return -ENOMEM; 119 err = create_user_ns(cred);
120 if (err)
121 put_cred(cred);
122 else
123 *new_cred = cred;
124 }
115 125
116 *new_cred = cred; 126 return err;
117 return create_user_ns(cred);
118} 127}
119 128
120void free_user_ns(struct user_namespace *ns) 129void free_user_ns(struct user_namespace *ns)
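The rewritten unshare_userns() only publishes the new cred through *new_cred after create_user_ns() succeeds, and drops it with put_cred() on failure, where the old code stored the pointer before the fallible call. The reshaped error path in skeleton form (userspace sketch; malloc/free stand in for prepare_creds()/put_cred()):

    #include <errno.h>
    #include <stdlib.h>

    struct cred { int ns; };

    static int create_user_ns(struct cred *c) { c->ns = 1; return 0; }

    static int unshare_userns(struct cred **new_cred)
    {
        struct cred *cred;
        int err = -ENOMEM;

        cred = malloc(sizeof(*cred));   /* prepare_creds() stand-in */
        if (cred) {
            err = create_user_ns(cred);
            if (err)
                free(cred);             /* put_cred(): no half-set output */
            else
                *new_cred = cred;       /* published only on full success */
        }
        return err;
    }

    int main(void)
    {
        struct cred *c = NULL;
        return unshare_userns(&c);
    }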
diff --git a/kernel/wait.c b/kernel/wait.c
index ce0daa320a26..d550920e040c 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -333,7 +333,8 @@ int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
333 prepare_to_wait(wq, &q->wait, mode); 333 prepare_to_wait(wq, &q->wait, mode);
334 val = q->key.flags; 334 val = q->key.flags;
335 if (atomic_read(val) == 0) 335 if (atomic_read(val) == 0)
336 ret = (*action)(val); 336 break;
337 ret = (*action)(val);
337 } while (!ret && atomic_read(val) != 0); 338 } while (!ret && atomic_read(val) != 0);
338 finish_wait(wq, &q->wait); 339 finish_wait(wq, &q->wait);
339 return ret; 340 return ret;
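The one-line fix above makes __wait_on_atomic_t() break out of the loop when the atomic_t is already zero instead of invoking the action function one more time. The corrected control flow, reduced to plain C (action() here is a stand-in for the caller-supplied wait action):

    #include <stdio.h>

    static int counter = 1;

    static int action(int *val)
    {
        /* stand-in for schedule(); returns 0 unless interrupted */
        (*val)--;
        return 0;
    }

    int main(void)
    {
        int ret = 0;

        do {
            /* prepare_to_wait() would go here */
            if (counter == 0)
                break;          /* fixed: don't run the action once done */
            ret = action(&counter);
        } while (!ret && counter != 0);
        /* finish_wait() would go here */
        printf("ret=%d counter=%d\n", ret, counter);
        return ret;
    }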
@@ -362,8 +363,7 @@ EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
362 363
363/** 364/**
364 * wake_up_atomic_t - Wake up a waiter on a atomic_t 365 * wake_up_atomic_t - Wake up a waiter on a atomic_t
365 * @word: The word being waited on, a kernel virtual address 366 * @p: The atomic_t being waited on, a kernel virtual address
366 * @bit: The bit of the word being waited on
367 * 367 *
368 * Wake up anyone waiting for the atomic_t to go to zero. 368 * Wake up anyone waiting for the atomic_t to go to zero.
369 * 369 *
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0b72e816b8d0..7f5d4be22034 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2817,6 +2817,19 @@ already_gone:
2817 return false; 2817 return false;
2818} 2818}
2819 2819
2820static bool __flush_work(struct work_struct *work)
2821{
2822 struct wq_barrier barr;
2823
2824 if (start_flush_work(work, &barr)) {
2825 wait_for_completion(&barr.done);
2826 destroy_work_on_stack(&barr.work);
2827 return true;
2828 } else {
2829 return false;
2830 }
2831}
2832
2820/** 2833/**
2821 * flush_work - wait for a work to finish executing the last queueing instance 2834 * flush_work - wait for a work to finish executing the last queueing instance
2822 * @work: the work to flush 2835 * @work: the work to flush
@@ -2830,18 +2843,10 @@ already_gone:
2830 */ 2843 */
2831bool flush_work(struct work_struct *work) 2844bool flush_work(struct work_struct *work)
2832{ 2845{
2833 struct wq_barrier barr;
2834
2835 lock_map_acquire(&work->lockdep_map); 2846 lock_map_acquire(&work->lockdep_map);
2836 lock_map_release(&work->lockdep_map); 2847 lock_map_release(&work->lockdep_map);
2837 2848
2838 if (start_flush_work(work, &barr)) { 2849 return __flush_work(work);
2839 wait_for_completion(&barr.done);
2840 destroy_work_on_stack(&barr.work);
2841 return true;
2842 } else {
2843 return false;
2844 }
2845} 2850}
2846EXPORT_SYMBOL_GPL(flush_work); 2851EXPORT_SYMBOL_GPL(flush_work);
2847 2852
@@ -3411,6 +3416,12 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
3411{ 3416{
3412 to->nice = from->nice; 3417 to->nice = from->nice;
3413 cpumask_copy(to->cpumask, from->cpumask); 3418 cpumask_copy(to->cpumask, from->cpumask);
3419 /*
 3420 * Unlike the hash and equality tests, this function doesn't ignore
3421 * ->no_numa as it is used for both pool and wq attrs. Instead,
3422 * get_unbound_pool() explicitly clears ->no_numa after copying.
3423 */
3424 to->no_numa = from->no_numa;
3414} 3425}
3415 3426
3416/* hash value of the content of @attr */ 3427/* hash value of the content of @attr */
@@ -3578,6 +3589,12 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3578 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */ 3589 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
3579 copy_workqueue_attrs(pool->attrs, attrs); 3590 copy_workqueue_attrs(pool->attrs, attrs);
3580 3591
3592 /*
 3593 * no_numa isn't a worker_pool attribute, so always clear it. See
3594 * 'struct workqueue_attrs' comments for detail.
3595 */
3596 pool->attrs->no_numa = false;
3597
3581 /* if cpumask is contained inside a NUMA node, we belong to that node */ 3598 /* if cpumask is contained inside a NUMA node, we belong to that node */
3582 if (wq_numa_enabled) { 3599 if (wq_numa_enabled) {
3583 for_each_node(node) { 3600 for_each_node(node) {
@@ -4756,7 +4773,14 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
4756 4773
4757 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); 4774 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
4758 schedule_work_on(cpu, &wfc.work); 4775 schedule_work_on(cpu, &wfc.work);
4759 flush_work(&wfc.work); 4776
4777 /*
4778 * The work item is on-stack and can't lead to deadlock through
4779 * flushing. Use __flush_work() to avoid spurious lockdep warnings
4780 * when work_on_cpu()s are nested.
4781 */
4782 __flush_work(&wfc.work);
4783
4760 return wfc.ret; 4784 return wfc.ret;
4761} 4785}
4762EXPORT_SYMBOL_GPL(work_on_cpu); 4786EXPORT_SYMBOL_GPL(work_on_cpu);
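The workqueue change factors the wait itself into __flush_work() so that work_on_cpu(), whose work item is on-stack and cannot deadlock through flushing, can bypass the lock_map_acquire()/lock_map_release() annotations that made nested work_on_cpu() calls trip lockdep. The shape of that refactor as a toy (no real lockdep here; all names invented):

    #include <stdio.h>

    static int do_flush(const char *who)
    {
        printf("%s: waiting for work to finish\n", who);
        return 1;
    }

    static int flush_work_annotated(void)
    {
        /* lock_map_acquire()/release() would bracket this in the kernel */
        return do_flush("flush_work");
    }

    static int flush_work_bare(void)
    {
        return do_flush("__flush_work");  /* nested callers skip annotation */
    }

    int main(void)
    {
        flush_work_annotated();
        flush_work_bare();
        return 0;
    }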
diff --git a/lib/Kconfig b/lib/Kconfig
index 35da51359d40..71d9f81f6eed 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -66,8 +66,6 @@ config CRC16
66 66
67config CRC_T10DIF 67config CRC_T10DIF
68 tristate "CRC calculation for the T10 Data Integrity Field" 68 tristate "CRC calculation for the T10 Data Integrity Field"
69 select CRYPTO
70 select CRYPTO_CRCT10DIF
71 help 69 help
72 This option is only needed if a module that's not in the 70 This option is only needed if a module that's not in the
73 kernel tree needs to calculate CRC checks for use with the 71 kernel tree needs to calculate CRC checks for use with the
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index fe3428c07b47..fbbd66ed86cd 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -11,44 +11,57 @@
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/crc-t10dif.h> 13#include <linux/crc-t10dif.h>
14#include <linux/err.h>
15#include <linux/init.h>
16#include <crypto/hash.h>
17 14
18static struct crypto_shash *crct10dif_tfm; 15/* Table generated using the following polynomial:
16 * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
17 * gt: 0x8bb7
18 */
19static const __u16 t10_dif_crc_table[256] = {
20 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
21 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
22 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
23 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
24 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
25 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
26 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
27 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
28 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
29 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
30 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
31 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
32 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
33 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
34 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
35 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
36 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
37 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
38 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
39 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
40 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
41 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
42 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
43 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
44 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
45 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
46 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
47 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
48 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
49 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
50 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
51 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
52};
19 53
20__u16 crc_t10dif(const unsigned char *buffer, size_t len) 54__u16 crc_t10dif(const unsigned char *buffer, size_t len)
21{ 55{
22 struct { 56 __u16 crc = 0;
23 struct shash_desc shash; 57 unsigned int i;
24 char ctx[2];
25 } desc;
26 int err;
27
28 desc.shash.tfm = crct10dif_tfm;
29 desc.shash.flags = 0;
30 *(__u16 *)desc.ctx = 0;
31 58
32 err = crypto_shash_update(&desc.shash, buffer, len); 59 for (i = 0 ; i < len ; i++)
33 BUG_ON(err); 60 crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff];
34 61
35 return *(__u16 *)desc.ctx; 62 return crc;
36} 63}
37EXPORT_SYMBOL(crc_t10dif); 64EXPORT_SYMBOL(crc_t10dif);
38 65
39static int __init crc_t10dif_mod_init(void)
40{
41 crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
42 return PTR_RET(crct10dif_tfm);
43}
44
45static void __exit crc_t10dif_mod_fini(void)
46{
47 crypto_free_shash(crct10dif_tfm);
48}
49
50module_init(crc_t10dif_mod_init);
51module_exit(crc_t10dif_mod_fini);
52
53MODULE_DESCRIPTION("T10 DIF CRC calculation"); 66MODULE_DESCRIPTION("T10 DIF CRC calculation");
54MODULE_LICENSE("GPL"); 67MODULE_LICENSE("GPL");
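The new lib/crc-t10dif.c drops the crypto-layer indirection and computes the CRC byte-at-a-time from a 256-entry table for the generator polynomial 0x8bb7. One way such a table can be derived, shown as a sketch rather than the kernel's actual generation process, together with the same lookup loop the patch uses:

    #include <stdio.h>
    #include <stddef.h>

    static unsigned short table[256];

    /* Expand the T10 DIF generator polynomial 0x8bb7 bit by bit. */
    static void build_table(void)
    {
        unsigned int i, bit, crc;

        for (i = 0; i < 256; i++) {
            crc = i << 8;
            for (bit = 0; bit < 8; bit++)
                crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7 : crc << 1;
            table[i] = crc & 0xffff;
        }
    }

    /* Same byte-at-a-time lookup loop as the patch's crc_t10dif(). */
    static unsigned short crc_t10dif(const unsigned char *buf, size_t len)
    {
        unsigned short crc = 0;
        size_t i;

        for (i = 0; i < len; i++)
            crc = (crc << 8) ^ table[((crc >> 8) ^ buf[i]) & 0xff];
        return crc;
    }

    int main(void)
    {
        build_table();
        printf("table[1] = 0x%04X\n", table[1]);    /* expect 0x8BB7 */
        printf("crc(\"123456789\") = 0x%04X\n",
               crc_t10dif((const unsigned char *)"123456789", 9));
        return 0;
    }

Running the sketch should print table[1] = 0x8BB7, matching the second entry of t10_dif_crc_table[] in the patch above.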
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
index fd94058bd7f9..28321d8f75ef 100644
--- a/lib/lz4/lz4_compress.c
+++ b/lib/lz4/lz4_compress.c
@@ -437,7 +437,7 @@ int lz4_compress(const unsigned char *src, size_t src_len,
437exit: 437exit:
438 return ret; 438 return ret;
439} 439}
440EXPORT_SYMBOL_GPL(lz4_compress); 440EXPORT_SYMBOL(lz4_compress);
441 441
442MODULE_LICENSE("GPL"); 442MODULE_LICENSE("Dual BSD/GPL");
443MODULE_DESCRIPTION("LZ4 compressor"); 443MODULE_DESCRIPTION("LZ4 compressor");
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index d3414eae73a1..411be80ddb46 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -299,7 +299,7 @@ exit_0:
299 return ret; 299 return ret;
300} 300}
301#ifndef STATIC 301#ifndef STATIC
302EXPORT_SYMBOL_GPL(lz4_decompress); 302EXPORT_SYMBOL(lz4_decompress);
303#endif 303#endif
304 304
305int lz4_decompress_unknownoutputsize(const char *src, size_t src_len, 305int lz4_decompress_unknownoutputsize(const char *src, size_t src_len,
@@ -319,8 +319,8 @@ exit_0:
319 return ret; 319 return ret;
320} 320}
321#ifndef STATIC 321#ifndef STATIC
322EXPORT_SYMBOL_GPL(lz4_decompress_unknownoutputsize); 322EXPORT_SYMBOL(lz4_decompress_unknownoutputsize);
323 323
324MODULE_LICENSE("GPL"); 324MODULE_LICENSE("Dual BSD/GPL");
325MODULE_DESCRIPTION("LZ4 Decompressor"); 325MODULE_DESCRIPTION("LZ4 Decompressor");
326#endif 326#endif
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index eb1a74f5e368..f344f76b6559 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -533,7 +533,7 @@ int lz4hc_compress(const unsigned char *src, size_t src_len,
533exit: 533exit:
534 return ret; 534 return ret;
535} 535}
536EXPORT_SYMBOL_GPL(lz4hc_compress); 536EXPORT_SYMBOL(lz4hc_compress);
537 537
538MODULE_LICENSE("GPL"); 538MODULE_LICENSE("Dual BSD/GPL");
539MODULE_DESCRIPTION("LZ4HC compressor"); 539MODULE_DESCRIPTION("LZ4HC compressor");
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index d411355f238e..aac511417ad1 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -151,15 +151,12 @@ do { \
151#endif /* __a29k__ */ 151#endif /* __a29k__ */
152 152
153#if defined(__alpha) && W_TYPE_SIZE == 64 153#if defined(__alpha) && W_TYPE_SIZE == 64
154#define umul_ppmm(ph, pl, m0, m1) \ 154#define umul_ppmm(ph, pl, m0, m1) \
155do { \ 155do { \
156 UDItype __m0 = (m0), __m1 = (m1); \ 156 UDItype __m0 = (m0), __m1 = (m1); \
157 __asm__ ("umulh %r1,%2,%0" \ 157 (ph) = __builtin_alpha_umulh(__m0, __m1); \
158 : "=r" ((UDItype) ph) \ 158 (pl) = __m0 * __m1; \
159 : "%rJ" (__m0), \ 159} while (0)
160 "rI" (__m1)); \
161 (pl) = __m0 * __m1; \
162 } while (0)
163#define UMUL_TIME 46 160#define UMUL_TIME 46
164#ifndef LONGLONG_STANDALONE 161#ifndef LONGLONG_STANDALONE
165#define udiv_qrnnd(q, r, n1, n0, d) \ 162#define udiv_qrnnd(q, r, n1, n0, d) \
@@ -167,7 +164,7 @@ do { UDItype __r; \
167 (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \ 164 (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
168 (r) = __r; \ 165 (r) = __r; \
169} while (0) 166} while (0)
170extern UDItype __udiv_qrnnd(); 167extern UDItype __udiv_qrnnd(UDItype *, UDItype, UDItype, UDItype);
171#define UDIV_TIME 220 168#define UDIV_TIME 220
172#endif /* LONGLONG_STANDALONE */ 169#endif /* LONGLONG_STANDALONE */
173#endif /* __alpha */ 170#endif /* __alpha */
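The Alpha umul_ppmm() above now asks the compiler for the high half via __builtin_alpha_umulh() instead of inline assembly. What the macro computes, sketched with GCC's unsigned __int128 on any 64-bit target (the builtin itself is Alpha-only):

    #include <stdio.h>
    #include <stdint.h>

    /* Split a 64x64 multiply into high and low 64-bit words. */
    static void umul_ppmm(uint64_t *ph, uint64_t *pl, uint64_t m0, uint64_t m1)
    {
        unsigned __int128 p = (unsigned __int128)m0 * m1;

        *ph = (uint64_t)(p >> 64);  /* what umulh computes in one instruction */
        *pl = (uint64_t)p;          /* the ordinary truncating multiply */
    }

    int main(void)
    {
        uint64_t hi, lo;

        umul_ppmm(&hi, &lo, 0xffffffffffffffffULL, 0xffffffffffffffffULL);
        printf("hi=%016llx lo=%016llx\n",
               (unsigned long long)hi, (unsigned long long)lo);
        return 0;
    }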
diff --git a/mm/fremap.c b/mm/fremap.c
index 87da3590c61e..5bff08147768 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -57,17 +57,22 @@ static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
57 unsigned long addr, unsigned long pgoff, pgprot_t prot) 57 unsigned long addr, unsigned long pgoff, pgprot_t prot)
58{ 58{
59 int err = -ENOMEM; 59 int err = -ENOMEM;
60 pte_t *pte; 60 pte_t *pte, ptfile;
61 spinlock_t *ptl; 61 spinlock_t *ptl;
62 62
63 pte = get_locked_pte(mm, addr, &ptl); 63 pte = get_locked_pte(mm, addr, &ptl);
64 if (!pte) 64 if (!pte)
65 goto out; 65 goto out;
66 66
67 if (!pte_none(*pte)) 67 ptfile = pgoff_to_pte(pgoff);
68
69 if (!pte_none(*pte)) {
70 if (pte_present(*pte) && pte_soft_dirty(*pte))
71 pte_file_mksoft_dirty(ptfile);
68 zap_pte(mm, vma, addr, pte); 72 zap_pte(mm, vma, addr, pte);
73 }
69 74
70 set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff)); 75 set_pte_at(mm, addr, pte, ptfile);
71 /* 76 /*
72 * We don't need to run update_mmu_cache() here because the "file pte" 77 * We don't need to run update_mmu_cache() here because the "file pte"
73 * being installed by install_file_pte() is not a real pte - it's a 78 * being installed by install_file_pte() is not a real pte - it's a
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 243e710c6039..a92012a71702 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1620,7 +1620,9 @@ static void __split_huge_page_refcount(struct page *page,
1620 ((1L << PG_referenced) | 1620 ((1L << PG_referenced) |
1621 (1L << PG_swapbacked) | 1621 (1L << PG_swapbacked) |
1622 (1L << PG_mlocked) | 1622 (1L << PG_mlocked) |
1623 (1L << PG_uptodate))); 1623 (1L << PG_uptodate) |
1624 (1L << PG_active) |
1625 (1L << PG_unevictable)));
1624 page_tail->flags |= (1L << PG_dirty); 1626 page_tail->flags |= (1L << PG_dirty);
1625 1627
1626 /* clear PageTail before overwriting first_page */ 1628 /* clear PageTail before overwriting first_page */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 83aff0a4d093..b60f33080a28 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2490,7 +2490,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2490 2490
2491 mm = vma->vm_mm; 2491 mm = vma->vm_mm;
2492 2492
2493 tlb_gather_mmu(&tlb, mm, 0); 2493 tlb_gather_mmu(&tlb, mm, start, end);
2494 __unmap_hugepage_range(&tlb, vma, start, end, ref_page); 2494 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2495 tlb_finish_mmu(&tlb, start, end); 2495 tlb_finish_mmu(&tlb, start, end);
2496} 2496}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 00a7a664b9c1..0878ff7c26a9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3195,11 +3195,11 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
3195 if (!s->memcg_params) 3195 if (!s->memcg_params)
3196 return -ENOMEM; 3196 return -ENOMEM;
3197 3197
3198 INIT_WORK(&s->memcg_params->destroy,
3199 kmem_cache_destroy_work_func);
3200 if (memcg) { 3198 if (memcg) {
3201 s->memcg_params->memcg = memcg; 3199 s->memcg_params->memcg = memcg;
3202 s->memcg_params->root_cache = root_cache; 3200 s->memcg_params->root_cache = root_cache;
3201 INIT_WORK(&s->memcg_params->destroy,
3202 kmem_cache_destroy_work_func);
3203 } else 3203 } else
3204 s->memcg_params->is_root_cache = true; 3204 s->memcg_params->is_root_cache = true;
3205 3205
@@ -6335,6 +6335,7 @@ static void mem_cgroup_css_offline(struct cgroup *cont)
6335 mem_cgroup_invalidate_reclaim_iterators(memcg); 6335 mem_cgroup_invalidate_reclaim_iterators(memcg);
6336 mem_cgroup_reparent_charges(memcg); 6336 mem_cgroup_reparent_charges(memcg);
6337 mem_cgroup_destroy_all_caches(memcg); 6337 mem_cgroup_destroy_all_caches(memcg);
6338 vmpressure_cleanup(&memcg->vmpressure);
6338} 6339}
6339 6340
6340static void mem_cgroup_css_free(struct cgroup *cont) 6341static void mem_cgroup_css_free(struct cgroup *cont)
@@ -6968,7 +6969,6 @@ struct cgroup_subsys mem_cgroup_subsys = {
6968#ifdef CONFIG_MEMCG_SWAP 6969#ifdef CONFIG_MEMCG_SWAP
6969static int __init enable_swap_account(char *s) 6970static int __init enable_swap_account(char *s)
6970{ 6971{
6971 /* consider enabled if no parameter or 1 is given */
6972 if (!strcmp(s, "1")) 6972 if (!strcmp(s, "1"))
6973 really_do_swap_account = 1; 6973 really_do_swap_account = 1;
6974 else if (!strcmp(s, "0")) 6974 else if (!strcmp(s, "0"))
diff --git a/mm/memory.c b/mm/memory.c
index 1ce2e2a734fc..af84bc0ec17c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -209,14 +209,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
209 * tear-down from @mm. The @fullmm argument is used when @mm is without 209 * tear-down from @mm. The @fullmm argument is used when @mm is without
210 * users and we're going to destroy the full address space (exit/execve). 210 * users and we're going to destroy the full address space (exit/execve).
211 */ 211 */
212void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) 212void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
213{ 213{
214 tlb->mm = mm; 214 tlb->mm = mm;
215 215
216 tlb->fullmm = fullmm; 216 /* Is it from 0 to ~0? */
217 tlb->fullmm = !(start | (end+1));
217 tlb->need_flush_all = 0; 218 tlb->need_flush_all = 0;
218 tlb->start = -1UL; 219 tlb->start = start;
219 tlb->end = 0; 220 tlb->end = end;
220 tlb->need_flush = 0; 221 tlb->need_flush = 0;
221 tlb->local.next = NULL; 222 tlb->local.next = NULL;
222 tlb->local.nr = 0; 223 tlb->local.nr = 0;
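With the new tlb_gather_mmu() signature, fullmm is no longer passed in but derived from the range: only start == 0 with end == ~0UL makes (start | (end + 1)) zero, which is why the exit_mmap() caller later in this patch passes 0, -1. The test in isolation:

    #include <stdio.h>

    static int is_fullmm(unsigned long start, unsigned long end)
    {
        return !(start | (end + 1));  /* 0..~0UL means "whole address space" */
    }

    int main(void)
    {
        printf("%d\n", is_fullmm(0, ~0UL));        /* 1: exit_mmap() case */
        printf("%d\n", is_fullmm(0x1000, 0x2000)); /* 0: ordinary unmap */
        return 0;
    }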
@@ -256,8 +257,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
256{ 257{
257 struct mmu_gather_batch *batch, *next; 258 struct mmu_gather_batch *batch, *next;
258 259
259 tlb->start = start;
260 tlb->end = end;
261 tlb_flush_mmu(tlb); 260 tlb_flush_mmu(tlb);
262 261
263 /* keep the page table cache within bounds */ 262 /* keep the page table cache within bounds */
@@ -1099,7 +1098,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
1099 spinlock_t *ptl; 1098 spinlock_t *ptl;
1100 pte_t *start_pte; 1099 pte_t *start_pte;
1101 pte_t *pte; 1100 pte_t *pte;
1102 unsigned long range_start = addr;
1103 1101
1104again: 1102again:
1105 init_rss_vec(rss); 1103 init_rss_vec(rss);
@@ -1141,9 +1139,12 @@ again:
1141 continue; 1139 continue;
1142 if (unlikely(details) && details->nonlinear_vma 1140 if (unlikely(details) && details->nonlinear_vma
1143 && linear_page_index(details->nonlinear_vma, 1141 && linear_page_index(details->nonlinear_vma,
1144 addr) != page->index) 1142 addr) != page->index) {
1145 set_pte_at(mm, addr, pte, 1143 pte_t ptfile = pgoff_to_pte(page->index);
1146 pgoff_to_pte(page->index)); 1144 if (pte_soft_dirty(ptent))
1145 pte_file_mksoft_dirty(ptfile);
1146 set_pte_at(mm, addr, pte, ptfile);
1147 }
1147 if (PageAnon(page)) 1148 if (PageAnon(page))
1148 rss[MM_ANONPAGES]--; 1149 rss[MM_ANONPAGES]--;
1149 else { 1150 else {
@@ -1202,17 +1203,25 @@ again:
1202 * and page-free while holding it. 1203 * and page-free while holding it.
1203 */ 1204 */
1204 if (force_flush) { 1205 if (force_flush) {
1206 unsigned long old_end;
1207
1205 force_flush = 0; 1208 force_flush = 0;
1206 1209
1207#ifdef HAVE_GENERIC_MMU_GATHER 1210 /*
1208 tlb->start = range_start; 1211 * Flush the TLB just for the previous segment,
1212 * then update the range to be the remaining
1213 * TLB range.
1214 */
1215 old_end = tlb->end;
1209 tlb->end = addr; 1216 tlb->end = addr;
1210#endif 1217
1211 tlb_flush_mmu(tlb); 1218 tlb_flush_mmu(tlb);
1212 if (addr != end) { 1219
1213 range_start = addr; 1220 tlb->start = addr;
1221 tlb->end = old_end;
1222
1223 if (addr != end)
1214 goto again; 1224 goto again;
1215 }
1216 } 1225 }
1217 1226
1218 return addr; 1227 return addr;
@@ -1397,7 +1406,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
1397 unsigned long end = start + size; 1406 unsigned long end = start + size;
1398 1407
1399 lru_add_drain(); 1408 lru_add_drain();
1400 tlb_gather_mmu(&tlb, mm, 0); 1409 tlb_gather_mmu(&tlb, mm, start, end);
1401 update_hiwater_rss(mm); 1410 update_hiwater_rss(mm);
1402 mmu_notifier_invalidate_range_start(mm, start, end); 1411 mmu_notifier_invalidate_range_start(mm, start, end);
1403 for ( ; vma && vma->vm_start < end; vma = vma->vm_next) 1412 for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
@@ -1423,7 +1432,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
1423 unsigned long end = address + size; 1432 unsigned long end = address + size;
1424 1433
1425 lru_add_drain(); 1434 lru_add_drain();
1426 tlb_gather_mmu(&tlb, mm, 0); 1435 tlb_gather_mmu(&tlb, mm, address, end);
1427 update_hiwater_rss(mm); 1436 update_hiwater_rss(mm);
1428 mmu_notifier_invalidate_range_start(mm, address, end); 1437 mmu_notifier_invalidate_range_start(mm, address, end);
1429 unmap_single_vma(&tlb, vma, address, end, details); 1438 unmap_single_vma(&tlb, vma, address, end, details);
@@ -3115,6 +3124,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
3115 exclusive = 1; 3124 exclusive = 1;
3116 } 3125 }
3117 flush_icache_page(vma, page); 3126 flush_icache_page(vma, page);
3127 if (pte_swp_soft_dirty(orig_pte))
3128 pte = pte_mksoft_dirty(pte);
3118 set_pte_at(mm, address, page_table, pte); 3129 set_pte_at(mm, address, page_table, pte);
3119 if (page == swapcache) 3130 if (page == swapcache)
3120 do_page_add_anon_rmap(page, vma, address, exclusive); 3131 do_page_add_anon_rmap(page, vma, address, exclusive);
@@ -3408,6 +3419,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3408 entry = mk_pte(page, vma->vm_page_prot); 3419 entry = mk_pte(page, vma->vm_page_prot);
3409 if (flags & FAULT_FLAG_WRITE) 3420 if (flags & FAULT_FLAG_WRITE)
3410 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 3421 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3422 else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
3423 pte_mksoft_dirty(entry);
3411 if (anon) { 3424 if (anon) {
3412 inc_mm_counter_fast(mm, MM_ANONPAGES); 3425 inc_mm_counter_fast(mm, MM_ANONPAGES);
3413 page_add_new_anon_rmap(page, vma, address); 3426 page_add_new_anon_rmap(page, vma, address);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 74310017296e..4baf12e534d1 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -732,7 +732,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
732 if (prev) { 732 if (prev) {
733 vma = prev; 733 vma = prev;
734 next = vma->vm_next; 734 next = vma->vm_next;
735 continue; 735 if (mpol_equal(vma_policy(vma), new_pol))
736 continue;
737 /* vma_merge() joined vma && vma->next, case 8 */
738 goto replace;
736 } 739 }
737 if (vma->vm_start != vmstart) { 740 if (vma->vm_start != vmstart) {
738 err = split_vma(vma->vm_mm, vma, vmstart, 1); 741 err = split_vma(vma->vm_mm, vma, vmstart, 1);
@@ -744,6 +747,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
744 if (err) 747 if (err)
745 goto out; 748 goto out;
746 } 749 }
750 replace:
747 err = vma_replace_policy(vma, new_pol); 751 err = vma_replace_policy(vma, new_pol);
748 if (err) 752 if (err)
749 goto out; 753 goto out;
diff --git a/mm/mmap.c b/mm/mmap.c
index fbad7b091090..f9c97d10b873 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -865,7 +865,7 @@ again: remove_next = 1 + (end > next->vm_end);
865 if (next->anon_vma) 865 if (next->anon_vma)
866 anon_vma_merge(vma, next); 866 anon_vma_merge(vma, next);
867 mm->map_count--; 867 mm->map_count--;
868 vma_set_policy(vma, vma_policy(next)); 868 mpol_put(vma_policy(next));
869 kmem_cache_free(vm_area_cachep, next); 869 kmem_cache_free(vm_area_cachep, next);
870 /* 870 /*
871 * In mprotect's case 6 (see comments on vma_merge), 871 * In mprotect's case 6 (see comments on vma_merge),
@@ -2336,7 +2336,7 @@ static void unmap_region(struct mm_struct *mm,
2336 struct mmu_gather tlb; 2336 struct mmu_gather tlb;
2337 2337
2338 lru_add_drain(); 2338 lru_add_drain();
2339 tlb_gather_mmu(&tlb, mm, 0); 2339 tlb_gather_mmu(&tlb, mm, start, end);
2340 update_hiwater_rss(mm); 2340 update_hiwater_rss(mm);
2341 unmap_vmas(&tlb, vma, start, end); 2341 unmap_vmas(&tlb, vma, start, end);
2342 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, 2342 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
@@ -2709,7 +2709,7 @@ void exit_mmap(struct mm_struct *mm)
2709 2709
2710 lru_add_drain(); 2710 lru_add_drain();
2711 flush_cache_mm(mm); 2711 flush_cache_mm(mm);
2712 tlb_gather_mmu(&tlb, mm, 1); 2712 tlb_gather_mmu(&tlb, mm, 0, -1);
2713 /* update_hiwater_rss(mm) here? but nobody should be looking */ 2713 /* update_hiwater_rss(mm) here? but nobody should be looking */
2714 /* Use -1 here to ensure all VMAs in the mm are unmapped */ 2714 /* Use -1 here to ensure all VMAs in the mm are unmapped */
2715 unmap_vmas(&tlb, vma, 0, -1); 2715 unmap_vmas(&tlb, vma, 0, -1);
diff --git a/mm/rmap.c b/mm/rmap.c
index cd356df4f71a..b2e29acd7e3d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1236,6 +1236,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1236 swp_entry_to_pte(make_hwpoison_entry(page))); 1236 swp_entry_to_pte(make_hwpoison_entry(page)));
1237 } else if (PageAnon(page)) { 1237 } else if (PageAnon(page)) {
1238 swp_entry_t entry = { .val = page_private(page) }; 1238 swp_entry_t entry = { .val = page_private(page) };
1239 pte_t swp_pte;
1239 1240
1240 if (PageSwapCache(page)) { 1241 if (PageSwapCache(page)) {
1241 /* 1242 /*
@@ -1264,7 +1265,10 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1264 BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); 1265 BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
1265 entry = make_migration_entry(page, pte_write(pteval)); 1266 entry = make_migration_entry(page, pte_write(pteval));
1266 } 1267 }
1267 set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); 1268 swp_pte = swp_entry_to_pte(entry);
1269 if (pte_soft_dirty(pteval))
1270 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1271 set_pte_at(mm, address, pte, swp_pte);
1268 BUG_ON(pte_file(*pte)); 1272 BUG_ON(pte_file(*pte));
1269 } else if (IS_ENABLED(CONFIG_MIGRATION) && 1273 } else if (IS_ENABLED(CONFIG_MIGRATION) &&
1270 (TTU_ACTION(flags) == TTU_MIGRATION)) { 1274 (TTU_ACTION(flags) == TTU_MIGRATION)) {
@@ -1401,8 +1405,12 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
1401 pteval = ptep_clear_flush(vma, address, pte); 1405 pteval = ptep_clear_flush(vma, address, pte);
1402 1406
1403 /* If nonlinear, store the file page offset in the pte. */ 1407 /* If nonlinear, store the file page offset in the pte. */
1404 if (page->index != linear_page_index(vma, address)) 1408 if (page->index != linear_page_index(vma, address)) {
1405 set_pte_at(mm, address, pte, pgoff_to_pte(page->index)); 1409 pte_t ptfile = pgoff_to_pte(page->index);
1410 if (pte_soft_dirty(pteval))
1411 pte_file_mksoft_dirty(ptfile);
1412 set_pte_at(mm, address, pte, ptfile);
1413 }
1406 1414
1407 /* Move the dirty bit to the physical page now the pte is gone. */ 1415 /* Move the dirty bit to the physical page now the pte is gone. */
1408 if (pte_dirty(pteval)) 1416 if (pte_dirty(pteval))
diff --git a/mm/shmem.c b/mm/shmem.c
index a87990cf9f94..e43dc555069d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1798,7 +1798,8 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
1798 } 1798 }
1799 } 1799 }
1800 1800
1801 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); 1801 if (offset >= 0)
1802 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
1802 mutex_unlock(&inode->i_mutex); 1803 mutex_unlock(&inode->i_mutex);
1803 return offset; 1804 return offset;
1804} 1805}
@@ -2908,14 +2909,8 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
2908 2909
2909/* common code */ 2910/* common code */
2910 2911
2911static char *shmem_dname(struct dentry *dentry, char *buffer, int buflen)
2912{
2913 return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
2914 dentry->d_name.name);
2915}
2916
2917static struct dentry_operations anon_ops = { 2912static struct dentry_operations anon_ops = {
2918 .d_dname = shmem_dname 2913 .d_dname = simple_dname
2919}; 2914};
2920 2915
2921/** 2916/**
diff --git a/mm/slub.c b/mm/slub.c
index 2b02d666bf63..e3ba1f2cf60c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1968,9 +1968,6 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
1968 int pages; 1968 int pages;
1969 int pobjects; 1969 int pobjects;
1970 1970
1971 if (!s->cpu_partial)
1972 return;
1973
1974 do { 1971 do {
1975 pages = 0; 1972 pages = 0;
1976 pobjects = 0; 1973 pobjects = 0;
diff --git a/mm/swap.c b/mm/swap.c
index 4a1d0d2c52fa..62b78a6e224f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -512,12 +512,7 @@ EXPORT_SYMBOL(__lru_cache_add);
512 */ 512 */
513void lru_cache_add(struct page *page) 513void lru_cache_add(struct page *page)
514{ 514{
515 if (PageActive(page)) { 515 VM_BUG_ON(PageActive(page) && PageUnevictable(page));
516 VM_BUG_ON(PageUnevictable(page));
517 } else if (PageUnevictable(page)) {
518 VM_BUG_ON(PageActive(page));
519 }
520
521 VM_BUG_ON(PageLRU(page)); 516 VM_BUG_ON(PageLRU(page));
522 __lru_cache_add(page); 517 __lru_cache_add(page);
523} 518}
@@ -539,6 +534,7 @@ void add_page_to_unevictable_list(struct page *page)
539 534
540 spin_lock_irq(&zone->lru_lock); 535 spin_lock_irq(&zone->lru_lock);
541 lruvec = mem_cgroup_page_lruvec(page, zone); 536 lruvec = mem_cgroup_page_lruvec(page, zone);
537 ClearPageActive(page);
542 SetPageUnevictable(page); 538 SetPageUnevictable(page);
543 SetPageLRU(page); 539 SetPageLRU(page);
544 add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE); 540 add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
@@ -774,8 +770,6 @@ EXPORT_SYMBOL(__pagevec_release);
774void lru_add_page_tail(struct page *page, struct page *page_tail, 770void lru_add_page_tail(struct page *page, struct page *page_tail,
775 struct lruvec *lruvec, struct list_head *list) 771 struct lruvec *lruvec, struct list_head *list)
776{ 772{
777 int uninitialized_var(active);
778 enum lru_list lru;
779 const int file = 0; 773 const int file = 0;
780 774
781 VM_BUG_ON(!PageHead(page)); 775 VM_BUG_ON(!PageHead(page));
@@ -787,20 +781,6 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
787 if (!list) 781 if (!list)
788 SetPageLRU(page_tail); 782 SetPageLRU(page_tail);
789 783
790 if (page_evictable(page_tail)) {
791 if (PageActive(page)) {
792 SetPageActive(page_tail);
793 active = 1;
794 lru = LRU_ACTIVE_ANON;
795 } else {
796 active = 0;
797 lru = LRU_INACTIVE_ANON;
798 }
799 } else {
800 SetPageUnevictable(page_tail);
801 lru = LRU_UNEVICTABLE;
802 }
803
804 if (likely(PageLRU(page))) 784 if (likely(PageLRU(page)))
805 list_add_tail(&page_tail->lru, &page->lru); 785 list_add_tail(&page_tail->lru, &page->lru);
806 else if (list) { 786 else if (list) {
@@ -816,13 +796,13 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
816 * Use the standard add function to put page_tail on the list, 796 * Use the standard add function to put page_tail on the list,
817 * but then correct its position so they all end up in order. 797 * but then correct its position so they all end up in order.
818 */ 798 */
819 add_page_to_lru_list(page_tail, lruvec, lru); 799 add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
820 list_head = page_tail->lru.prev; 800 list_head = page_tail->lru.prev;
821 list_move_tail(&page_tail->lru, list_head); 801 list_move_tail(&page_tail->lru, list_head);
822 } 802 }
823 803
824 if (!PageUnevictable(page)) 804 if (!PageUnevictable(page))
825 update_page_reclaim_stat(lruvec, file, active); 805 update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
826} 806}
827#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 807#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
828 808
@@ -833,7 +813,6 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
833 int active = PageActive(page); 813 int active = PageActive(page);
834 enum lru_list lru = page_lru(page); 814 enum lru_list lru = page_lru(page);
835 815
836 VM_BUG_ON(PageUnevictable(page));
837 VM_BUG_ON(PageLRU(page)); 816 VM_BUG_ON(PageLRU(page));
838 817
839 SetPageLRU(page); 818 SetPageLRU(page);
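The swap.c hunks delete the hand-rolled active/unevictable bookkeeping: add_page_to_unevictable_list() now clears PageActive itself, and lru_add_page_tail() derives the target list from the tail page's own flags via page_lru(). A toy model of that flags-to-list mapping (the bitfields and names here are illustrative, not the kernel's struct page):

    #include <assert.h>

    enum lru { LRU_INACTIVE, LRU_ACTIVE, LRU_UNEVICTABLE };

    struct page_ { unsigned active:1, unevictable:1; };

    /* Mirror of the page_lru() idea: the flags alone pick the list. */
    static enum lru page_lru_(const struct page_ *p)
    {
        if (p->unevictable)
            return LRU_UNEVICTABLE;
        return p->active ? LRU_ACTIVE : LRU_INACTIVE;
    }

    int main(void)
    {
        struct page_ p = { .active = 1, .unevictable = 0 };
        assert(page_lru_(&p) == LRU_ACTIVE);
        p.unevictable = 1;
        assert(page_lru_(&p) == LRU_UNEVICTABLE);
        return 0;
    }
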
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 36af6eeaa67e..6cf2e60983b7 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -866,6 +866,21 @@ unsigned int count_swap_pages(int type, int free)
866} 866}
867#endif /* CONFIG_HIBERNATION */ 867#endif /* CONFIG_HIBERNATION */
868 868
869static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
870{
871#ifdef CONFIG_MEM_SOFT_DIRTY
872 /*
 873 * When the pte carries the soft dirty bit, the pte generated
 874 * from the swap entry does not have it, but it is still the
 875 * same pte from a logical point of view.
876 */
877 pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
878 return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
879#else
880 return pte_same(pte, swp_pte);
881#endif
882}
883
869/* 884/*
870 * No need to decide whether this PTE shares the swap entry with others, 885 * No need to decide whether this PTE shares the swap entry with others,
871 * just let do_wp_page work it out if a write is requested later - to 886 * just let do_wp_page work it out if a write is requested later - to
@@ -892,7 +907,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
892 } 907 }
893 908
894 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 909 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
895 if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) { 910 if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
896 mem_cgroup_cancel_charge_swapin(memcg); 911 mem_cgroup_cancel_charge_swapin(memcg);
897 ret = 0; 912 ret = 0;
898 goto out; 913 goto out;
@@ -947,7 +962,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
947 * swapoff spends a _lot_ of time in this loop! 962 * swapoff spends a _lot_ of time in this loop!
948 * Test inline before going to call unuse_pte. 963 * Test inline before going to call unuse_pte.
949 */ 964 */
950 if (unlikely(pte_same(*pte, swp_pte))) { 965 if (unlikely(maybe_same_pte(*pte, swp_pte))) {
951 pte_unmap(pte); 966 pte_unmap(pte);
952 ret = unuse_pte(vma, pmd, addr, entry, page); 967 ret = unuse_pte(vma, pmd, addr, entry, page);
953 if (ret) 968 if (ret)
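maybe_same_pte() lets swapoff treat a pte and its soft-dirty-marked twin as the same logical entry, since the pte rebuilt from a swap entry never carries the soft-dirty bit. The same comparison in miniature, again with an invented bit position:

    #include <assert.h>
    #include <stdint.h>

    #define SOFT_DIRTY (1ULL << 1)    /* hypothetical bit position */

    static int maybe_same(uint64_t pte, uint64_t swp_pte)
    {
        /* match either the plain swap pte or its soft-dirty variant */
        return pte == swp_pte || pte == (swp_pte | SOFT_DIRTY);
    }

    int main(void)
    {
        uint64_t swp = 0xabc0;
        assert(maybe_same(swp, swp));
        assert(maybe_same(swp | SOFT_DIRTY, swp));
        assert(!maybe_same(swp + 16, swp));
        return 0;
    }
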
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 736a6011c2c8..0c1e37d829fa 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -180,12 +180,12 @@ static void vmpressure_work_fn(struct work_struct *work)
180 if (!vmpr->scanned) 180 if (!vmpr->scanned)
181 return; 181 return;
182 182
183 mutex_lock(&vmpr->sr_lock); 183 spin_lock(&vmpr->sr_lock);
184 scanned = vmpr->scanned; 184 scanned = vmpr->scanned;
185 reclaimed = vmpr->reclaimed; 185 reclaimed = vmpr->reclaimed;
186 vmpr->scanned = 0; 186 vmpr->scanned = 0;
187 vmpr->reclaimed = 0; 187 vmpr->reclaimed = 0;
188 mutex_unlock(&vmpr->sr_lock); 188 spin_unlock(&vmpr->sr_lock);
189 189
190 do { 190 do {
191 if (vmpressure_event(vmpr, scanned, reclaimed)) 191 if (vmpressure_event(vmpr, scanned, reclaimed))
@@ -240,13 +240,13 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
240 if (!scanned) 240 if (!scanned)
241 return; 241 return;
242 242
243 mutex_lock(&vmpr->sr_lock); 243 spin_lock(&vmpr->sr_lock);
244 vmpr->scanned += scanned; 244 vmpr->scanned += scanned;
245 vmpr->reclaimed += reclaimed; 245 vmpr->reclaimed += reclaimed;
246 scanned = vmpr->scanned; 246 scanned = vmpr->scanned;
247 mutex_unlock(&vmpr->sr_lock); 247 spin_unlock(&vmpr->sr_lock);
248 248
249 if (scanned < vmpressure_win || work_pending(&vmpr->work)) 249 if (scanned < vmpressure_win)
250 return; 250 return;
251 schedule_work(&vmpr->work); 251 schedule_work(&vmpr->work);
252} 252}
@@ -367,8 +367,24 @@ void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
367 */ 367 */
368void vmpressure_init(struct vmpressure *vmpr) 368void vmpressure_init(struct vmpressure *vmpr)
369{ 369{
370 mutex_init(&vmpr->sr_lock); 370 spin_lock_init(&vmpr->sr_lock);
371 mutex_init(&vmpr->events_lock); 371 mutex_init(&vmpr->events_lock);
372 INIT_LIST_HEAD(&vmpr->events); 372 INIT_LIST_HEAD(&vmpr->events);
373 INIT_WORK(&vmpr->work, vmpressure_work_fn); 373 INIT_WORK(&vmpr->work, vmpressure_work_fn);
374} 374}
375
376/**
377 * vmpressure_cleanup() - shuts down vmpressure control structure
378 * @vmpr: Structure to be cleaned up
379 *
380 * This function should be called before the structure in which it is
381 * embedded is cleaned up.
382 */
383void vmpressure_cleanup(struct vmpressure *vmpr)
384{
385 /*
386 * Make sure there is no pending work before eventfd infrastructure
387 * goes away.
388 */
389 flush_work(&vmpr->work);
390}
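The sr_lock critical sections only update a pair of counters and never sleep, so a spinlock fits better than a mutex and is safe to take from reclaim context; vmpressure_cleanup() additionally flushes pending work before the embedding structure disappears. The counter pattern, sketched with POSIX spinlocks:

    /* build with: cc -pthread demo.c */
    #include <assert.h>
    #include <pthread.h>

    static pthread_spinlock_t sr_lock;
    static unsigned long scanned, reclaimed;

    static void account(unsigned long s, unsigned long r)
    {
        pthread_spin_lock(&sr_lock);   /* short, non-sleeping section */
        scanned += s;
        reclaimed += r;
        pthread_spin_unlock(&sr_lock);
    }

    int main(void)
    {
        pthread_spin_init(&sr_lock, PTHREAD_PROCESS_PRIVATE);
        account(100, 25);
        account(50, 10);
        assert(scanned == 150 && reclaimed == 35);
        pthread_spin_destroy(&sr_lock);
        return 0;
    }
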
diff --git a/mm/zbud.c b/mm/zbud.c
index 9bb4710e3589..ad1e781284fd 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -257,7 +257,7 @@ int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp,
257 257
258 if (size <= 0 || gfp & __GFP_HIGHMEM) 258 if (size <= 0 || gfp & __GFP_HIGHMEM)
259 return -EINVAL; 259 return -EINVAL;
260 if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED) 260 if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
261 return -ENOSPC; 261 return -ENOSPC;
262 chunks = size_to_chunks(size); 262 chunks = size_to_chunks(size);
263 spin_lock(&pool->lock); 263 spin_lock(&pool->lock);
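zbud rounds every allocation up to whole CHUNK_SIZE units, with the aligned header occupying the front of the page, and the tightened bound rejects requests one chunk earlier than before. A sketch of the rounding arithmetic with invented constants (the exact motivation for the extra chunk is not visible in this hunk):

    #include <assert.h>
    #include <stddef.h>

    #define PAGE_SIZE_   4096
    #define CHUNK_SIZE_  64
    #define ZHDR_SIZE_   128    /* hypothetical aligned header size */

    static size_t size_to_chunks(size_t size)
    {
        return (size + CHUNK_SIZE_ - 1) / CHUNK_SIZE_;   /* round up */
    }

    int main(void)
    {
        assert(size_to_chunks(64) == 1);
        assert(size_to_chunks(65) == 2);   /* one byte over costs a whole chunk */

        /* the fix shrinks the largest accepted payload by one chunk */
        assert(size_to_chunks(PAGE_SIZE_ - ZHDR_SIZE_) == 62);
        assert(size_to_chunks(PAGE_SIZE_ - ZHDR_SIZE_ - CHUNK_SIZE_) == 61);
        return 0;
    }
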
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 4a78c4de9f20..6ee48aac776f 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -91,7 +91,12 @@ EXPORT_SYMBOL(__vlan_find_dev_deep);
91 91
92struct net_device *vlan_dev_real_dev(const struct net_device *dev) 92struct net_device *vlan_dev_real_dev(const struct net_device *dev)
93{ 93{
94 return vlan_dev_priv(dev)->real_dev; 94 struct net_device *ret = vlan_dev_priv(dev)->real_dev;
95
96 while (is_vlan_dev(ret))
97 ret = vlan_dev_priv(ret)->real_dev;
98
99 return ret;
95} 100}
96EXPORT_SYMBOL(vlan_dev_real_dev); 101EXPORT_SYMBOL(vlan_dev_real_dev);
97 102
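vlan_dev_real_dev() now loops instead of returning its immediate parent, so stacked (QinQ) VLAN devices resolve all the way down to the underlying physical device. The walk, modeled on plain structs:

    #include <assert.h>
    #include <stddef.h>

    struct dev {
        struct dev *real_dev;   /* parent; unused for non-VLAN devices */
        int is_vlan;
    };

    /* Follow an 802.1Q/802.1ad stack down to the non-VLAN device. */
    static struct dev *real_dev(struct dev *d)
    {
        struct dev *ret = d->real_dev;
        while (ret->is_vlan)
            ret = ret->real_dev;
        return ret;
    }

    int main(void)
    {
        struct dev eth0  = { NULL, 0 };
        struct dev vlan1 = { &eth0, 1 };      /* eth0.100 */
        struct dev vlan2 = { &vlan1, 1 };     /* eth0.100.200, QinQ */
        assert(real_dev(&vlan2) == &eth0);
        assert(real_dev(&vlan1) == &eth0);
        return 0;
    }
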
diff --git a/net/Kconfig b/net/Kconfig
index 37702491abe9..2b406608a1a4 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -244,7 +244,7 @@ config NETPRIO_CGROUP
244 Cgroup subsystem for use in assigning processes to network priorities on 244 Cgroup subsystem for use in assigning processes to network priorities on
245 a per-interface basis 245 a per-interface basis
246 246
247config NET_LL_RX_POLL 247config NET_RX_BUSY_POLL
248 boolean 248 boolean
249 default y 249 default y
250 250
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index e14531f1ce1c..264de88db320 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1529,6 +1529,8 @@ out:
1529 * in these cases, the skb is further handled by this function and 1529 * in these cases, the skb is further handled by this function and
1530 * returns 1, otherwise it returns 0 and the caller shall further 1530 * returns 1, otherwise it returns 0 and the caller shall further
1531 * process the skb. 1531 * process the skb.
1532 *
1533 * This call might reallocate skb data.
1532 */ 1534 */
1533int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, 1535int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1534 unsigned short vid) 1536 unsigned short vid)
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index f105219f4a4b..7614af31daff 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -508,6 +508,7 @@ out:
508 return 0; 508 return 0;
509} 509}
510 510
511/* this call might reallocate skb data */
511static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len) 512static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
512{ 513{
513 int ret = false; 514 int ret = false;
@@ -568,6 +569,7 @@ out:
568 return ret; 569 return ret;
569} 570}
570 571
572/* this call might reallocate skb data */
571bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) 573bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
572{ 574{
573 struct ethhdr *ethhdr; 575 struct ethhdr *ethhdr;
@@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
619 621
620 if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr))) 622 if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
621 return false; 623 return false;
624
625 /* skb->data might have been reallocated by pskb_may_pull() */
626 ethhdr = (struct ethhdr *)skb->data;
627 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
628 ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
629
622 udphdr = (struct udphdr *)(skb->data + *header_len); 630 udphdr = (struct udphdr *)(skb->data + *header_len);
623 *header_len += sizeof(*udphdr); 631 *header_len += sizeof(*udphdr);
624 632
@@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
634 return true; 642 return true;
635} 643}
636 644
645/* this call might reallocate skb data */
637bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, 646bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
638 struct sk_buff *skb, struct ethhdr *ethhdr) 647 struct sk_buff *skb)
639{ 648{
640 struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL; 649 struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
641 struct batadv_orig_node *orig_dst_node = NULL; 650 struct batadv_orig_node *orig_dst_node = NULL;
642 struct batadv_gw_node *curr_gw = NULL; 651 struct batadv_gw_node *curr_gw = NULL;
652 struct ethhdr *ethhdr;
643 bool ret, out_of_range = false; 653 bool ret, out_of_range = false;
644 unsigned int header_len = 0; 654 unsigned int header_len = 0;
645 uint8_t curr_tq_avg; 655 uint8_t curr_tq_avg;
@@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
648 if (!ret) 658 if (!ret)
649 goto out; 659 goto out;
650 660
661 ethhdr = (struct ethhdr *)skb->data;
651 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, 662 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
652 ethhdr->h_dest); 663 ethhdr->h_dest);
653 if (!orig_dst_node) 664 if (!orig_dst_node)
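The batman-adv changes all follow one rule: pskb_may_pull() may reallocate skb->data, so every cached header pointer (here ethhdr) must be re-derived from skb->data after the call. A userspace analogue built on realloc():

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf { unsigned char *data; size_t len; };

    /* Analogue of pskb_may_pull(): may move buf->data; 0 on failure. */
    static int may_pull(struct buf *b, size_t need)
    {
        if (need > b->len) {
            unsigned char *p = realloc(b->data, need);
            if (!p)
                return 0;
            memset(p + b->len, 0, need - b->len);
            b->data = p;            /* the pointer may have changed */
            b->len = need;
        }
        return 1;
    }

    int main(void)
    {
        struct buf b = { calloc(1, 14), 14 };
        unsigned char *eth = b.data;     /* header pointer cached early */
        assert(may_pull(&b, 4096));      /* may realloc: 'eth' is now stale */
        eth = b.data;                    /* re-derive, as the hunks above do */
        (void)eth;
        free(b.data);
        return 0;
    }
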
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 039902dca4a6..1037d75da51f 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
34void batadv_gw_node_purge(struct batadv_priv *bat_priv); 34void batadv_gw_node_purge(struct batadv_priv *bat_priv);
35int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset); 35int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
36bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len); 36bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
37bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, 37bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
38 struct sk_buff *skb, struct ethhdr *ethhdr);
39 38
40#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ 39#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 700d0b49742d..0f04e1c302b4 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb,
180 if (batadv_bla_tx(bat_priv, skb, vid)) 180 if (batadv_bla_tx(bat_priv, skb, vid))
181 goto dropped; 181 goto dropped;
182 182
183 /* skb->data might have been reallocated by batadv_bla_tx() */
184 ethhdr = (struct ethhdr *)skb->data;
185
183 /* Register the client MAC in the transtable */ 186 /* Register the client MAC in the transtable */
184 if (!is_multicast_ether_addr(ethhdr->h_source)) 187 if (!is_multicast_ether_addr(ethhdr->h_source))
185 batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); 188 batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
@@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb,
220 default: 223 default:
221 break; 224 break;
222 } 225 }
226
227 /* reminder: ethhdr might have become unusable from here on
228 * (batadv_gw_is_dhcp_target() might have reallocated skb data)
229 */
223 } 230 }
224 231
225 /* ethernet packet should be broadcasted */ 232 /* ethernet packet should be broadcasted */
@@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
266 /* unicast packet */ 273 /* unicast packet */
267 } else { 274 } else {
268 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) { 275 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
269 ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr); 276 ret = batadv_gw_out_of_range(bat_priv, skb);
270 if (ret) 277 if (ret)
271 goto dropped; 278 goto dropped;
272 } 279 }
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index dc8b5d4dd636..857e1b8349ee 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
326 * @skb: the skb containing the payload to encapsulate 326 * @skb: the skb containing the payload to encapsulate
327 * @orig_node: the destination node 327 * @orig_node: the destination node
328 * 328 *
329 * Returns false if the payload could not be encapsulated or true otherwise 329 * Returns false if the payload could not be encapsulated or true otherwise.
330 *
331 * This call might reallocate skb data.
330 */ 332 */
331static bool batadv_unicast_prepare_skb(struct sk_buff *skb, 333static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
332 struct batadv_orig_node *orig_node) 334 struct batadv_orig_node *orig_node)
@@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
343 * @orig_node: the destination node 345 * @orig_node: the destination node
344 * @packet_subtype: the batman 4addr packet subtype to use 346 * @packet_subtype: the batman 4addr packet subtype to use
345 * 347 *
346 * Returns false if the payload could not be encapsulated or true otherwise 348 * Returns false if the payload could not be encapsulated or true otherwise.
349 *
350 * This call might reallocate skb data.
347 */ 351 */
348bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv, 352bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
349 struct sk_buff *skb, 353 struct sk_buff *skb,
@@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
401 struct batadv_neigh_node *neigh_node; 405 struct batadv_neigh_node *neigh_node;
402 int data_len = skb->len; 406 int data_len = skb->len;
403 int ret = NET_RX_DROP; 407 int ret = NET_RX_DROP;
404 unsigned int dev_mtu; 408 unsigned int dev_mtu, header_len;
405 409
406 /* get routing information */ 410 /* get routing information */
407 if (is_multicast_ether_addr(ethhdr->h_dest)) { 411 if (is_multicast_ether_addr(ethhdr->h_dest)) {
@@ -428,11 +432,17 @@ find_router:
428 432
429 switch (packet_type) { 433 switch (packet_type) {
430 case BATADV_UNICAST: 434 case BATADV_UNICAST:
431 batadv_unicast_prepare_skb(skb, orig_node); 435 if (!batadv_unicast_prepare_skb(skb, orig_node))
436 goto out;
437
438 header_len = sizeof(struct batadv_unicast_packet);
432 break; 439 break;
433 case BATADV_UNICAST_4ADDR: 440 case BATADV_UNICAST_4ADDR:
434 batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node, 441 if (!batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
435 packet_subtype); 442 packet_subtype))
443 goto out;
444
445 header_len = sizeof(struct batadv_unicast_4addr_packet);
436 break; 446 break;
437 default: 447 default:
438 /* this function supports UNICAST and UNICAST_4ADDR only. It 448 /* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -441,6 +451,7 @@ find_router:
441 goto out; 451 goto out;
442 } 452 }
443 453
454 ethhdr = (struct ethhdr *)(skb->data + header_len);
444 unicast_packet = (struct batadv_unicast_packet *)skb->data; 455 unicast_packet = (struct batadv_unicast_packet *)skb->data;
445 456
446 /* inform the destination node that we are still missing a correct route 457 /* inform the destination node that we are still missing a correct route
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index e3a349977595..cc27297da5a9 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -513,7 +513,10 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
513 513
514 hci_setup_event_mask(req); 514 hci_setup_event_mask(req);
515 515
516 if (hdev->hci_ver > BLUETOOTH_VER_1_1) 516 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
517 * local supported commands HCI command.
518 */
519 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
517 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); 520 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
518 521
519 if (lmp_ssp_capable(hdev)) { 522 if (lmp_ssp_capable(hdev)) {
@@ -2165,10 +2168,6 @@ int hci_register_dev(struct hci_dev *hdev)
2165 2168
2166 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 2169 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2167 2170
2168 write_lock(&hci_dev_list_lock);
2169 list_add(&hdev->list, &hci_dev_list);
2170 write_unlock(&hci_dev_list_lock);
2171
2172 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND | 2171 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2173 WQ_MEM_RECLAIM, 1, hdev->name); 2172 WQ_MEM_RECLAIM, 1, hdev->name);
2174 if (!hdev->workqueue) { 2173 if (!hdev->workqueue) {
@@ -2203,6 +2202,10 @@ int hci_register_dev(struct hci_dev *hdev)
2203 if (hdev->dev_type != HCI_AMP) 2202 if (hdev->dev_type != HCI_AMP)
2204 set_bit(HCI_AUTO_OFF, &hdev->dev_flags); 2203 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2205 2204
2205 write_lock(&hci_dev_list_lock);
2206 list_add(&hdev->list, &hci_dev_list);
2207 write_unlock(&hci_dev_list_lock);
2208
2206 hci_notify(hdev, HCI_DEV_REG); 2209 hci_notify(hdev, HCI_DEV_REG);
2207 hci_dev_hold(hdev); 2210 hci_dev_hold(hdev);
2208 2211
@@ -2215,9 +2218,6 @@ err_wqueue:
2215 destroy_workqueue(hdev->req_workqueue); 2218 destroy_workqueue(hdev->req_workqueue);
2216err: 2219err:
2217 ida_simple_remove(&hci_index_ida, hdev->id); 2220 ida_simple_remove(&hci_index_ida, hdev->id);
2218 write_lock(&hci_dev_list_lock);
2219 list_del(&hdev->list);
2220 write_unlock(&hci_dev_list_lock);
2221 2221
2222 return error; 2222 return error;
2223} 2223}
@@ -3399,8 +3399,16 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3399 */ 3399 */
3400 if (hdev->sent_cmd) { 3400 if (hdev->sent_cmd) {
3401 req_complete = bt_cb(hdev->sent_cmd)->req.complete; 3401 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3402 if (req_complete) 3402
3403 if (req_complete) {
3404 /* We must set the complete callback to NULL to
3405 * avoid calling the callback more than once if
3406 * this function gets called again.
3407 */
3408 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3409
3403 goto call_complete; 3410 goto call_complete;
3411 }
3404 } 3412 }
3405 3413
3406 /* Remove all pending commands belonging to this request */ 3414 /* Remove all pending commands belonging to this request */
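Clearing req.complete before jumping to the call site guarantees the completion runs at most once even if hci_req_cmd_complete() is re-entered for the same command. The clear-then-call idiom in isolation:

    #include <assert.h>
    #include <stddef.h>

    static int calls;
    static void on_complete(void) { calls++; }

    struct cmd { void (*complete)(void); };

    /* Run the completion exactly once, even if invoked again. */
    static void req_complete(struct cmd *c)
    {
        void (*cb)(void) = c->complete;
        if (!cb)
            return;
        c->complete = NULL;   /* clear first, mirroring the hunk above */
        cb();
    }

    int main(void)
    {
        struct cmd c = { on_complete };
        req_complete(&c);
        req_complete(&c);     /* second invocation is now a no-op */
        assert(calls == 1);
        return 0;
    }
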
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 2ef66781fedb..69363bd37f64 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -70,7 +70,8 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
70 } 70 }
71 71
72 mdst = br_mdb_get(br, skb, vid); 72 mdst = br_mdb_get(br, skb, vid);
73 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) 73 if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
74 br_multicast_querier_exists(br))
74 br_multicast_deliver(mdst, skb); 75 br_multicast_deliver(mdst, skb);
75 else 76 else
76 br_flood_deliver(br, skb, false); 77 br_flood_deliver(br, skb, false);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 60aca9109a50..ffd5874f2592 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -161,7 +161,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
161 if (!pv) 161 if (!pv)
162 return; 162 return;
163 163
164 for_each_set_bit_from(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { 164 for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
165 f = __br_fdb_get(br, br->dev->dev_addr, vid); 165 f = __br_fdb_get(br, br->dev->dev_addr, vid);
166 if (f && f->is_local && !f->dst) 166 if (f && f->is_local && !f->dst)
167 fdb_delete(br, f); 167 fdb_delete(br, f);
@@ -730,7 +730,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
730 /* VID was specified, so use it. */ 730 /* VID was specified, so use it. */
731 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid); 731 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
732 } else { 732 } else {
733 if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) { 733 if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
734 err = __br_fdb_add(ndm, p, addr, nlh_flags, 0); 734 err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
735 goto out; 735 goto out;
736 } 736 }
@@ -739,7 +739,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
739 * specify a VLAN. To be nice, add/update entry for every 739 * specify a VLAN. To be nice, add/update entry for every
740 * vlan on this port. 740 * vlan on this port.
741 */ 741 */
742 for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { 742 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
743 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid); 743 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
744 if (err) 744 if (err)
745 goto out; 745 goto out;
@@ -817,7 +817,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
817 817
818 err = __br_fdb_delete(p, addr, vid); 818 err = __br_fdb_delete(p, addr, vid);
819 } else { 819 } else {
820 if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) { 820 if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
821 err = __br_fdb_delete(p, addr, 0); 821 err = __br_fdb_delete(p, addr, 0);
822 goto out; 822 goto out;
823 } 823 }
@@ -827,7 +827,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
827 * vlan on this port. 827 * vlan on this port.
828 */ 828 */
829 err = -ENOENT; 829 err = -ENOENT;
830 for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { 830 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
831 err &= __br_fdb_delete(p, addr, vid); 831 err &= __br_fdb_delete(p, addr, vid);
832 } 832 }
833 } 833 }
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 1b8b8b824cd7..8c561c0aa636 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -101,7 +101,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
101 unicast = false; 101 unicast = false;
102 } else if (is_multicast_ether_addr(dest)) { 102 } else if (is_multicast_ether_addr(dest)) {
103 mdst = br_mdb_get(br, skb, vid); 103 mdst = br_mdb_get(br, skb, vid);
104 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { 104 if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
105 br_multicast_querier_exists(br)) {
105 if ((mdst && mdst->mglist) || 106 if ((mdst && mdst->mglist) ||
106 br_multicast_is_router(br)) 107 br_multicast_is_router(br))
107 skb2 = skb; 108 skb2 = skb;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 69af490cce44..08e576ada0b2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -619,6 +619,9 @@ rehash:
619 mp->br = br; 619 mp->br = br;
620 mp->addr = *group; 620 mp->addr = *group;
621 621
622 setup_timer(&mp->timer, br_multicast_group_expired,
623 (unsigned long)mp);
624
622 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]); 625 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
623 mdb->size++; 626 mdb->size++;
624 627
@@ -1011,6 +1014,16 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1011} 1014}
1012#endif 1015#endif
1013 1016
1017static void br_multicast_update_querier_timer(struct net_bridge *br,
1018 unsigned long max_delay)
1019{
1020 if (!timer_pending(&br->multicast_querier_timer))
1021 br->multicast_querier_delay_time = jiffies + max_delay;
1022
1023 mod_timer(&br->multicast_querier_timer,
1024 jiffies + br->multicast_querier_interval);
1025}
1026
1014/* 1027/*
1015 * Add port to router_list 1028 * Add port to router_list
1016 * list is maintained ordered by pointer value 1029 * list is maintained ordered by pointer value
@@ -1061,11 +1074,11 @@ timer:
1061 1074
1062static void br_multicast_query_received(struct net_bridge *br, 1075static void br_multicast_query_received(struct net_bridge *br,
1063 struct net_bridge_port *port, 1076 struct net_bridge_port *port,
1064 int saddr) 1077 int saddr,
1078 unsigned long max_delay)
1065{ 1079{
1066 if (saddr) 1080 if (saddr)
1067 mod_timer(&br->multicast_querier_timer, 1081 br_multicast_update_querier_timer(br, max_delay);
1068 jiffies + br->multicast_querier_interval);
1069 else if (timer_pending(&br->multicast_querier_timer)) 1082 else if (timer_pending(&br->multicast_querier_timer))
1070 return; 1083 return;
1071 1084
@@ -1093,8 +1106,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1093 (port && port->state == BR_STATE_DISABLED)) 1106 (port && port->state == BR_STATE_DISABLED))
1094 goto out; 1107 goto out;
1095 1108
1096 br_multicast_query_received(br, port, !!iph->saddr);
1097
1098 group = ih->group; 1109 group = ih->group;
1099 1110
1100 if (skb->len == sizeof(*ih)) { 1111 if (skb->len == sizeof(*ih)) {
@@ -1118,6 +1129,8 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1118 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; 1129 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
1119 } 1130 }
1120 1131
1132 br_multicast_query_received(br, port, !!iph->saddr, max_delay);
1133
1121 if (!group) 1134 if (!group)
1122 goto out; 1135 goto out;
1123 1136
@@ -1126,7 +1139,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1126 if (!mp) 1139 if (!mp)
1127 goto out; 1140 goto out;
1128 1141
1129 setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp);
1130 mod_timer(&mp->timer, now + br->multicast_membership_interval); 1142 mod_timer(&mp->timer, now + br->multicast_membership_interval);
1131 mp->timer_armed = true; 1143 mp->timer_armed = true;
1132 1144
@@ -1174,8 +1186,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1174 (port && port->state == BR_STATE_DISABLED)) 1186 (port && port->state == BR_STATE_DISABLED))
1175 goto out; 1187 goto out;
1176 1188
1177 br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr));
1178
1179 if (skb->len == sizeof(*mld)) { 1189 if (skb->len == sizeof(*mld)) {
1180 if (!pskb_may_pull(skb, sizeof(*mld))) { 1190 if (!pskb_may_pull(skb, sizeof(*mld))) {
1181 err = -EINVAL; 1191 err = -EINVAL;
@@ -1185,7 +1195,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1185 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay)); 1195 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
1186 if (max_delay) 1196 if (max_delay)
1187 group = &mld->mld_mca; 1197 group = &mld->mld_mca;
1188 } else if (skb->len >= sizeof(*mld2q)) { 1198 } else {
1189 if (!pskb_may_pull(skb, sizeof(*mld2q))) { 1199 if (!pskb_may_pull(skb, sizeof(*mld2q))) {
1190 err = -EINVAL; 1200 err = -EINVAL;
1191 goto out; 1201 goto out;
@@ -1196,6 +1206,9 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1196 max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1; 1206 max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
1197 } 1207 }
1198 1208
1209 br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr),
1210 max_delay);
1211
1199 if (!group) 1212 if (!group)
1200 goto out; 1213 goto out;
1201 1214
@@ -1204,7 +1217,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1204 if (!mp) 1217 if (!mp)
1205 goto out; 1218 goto out;
1206 1219
1207 setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp);
1208 mod_timer(&mp->timer, now + br->multicast_membership_interval); 1220 mod_timer(&mp->timer, now + br->multicast_membership_interval);
1209 mp->timer_armed = true; 1221 mp->timer_armed = true;
1210 1222
@@ -1642,6 +1654,8 @@ void br_multicast_init(struct net_bridge *br)
1642 br->multicast_querier_interval = 255 * HZ; 1654 br->multicast_querier_interval = 255 * HZ;
1643 br->multicast_membership_interval = 260 * HZ; 1655 br->multicast_membership_interval = 260 * HZ;
1644 1656
1657 br->multicast_querier_delay_time = 0;
1658
1645 spin_lock_init(&br->multicast_lock); 1659 spin_lock_init(&br->multicast_lock);
1646 setup_timer(&br->multicast_router_timer, 1660 setup_timer(&br->multicast_router_timer,
1647 br_multicast_local_router_expired, 0); 1661 br_multicast_local_router_expired, 0);
@@ -1830,6 +1844,8 @@ unlock:
1830 1844
1831int br_multicast_set_querier(struct net_bridge *br, unsigned long val) 1845int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
1832{ 1846{
1847 unsigned long max_delay;
1848
1833 val = !!val; 1849 val = !!val;
1834 1850
1835 spin_lock_bh(&br->multicast_lock); 1851 spin_lock_bh(&br->multicast_lock);
@@ -1837,8 +1853,14 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
1837 goto unlock; 1853 goto unlock;
1838 1854
1839 br->multicast_querier = val; 1855 br->multicast_querier = val;
1840 if (val) 1856 if (!val)
1841 br_multicast_start_querier(br); 1857 goto unlock;
1858
1859 max_delay = br->multicast_query_response_interval;
1860 if (!timer_pending(&br->multicast_querier_timer))
1861 br->multicast_querier_delay_time = jiffies + max_delay;
1862
1863 br_multicast_start_querier(br);
1842 1864
1843unlock: 1865unlock:
1844 spin_unlock_bh(&br->multicast_lock); 1866 spin_unlock_bh(&br->multicast_lock);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 1fc30abd3a52..b9259efa636e 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -132,7 +132,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
132 else 132 else
133 pv = br_get_vlan_info(br); 133 pv = br_get_vlan_info(br);
134 134
135 if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) 135 if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID))
136 goto done; 136 goto done;
137 137
138 af = nla_nest_start(skb, IFLA_AF_SPEC); 138 af = nla_nest_start(skb, IFLA_AF_SPEC);
@@ -140,7 +140,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
140 goto nla_put_failure; 140 goto nla_put_failure;
141 141
142 pvid = br_get_pvid(pv); 142 pvid = br_get_pvid(pv);
143 for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { 143 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
144 vinfo.vid = vid; 144 vinfo.vid = vid;
145 vinfo.flags = 0; 145 vinfo.flags = 0;
146 if (vid == pvid) 146 if (vid == pvid)
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 3be89b3ce17b..2f7da41851bf 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -267,6 +267,7 @@ struct net_bridge
267 unsigned long multicast_query_interval; 267 unsigned long multicast_query_interval;
268 unsigned long multicast_query_response_interval; 268 unsigned long multicast_query_response_interval;
269 unsigned long multicast_startup_query_interval; 269 unsigned long multicast_startup_query_interval;
270 unsigned long multicast_querier_delay_time;
270 271
271 spinlock_t multicast_lock; 272 spinlock_t multicast_lock;
272 struct net_bridge_mdb_htable __rcu *mdb; 273 struct net_bridge_mdb_htable __rcu *mdb;
@@ -501,6 +502,13 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
501 (br->multicast_router == 1 && 502 (br->multicast_router == 1 &&
502 timer_pending(&br->multicast_router_timer)); 503 timer_pending(&br->multicast_router_timer));
503} 504}
505
506static inline bool br_multicast_querier_exists(struct net_bridge *br)
507{
508 return time_is_before_jiffies(br->multicast_querier_delay_time) &&
509 (br->multicast_querier ||
510 timer_pending(&br->multicast_querier_timer));
511}
504#else 512#else
505static inline int br_multicast_rcv(struct net_bridge *br, 513static inline int br_multicast_rcv(struct net_bridge *br,
506 struct net_bridge_port *port, 514 struct net_bridge_port *port,
@@ -557,6 +565,10 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
557{ 565{
558 return 0; 566 return 0;
559} 567}
568static inline bool br_multicast_querier_exists(struct net_bridge *br)
569{
570 return false;
571}
560static inline void br_mdb_init(void) 572static inline void br_mdb_init(void)
561{ 573{
562} 574}
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 394bb96b6087..3b9637fb7939 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Sysfs attributes of bridge ports 2 * Sysfs attributes of bridge
3 * Linux ethernet bridge 3 * Linux ethernet bridge
4 * 4 *
5 * Authors: 5 * Authors:
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index bd58b45f5f90..9a9ffe7e4019 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -108,7 +108,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
108 108
109 clear_bit(vid, v->vlan_bitmap); 109 clear_bit(vid, v->vlan_bitmap);
110 v->num_vlans--; 110 v->num_vlans--;
111 if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) { 111 if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) {
112 if (v->port_idx) 112 if (v->port_idx)
113 rcu_assign_pointer(v->parent.port->vlan_info, NULL); 113 rcu_assign_pointer(v->parent.port->vlan_info, NULL);
114 else 114 else
@@ -122,7 +122,7 @@ static void __vlan_flush(struct net_port_vlans *v)
122{ 122{
123 smp_wmb(); 123 smp_wmb();
124 v->pvid = 0; 124 v->pvid = 0;
125 bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN); 125 bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
126 if (v->port_idx) 126 if (v->port_idx)
127 rcu_assign_pointer(v->parent.port->vlan_info, NULL); 127 rcu_assign_pointer(v->parent.port->vlan_info, NULL);
128 else 128 else
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 00ee068efc1c..b84a1b155bc1 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -65,6 +65,7 @@ ipv6:
65 nhoff += sizeof(struct ipv6hdr); 65 nhoff += sizeof(struct ipv6hdr);
66 break; 66 break;
67 } 67 }
68 case __constant_htons(ETH_P_8021AD):
68 case __constant_htons(ETH_P_8021Q): { 69 case __constant_htons(ETH_P_8021Q): {
69 const struct vlan_hdr *vlan; 70 const struct vlan_hdr *vlan;
70 struct vlan_hdr _vlan; 71 struct vlan_hdr _vlan;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index b7de821f98df..60533db8b72d 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1441,16 +1441,18 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1441 atomic_set(&p->refcnt, 1); 1441 atomic_set(&p->refcnt, 1);
1442 p->reachable_time = 1442 p->reachable_time =
1443 neigh_rand_reach_time(p->base_reachable_time); 1443 neigh_rand_reach_time(p->base_reachable_time);
1444 dev_hold(dev);
1445 p->dev = dev;
1446 write_pnet(&p->net, hold_net(net));
1447 p->sysctl_table = NULL;
1444 1448
1445 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) { 1449 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1450 release_net(net);
1451 dev_put(dev);
1446 kfree(p); 1452 kfree(p);
1447 return NULL; 1453 return NULL;
1448 } 1454 }
1449 1455
1450 dev_hold(dev);
1451 p->dev = dev;
1452 write_pnet(&p->net, hold_net(net));
1453 p->sysctl_table = NULL;
1454 write_lock_bh(&tbl->lock); 1456 write_lock_bh(&tbl->lock);
1455 p->next = tbl->parms.next; 1457 p->next = tbl->parms.next;
1456 tbl->parms.next = p; 1458 tbl->parms.next = p;
@@ -2767,6 +2769,7 @@ EXPORT_SYMBOL(neigh_app_ns);
2767 2769
2768#ifdef CONFIG_SYSCTL 2770#ifdef CONFIG_SYSCTL
2769static int zero; 2771static int zero;
2772static int int_max = INT_MAX;
2770static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN); 2773static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
2771 2774
2772static int proc_unres_qlen(struct ctl_table *ctl, int write, 2775static int proc_unres_qlen(struct ctl_table *ctl, int write,
@@ -2819,19 +2822,25 @@ static struct neigh_sysctl_table {
2819 .procname = "mcast_solicit", 2822 .procname = "mcast_solicit",
2820 .maxlen = sizeof(int), 2823 .maxlen = sizeof(int),
2821 .mode = 0644, 2824 .mode = 0644,
2822 .proc_handler = proc_dointvec, 2825 .extra1 = &zero,
2826 .extra2 = &int_max,
2827 .proc_handler = proc_dointvec_minmax,
2823 }, 2828 },
2824 [NEIGH_VAR_UCAST_PROBE] = { 2829 [NEIGH_VAR_UCAST_PROBE] = {
2825 .procname = "ucast_solicit", 2830 .procname = "ucast_solicit",
2826 .maxlen = sizeof(int), 2831 .maxlen = sizeof(int),
2827 .mode = 0644, 2832 .mode = 0644,
2828 .proc_handler = proc_dointvec, 2833 .extra1 = &zero,
2834 .extra2 = &int_max,
2835 .proc_handler = proc_dointvec_minmax,
2829 }, 2836 },
2830 [NEIGH_VAR_APP_PROBE] = { 2837 [NEIGH_VAR_APP_PROBE] = {
2831 .procname = "app_solicit", 2838 .procname = "app_solicit",
2832 .maxlen = sizeof(int), 2839 .maxlen = sizeof(int),
2833 .mode = 0644, 2840 .mode = 0644,
2834 .proc_handler = proc_dointvec, 2841 .extra1 = &zero,
2842 .extra2 = &int_max,
2843 .proc_handler = proc_dointvec_minmax,
2835 }, 2844 },
2836 [NEIGH_VAR_RETRANS_TIME] = { 2845 [NEIGH_VAR_RETRANS_TIME] = {
2837 .procname = "retrans_time", 2846 .procname = "retrans_time",
@@ -2874,7 +2883,9 @@ static struct neigh_sysctl_table {
2874 .procname = "proxy_qlen", 2883 .procname = "proxy_qlen",
2875 .maxlen = sizeof(int), 2884 .maxlen = sizeof(int),
2876 .mode = 0644, 2885 .mode = 0644,
2877 .proc_handler = proc_dointvec, 2886 .extra1 = &zero,
2887 .extra2 = &int_max,
2888 .proc_handler = proc_dointvec_minmax,
2878 }, 2889 },
2879 [NEIGH_VAR_ANYCAST_DELAY] = { 2890 [NEIGH_VAR_ANYCAST_DELAY] = {
2880 .procname = "anycast_delay", 2891 .procname = "anycast_delay",
@@ -2916,19 +2927,25 @@ static struct neigh_sysctl_table {
2916 .procname = "gc_thresh1", 2927 .procname = "gc_thresh1",
2917 .maxlen = sizeof(int), 2928 .maxlen = sizeof(int),
2918 .mode = 0644, 2929 .mode = 0644,
2919 .proc_handler = proc_dointvec, 2930 .extra1 = &zero,
2931 .extra2 = &int_max,
2932 .proc_handler = proc_dointvec_minmax,
2920 }, 2933 },
2921 [NEIGH_VAR_GC_THRESH2] = { 2934 [NEIGH_VAR_GC_THRESH2] = {
2922 .procname = "gc_thresh2", 2935 .procname = "gc_thresh2",
2923 .maxlen = sizeof(int), 2936 .maxlen = sizeof(int),
2924 .mode = 0644, 2937 .mode = 0644,
2925 .proc_handler = proc_dointvec, 2938 .extra1 = &zero,
2939 .extra2 = &int_max,
2940 .proc_handler = proc_dointvec_minmax,
2926 }, 2941 },
2927 [NEIGH_VAR_GC_THRESH3] = { 2942 [NEIGH_VAR_GC_THRESH3] = {
2928 .procname = "gc_thresh3", 2943 .procname = "gc_thresh3",
2929 .maxlen = sizeof(int), 2944 .maxlen = sizeof(int),
2930 .mode = 0644, 2945 .mode = 0644,
2931 .proc_handler = proc_dointvec, 2946 .extra1 = &zero,
2947 .extra2 = &int_max,
2948 .proc_handler = proc_dointvec_minmax,
2932 }, 2949 },
2933 {}, 2950 {},
2934 }, 2951 },
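The neigh_parms_alloc() hunk initializes dev, net and sysctl_table before invoking the driver's ndo_neigh_setup() callback, so the callback sees a fully formed structure, and the failure path now drops the references it took; the sysctl hunks separately clamp the tunables to [0, INT_MAX] via proc_dointvec_minmax. The ordering-and-cleanup discipline, reduced to a toy allocator:

    #include <assert.h>
    #include <stdlib.h>

    struct parms { void *dev; int refs; };

    static int setup_cb(struct parms *p) { return p->dev != NULL; } /* needs dev */

    static struct parms *parms_alloc(void *dev, int (*setup)(struct parms *))
    {
        struct parms *p = calloc(1, sizeof(*p));
        if (!p)
            return NULL;
        p->dev = dev;          /* initialize BEFORE the callback runs */
        p->refs = 1;
        if (setup && !setup(p)) {
            p->refs--;         /* undo what we took (cf. release_net/dev_put) */
            free(p);
            return NULL;
        }
        return p;
    }

    int main(void)
    {
        int dev;
        struct parms *p = parms_alloc(&dev, setup_cb);
        assert(p && p->dev == &dev);
        free(p);
        return 0;
    }
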
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 3de740834d1f..ca198c1d1d30 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2156,7 +2156,7 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm,
2156 /* If aging addresses are supported device will need to 2156 /* If aging addresses are supported device will need to
2157 * implement its own handler for this. 2157 * implement its own handler for this.
2158 */ 2158 */
2159 if (ndm->ndm_state & NUD_PERMANENT) { 2159 if (!(ndm->ndm_state & NUD_PERMANENT)) {
2160 pr_info("%s: FDB only supports static addresses\n", dev->name); 2160 pr_info("%s: FDB only supports static addresses\n", dev->name);
2161 return -EINVAL; 2161 return -EINVAL;
2162 } 2162 }
@@ -2384,7 +2384,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
2384 struct nlattr *extfilt; 2384 struct nlattr *extfilt;
2385 u32 filter_mask = 0; 2385 u32 filter_mask = 0;
2386 2386
2387 extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg), 2387 extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
2388 IFLA_EXT_MASK); 2388 IFLA_EXT_MASK);
2389 if (extfilt) 2389 if (extfilt)
2390 filter_mask = nla_get_u32(extfilt); 2390 filter_mask = nla_get_u32(extfilt);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 20e02d2605ec..2c3d0f53d198 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -309,7 +309,8 @@ EXPORT_SYMBOL(__alloc_skb);
309 * @frag_size: size of fragment, or 0 if head was kmalloced 309 * @frag_size: size of fragment, or 0 if head was kmalloced
310 * 310 *
311 * Allocate a new &sk_buff. Caller provides space holding head and 311 * Allocate a new &sk_buff. Caller provides space holding head and
312 * skb_shared_info. @data must have been allocated by kmalloc() 312 * skb_shared_info. @data must have been allocated by kmalloc() only if
313 * @frag_size is 0, otherwise data should come from the page allocator.
313 * The return is the new skb buffer. 314 * The return is the new skb buffer.
314 * On a failure the return is %NULL, and @data is not freed. 315 * On a failure the return is %NULL, and @data is not freed.
315 * Notes : 316 * Notes :
@@ -739,7 +740,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
739 740
740 skb_copy_secmark(new, old); 741 skb_copy_secmark(new, old);
741 742
742#ifdef CONFIG_NET_LL_RX_POLL 743#ifdef CONFIG_NET_RX_BUSY_POLL
743 new->napi_id = old->napi_id; 744 new->napi_id = old->napi_id;
744#endif 745#endif
745} 746}
diff --git a/net/core/sock.c b/net/core/sock.c
index 548d716c5f62..2c097c5a35dd 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -900,7 +900,7 @@ set_rcvbuf:
900 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool); 900 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
901 break; 901 break;
902 902
903#ifdef CONFIG_NET_LL_RX_POLL 903#ifdef CONFIG_NET_RX_BUSY_POLL
904 case SO_BUSY_POLL: 904 case SO_BUSY_POLL:
905 /* allow unprivileged users to decrease the value */ 905 /* allow unprivileged users to decrease the value */
906 if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN)) 906 if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
@@ -1170,7 +1170,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
1170 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE); 1170 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1171 break; 1171 break;
1172 1172
1173#ifdef CONFIG_NET_LL_RX_POLL 1173#ifdef CONFIG_NET_RX_BUSY_POLL
1174 case SO_BUSY_POLL: 1174 case SO_BUSY_POLL:
1175 v.val = sk->sk_ll_usec; 1175 v.val = sk->sk_ll_usec;
1176 break; 1176 break;
@@ -2292,7 +2292,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
2292 2292
2293 sk->sk_stamp = ktime_set(-1L, 0); 2293 sk->sk_stamp = ktime_set(-1L, 0);
2294 2294
2295#ifdef CONFIG_NET_LL_RX_POLL 2295#ifdef CONFIG_NET_RX_BUSY_POLL
2296 sk->sk_napi_id = 0; 2296 sk->sk_napi_id = 0;
2297 sk->sk_ll_usec = sysctl_net_busy_read; 2297 sk->sk_ll_usec = sysctl_net_busy_read;
2298#endif 2298#endif
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 660968616637..31107abd2783 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -21,7 +21,9 @@
21#include <net/net_ratelimit.h> 21#include <net/net_ratelimit.h>
22#include <net/busy_poll.h> 22#include <net/busy_poll.h>
23 23
24static int zero = 0;
24static int one = 1; 25static int one = 1;
26static int ushort_max = USHRT_MAX;
25 27
26#ifdef CONFIG_RPS 28#ifdef CONFIG_RPS
27static int rps_sock_flow_sysctl(struct ctl_table *table, int write, 29static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
@@ -298,7 +300,7 @@ static struct ctl_table net_core_table[] = {
298 .proc_handler = flow_limit_table_len_sysctl 300 .proc_handler = flow_limit_table_len_sysctl
299 }, 301 },
300#endif /* CONFIG_NET_FLOW_LIMIT */ 302#endif /* CONFIG_NET_FLOW_LIMIT */
301#ifdef CONFIG_NET_LL_RX_POLL 303#ifdef CONFIG_NET_RX_BUSY_POLL
302 { 304 {
303 .procname = "busy_poll", 305 .procname = "busy_poll",
304 .data = &sysctl_net_busy_poll, 306 .data = &sysctl_net_busy_poll,
@@ -339,7 +341,9 @@ static struct ctl_table netns_core_table[] = {
339 .data = &init_net.core.sysctl_somaxconn, 341 .data = &init_net.core.sysctl_somaxconn,
340 .maxlen = sizeof(int), 342 .maxlen = sizeof(int),
341 .mode = 0644, 343 .mode = 0644,
342 .proc_handler = proc_dointvec 344 .extra1 = &zero,
345 .extra2 = &ushort_max,
346 .proc_handler = proc_dointvec_minmax
343 }, 347 },
344 { } 348 { }
345}; 349};
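Bounding somaxconn to [0, USHRT_MAX] guards the places that store the value narrowly; if the listen backlog ends up in a 16-bit field (an assumption about the motivation, not stated in the hunk), an unbounded write would wrap silently. The truncation, demonstrated (note the kernel handler rejects out-of-range writes, while the snippet clamps for illustration):

    #include <assert.h>
    #include <limits.h>

    int main(void)
    {
        int somaxconn = USHRT_MAX + 1;            /* 65536 */
        unsigned short backlog = (unsigned short)somaxconn;
        assert(backlog == 0);                     /* silent wrap to zero */

        /* the sysctl fix limits the accepted range instead */
        int lo = 0, hi = USHRT_MAX;
        int val = somaxconn < lo ? lo : somaxconn > hi ? hi : somaxconn;
        assert(val == USHRT_MAX);
        return 0;
    }
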
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 8d48c392adcc..34ca6d5a3a4b 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -772,7 +772,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
772 ci = nla_data(tb[IFA_CACHEINFO]); 772 ci = nla_data(tb[IFA_CACHEINFO]);
773 if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) { 773 if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
774 err = -EINVAL; 774 err = -EINVAL;
775 goto errout; 775 goto errout_free;
776 } 776 }
777 *pvalid_lft = ci->ifa_valid; 777 *pvalid_lft = ci->ifa_valid;
778 *pprefered_lft = ci->ifa_prefered; 778 *pprefered_lft = ci->ifa_prefered;
@@ -780,6 +780,8 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
780 780
781 return ifa; 781 return ifa;
782 782
783errout_free:
784 inet_free_ifa(ifa);
783errout: 785errout:
784 return ERR_PTR(err); 786 return ERR_PTR(err);
785} 787}
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index ab3d814bc80a..109ee89f123e 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
477 } 477 }
478 478
479 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - 479 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
480 net_adj) & ~(align - 1)) + (net_adj - 2); 480 net_adj) & ~(align - 1)) + net_adj - 2;
481} 481}
482 482
483static void esp4_err(struct sk_buff *skb, u32 info) 483static void esp4_err(struct sk_buff *skb, u32 info)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 49616fed9340..3df6d3edb2a1 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -71,7 +71,6 @@
71#include <linux/init.h> 71#include <linux/init.h>
72#include <linux/list.h> 72#include <linux/list.h>
73#include <linux/slab.h> 73#include <linux/slab.h>
74#include <linux/prefetch.h>
75#include <linux/export.h> 74#include <linux/export.h>
76#include <net/net_namespace.h> 75#include <net/net_namespace.h>
77#include <net/ip.h> 76#include <net/ip.h>
@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
1761 if (!c) 1760 if (!c)
1762 continue; 1761 continue;
1763 1762
1764 if (IS_LEAF(c)) { 1763 if (IS_LEAF(c))
1765 prefetch(rcu_dereference_rtnl(p->child[idx]));
1766 return (struct leaf *) c; 1764 return (struct leaf *) c;
1767 }
1768 1765
 1769 /* Rescan: start scanning in the new node */ 1766 /* Rescan: start scanning in the new node */
1770 p = (struct tnode *) c; 1767 p = (struct tnode *) c;
@@ -2133,7 +2130,7 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
2133 max--; 2130 max--;
2134 2131
2135 pointers = 0; 2132 pointers = 0;
2136 for (i = 1; i <= max; i++) 2133 for (i = 1; i < max; i++)
2137 if (stat->nodesizes[i] != 0) { 2134 if (stat->nodesizes[i] != 0) {
2138 seq_printf(seq, " %u: %u", i, stat->nodesizes[i]); 2135 seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
2139 pointers += (1<<i) * stat->nodesizes[i]; 2136 pointers += (1<<i) * stat->nodesizes[i];
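The trie_show_stats() change looks like an off-by-one guard: nodesizes[] has a fixed depth, and after the trailing-zero trim a bound of i <= max can index one slot past the end of the array. A minimal reproduction of the pattern:

    #include <assert.h>

    #define DEPTH 8

    int main(void)
    {
        unsigned nodesizes[DEPTH] = { 0, 3, 5, 0, 0, 0, 0, 2 };
        unsigned max = DEPTH, sum = 0;

        while (max > 1 && nodesizes[max - 1] == 0)
            max--;                       /* trim trailing zeros; max stays 8 here */

        for (unsigned i = 1; i < max; i++)   /* 'i <= max' would read [8]: OOB */
            sum += nodesizes[i];
        assert(sum == 10);
        return 0;
    }
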
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 1f6eab66f7ce..8d6939eeb492 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -383,7 +383,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
383 if (daddr) 383 if (daddr)
384 memcpy(&iph->daddr, daddr, 4); 384 memcpy(&iph->daddr, daddr, 4);
385 if (iph->daddr) 385 if (iph->daddr)
386 return t->hlen; 386 return t->hlen + sizeof(*iph);
387 387
388 return -(t->hlen + sizeof(*iph)); 388 return -(t->hlen + sizeof(*iph));
389} 389}
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 7167b08977df..850525b34899 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -76,9 +76,7 @@ int iptunnel_xmit(struct net *net, struct rtable *rt,
76 iph->daddr = dst; 76 iph->daddr = dst;
77 iph->saddr = src; 77 iph->saddr = src;
78 iph->ttl = ttl; 78 iph->ttl = ttl;
79 tunnel_ip_select_ident(skb, 79 __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
80 (const struct iphdr *)skb_inner_network_header(skb),
81 &rt->dst);
82 80
83 err = ip_local_out(skb); 81 err = ip_local_out(skb);
84 if (unlikely(net_xmit_eval(err))) 82 if (unlikely(net_xmit_eval(err)))
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 6577a1149a47..463bd1273346 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -273,7 +273,7 @@ static const struct snmp_mib snmp4_net_list[] = {
273 SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW), 273 SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
274 SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD), 274 SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
275 SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES), 275 SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
276 SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS), 276 SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS),
277 SNMP_MIB_SENTINEL 277 SNMP_MIB_SENTINEL
278}; 278};
279 279
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index b2c123c44d69..610e324348d1 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -36,6 +36,8 @@ static int tcp_adv_win_scale_min = -31;
36static int tcp_adv_win_scale_max = 31; 36static int tcp_adv_win_scale_max = 31;
37static int ip_ttl_min = 1; 37static int ip_ttl_min = 1;
38static int ip_ttl_max = 255; 38static int ip_ttl_max = 255;
39static int tcp_syn_retries_min = 1;
40static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
39static int ip_ping_group_range_min[] = { 0, 0 }; 41static int ip_ping_group_range_min[] = { 0, 0 };
40static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; 42static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
41 43
@@ -332,7 +334,9 @@ static struct ctl_table ipv4_table[] = {
332 .data = &sysctl_tcp_syn_retries, 334 .data = &sysctl_tcp_syn_retries,
333 .maxlen = sizeof(int), 335 .maxlen = sizeof(int),
334 .mode = 0644, 336 .mode = 0644,
335 .proc_handler = proc_dointvec 337 .proc_handler = proc_dointvec_minmax,
338 .extra1 = &tcp_syn_retries_min,
339 .extra2 = &tcp_syn_retries_max
336 }, 340 },
337 { 341 {
338 .procname = "tcp_synack_retries", 342 .procname = "tcp_synack_retries",
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5423223e93c2..b2f6c74861af 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1121,6 +1121,13 @@ new_segment:
1121 goto wait_for_memory; 1121 goto wait_for_memory;
1122 1122
1123 /* 1123 /*
1124 * All packets are restored as if they have
1125 * already been sent.
1126 */
1127 if (tp->repair)
1128 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1129
1130 /*
1124 * Check whether we can use HW checksum. 1131 * Check whether we can use HW checksum.
1125 */ 1132 */
1126 if (sk->sk_route_caps & NETIF_F_ALL_CSUM) 1133 if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index a9077f441cb2..b6ae92a51f58 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -206,8 +206,8 @@ static u32 cubic_root(u64 a)
206 */ 206 */
207static inline void bictcp_update(struct bictcp *ca, u32 cwnd) 207static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
208{ 208{
209 u64 offs; 209 u32 delta, bic_target, max_cnt;
210 u32 delta, t, bic_target, max_cnt; 210 u64 offs, t;
211 211
212 ca->ack_cnt++; /* count the number of ACKs */ 212 ca->ack_cnt++; /* count the number of ACKs */
213 213
@@ -250,9 +250,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
250 * if the cwnd < 1 million packets !!! 250 * if the cwnd < 1 million packets !!!
251 */ 251 */
252 252
253 t = (s32)(tcp_time_stamp - ca->epoch_start);
254 t += msecs_to_jiffies(ca->delay_min >> 3);
253 /* change the unit from HZ to bictcp_HZ */ 255 /* change the unit from HZ to bictcp_HZ */
254 t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3) 256 t <<= BICTCP_HZ;
255 - ca->epoch_start) << BICTCP_HZ) / HZ; 257 do_div(t, HZ);
256 258
257 if (t < ca->bic_K) /* t - K */ 259 if (t < ca->bic_K) /* t - K */
258 offs = ca->bic_K - t; 260 offs = ca->bic_K - t;
@@ -414,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
414 return; 416 return;
415 417
416 /* Discard delay samples right after fast recovery */ 418 /* Discard delay samples right after fast recovery */
417 if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ) 419 if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
418 return; 420 return;
419 421
420 delay = (rtt_us << 3) / USEC_PER_MSEC; 422 delay = (rtt_us << 3) / USEC_PER_MSEC;
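bictcp_update() now computes the elapsed time in a u64 before scaling it into BICTCP_HZ units with a shift and do_div(); done in 32 bits, the shift overflows for long-lived or long-idle connections and corrupts the cubic curve (the added epoch_start check in bictcp_acked() guards a related stale-epoch case). The overflow, isolated:

    #include <assert.h>
    #include <stdint.h>

    #define BICTCP_HZ 10     /* cubic's internal clock: 2^10 units per second */
    #define HZ_ 1000

    int main(void)
    {
        uint32_t elapsed = 5000000;              /* ~83 min of 1ms jiffies */

        uint32_t t32 = (elapsed << BICTCP_HZ) / HZ_;   /* shifts past 32 bits */
        uint64_t t64 = ((uint64_t)elapsed << BICTCP_HZ) / HZ_;

        assert(t64 == 5120000);
        assert(t32 != t64);                      /* 32-bit result is garbage */
        return 0;
    }
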
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index cfdcf7b2daf6..498ea99194af 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -813,8 +813,9 @@ static u32 inet6_addr_hash(const struct in6_addr *addr)
813/* On success it returns ifp with increased reference count */ 813/* On success it returns ifp with increased reference count */
814 814
815static struct inet6_ifaddr * 815static struct inet6_ifaddr *
816ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, 816ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
817 int scope, u32 flags) 817 const struct in6_addr *peer_addr, int pfxlen,
818 int scope, u32 flags, u32 valid_lft, u32 prefered_lft)
818{ 819{
819 struct inet6_ifaddr *ifa = NULL; 820 struct inet6_ifaddr *ifa = NULL;
820 struct rt6_info *rt; 821 struct rt6_info *rt;
@@ -863,6 +864,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
863 } 864 }
864 865
865 ifa->addr = *addr; 866 ifa->addr = *addr;
867 if (peer_addr)
868 ifa->peer_addr = *peer_addr;
866 869
867 spin_lock_init(&ifa->lock); 870 spin_lock_init(&ifa->lock);
868 spin_lock_init(&ifa->state_lock); 871 spin_lock_init(&ifa->state_lock);
@@ -872,6 +875,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
 	ifa->scope = scope;
 	ifa->prefix_len = pfxlen;
 	ifa->flags = flags | IFA_F_TENTATIVE;
+	ifa->valid_lft = valid_lft;
+	ifa->prefered_lft = prefered_lft;
 	ifa->cstamp = ifa->tstamp = jiffies;
 	ifa->tokenized = false;
 
@@ -1121,11 +1126,10 @@ retry:
 	if (ifp->flags & IFA_F_OPTIMISTIC)
 		addr_flags |= IFA_F_OPTIMISTIC;
 
-	ift = !max_addresses ||
-	      ipv6_count_addresses(idev) < max_addresses ?
-		ipv6_add_addr(idev, &addr, tmp_plen, ipv6_addr_scope(&addr),
-			      addr_flags) : NULL;
-	if (IS_ERR_OR_NULL(ift)) {
+	ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen,
+			    ipv6_addr_scope(&addr), addr_flags,
+			    tmp_valid_lft, tmp_prefered_lft);
+	if (IS_ERR(ift)) {
 		in6_ifa_put(ifp);
 		in6_dev_put(idev);
 		pr_info("%s: retry temporary address regeneration\n", __func__);
@@ -1136,8 +1140,6 @@ retry:
 
 	spin_lock_bh(&ift->lock);
 	ift->ifpub = ifp;
-	ift->valid_lft = tmp_valid_lft;
-	ift->prefered_lft = tmp_prefered_lft;
 	ift->cstamp = now;
 	ift->tstamp = tmp_tstamp;
 	spin_unlock_bh(&ift->lock);
@@ -2179,16 +2181,19 @@ ok:
 			 */
 			if (!max_addresses ||
 			    ipv6_count_addresses(in6_dev) < max_addresses)
-				ifp = ipv6_add_addr(in6_dev, &addr, pinfo->prefix_len,
+				ifp = ipv6_add_addr(in6_dev, &addr, NULL,
+						    pinfo->prefix_len,
 						    addr_type&IPV6_ADDR_SCOPE_MASK,
-						    addr_flags);
+						    addr_flags, valid_lft,
+						    prefered_lft);
 
 			if (IS_ERR_OR_NULL(ifp)) {
 				in6_dev_put(in6_dev);
 				return;
 			}
 
-			update_lft = create = 1;
+			update_lft = 0;
+			create = 1;
 			ifp->cstamp = jiffies;
 			ifp->tokenized = tokenized;
 			addrconf_dad_start(ifp);
@@ -2209,7 +2214,7 @@ ok:
 			stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
 		else
 			stored_lft = 0;
-		if (!update_lft && stored_lft) {
+		if (!update_lft && !create && stored_lft) {
 			if (valid_lft > MIN_VALID_LIFETIME ||
 			    valid_lft > stored_lft)
 				update_lft = 1;
@@ -2455,17 +2460,10 @@ static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *p
 		prefered_lft = timeout;
 	}
 
-	ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags);
+	ifp = ipv6_add_addr(idev, pfx, peer_pfx, plen, scope, ifa_flags,
+			    valid_lft, prefered_lft);
 
 	if (!IS_ERR(ifp)) {
-		spin_lock_bh(&ifp->lock);
-		ifp->valid_lft = valid_lft;
-		ifp->prefered_lft = prefered_lft;
-		ifp->tstamp = jiffies;
-		if (peer_pfx)
-			ifp->peer_addr = *peer_pfx;
-		spin_unlock_bh(&ifp->lock);
-
 		addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev,
 				      expires, flags);
 		/*
@@ -2557,7 +2555,8 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
 {
 	struct inet6_ifaddr *ifp;
 
-	ifp = ipv6_add_addr(idev, addr, plen, scope, IFA_F_PERMANENT);
+	ifp = ipv6_add_addr(idev, addr, NULL, plen,
+			    scope, IFA_F_PERMANENT, 0, 0);
 	if (!IS_ERR(ifp)) {
 		spin_lock_bh(&ifp->lock);
 		ifp->flags &= ~IFA_F_TENTATIVE;
@@ -2683,7 +2682,7 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
 #endif
 
 
-	ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, addr_flags);
+	ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, 0, 0);
 	if (!IS_ERR(ifp)) {
 		addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
 		addrconf_dad_start(ifp);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 40ffd72243a4..aeac0dc3635d 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
 		net_adj = 0;
 
 	return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
-		 net_adj) & ~(align - 1)) + (net_adj - 2);
+		 net_adj) & ~(align - 1)) + net_adj - 2;
 }
 
 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5fc9c7a68d8d..c4ff5bbb45c4 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -993,14 +993,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
 
 		if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
 #ifdef CONFIG_IPV6_SUBTREES
-			if (fn->subtree)
-				fn = fib6_lookup_1(fn->subtree, args + 1);
+			if (fn->subtree) {
+				struct fib6_node *sfn;
+				sfn = fib6_lookup_1(fn->subtree,
+						    args + 1);
+				if (!sfn)
+					goto backtrack;
+				fn = sfn;
+			}
 #endif
-			if (!fn || fn->fn_flags & RTN_RTINFO)
+			if (fn->fn_flags & RTN_RTINFO)
 				return fn;
 		}
 	}
-
+#ifdef CONFIG_IPV6_SUBTREES
+backtrack:
+#endif
 		if (fn->fn_flags & RTN_ROOT)
 			break;
 
@@ -1632,27 +1640,28 @@ static int fib6_age(struct rt6_info *rt, void *arg)
 
 static DEFINE_SPINLOCK(fib6_gc_lock);
 
-void fib6_run_gc(unsigned long expires, struct net *net)
+void fib6_run_gc(unsigned long expires, struct net *net, bool force)
 {
-	if (expires != ~0UL) {
+	unsigned long now;
+
+	if (force) {
 		spin_lock_bh(&fib6_gc_lock);
-		gc_args.timeout = expires ? (int)expires :
-			net->ipv6.sysctl.ip6_rt_gc_interval;
-	} else {
-		if (!spin_trylock_bh(&fib6_gc_lock)) {
-			mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
-			return;
-		}
-		gc_args.timeout = net->ipv6.sysctl.ip6_rt_gc_interval;
+	} else if (!spin_trylock_bh(&fib6_gc_lock)) {
+		mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
+		return;
 	}
+	gc_args.timeout = expires ? (int)expires :
+			  net->ipv6.sysctl.ip6_rt_gc_interval;
 
 	gc_args.more = icmp6_dst_gc();
 
 	fib6_clean_all(net, fib6_age, 0, NULL);
+	now = jiffies;
+	net->ipv6.ip6_rt_last_gc = now;
 
 	if (gc_args.more)
 		mod_timer(&net->ipv6.ip6_fib_timer,
-			  round_jiffies(jiffies
+			  round_jiffies(now
 					+ net->ipv6.sysctl.ip6_rt_gc_interval));
 	else
 		del_timer(&net->ipv6.ip6_fib_timer);
@@ -1661,7 +1670,7 @@ void fib6_run_gc(unsigned long expires, struct net *net)
 
 static void fib6_gc_timer_cb(unsigned long arg)
 {
-	fib6_run_gc(0, (struct net *)arg);
+	fib6_run_gc(0, (struct net *)arg, true);
 }
 
 static int __net_init fib6_net_init(struct net *net)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 583e8d435f9a..03986d31fa41 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -259,10 +259,12 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
 {
 	struct mr6_table *mrt, *next;
 
+	rtnl_lock();
 	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
 		list_del(&mrt->list);
 		ip6mr_free_table(mrt);
 	}
+	rtnl_unlock();
 	fib_rules_unregister(net->ipv6.mr6_rules_ops);
 }
 #else
268#else 270#else
@@ -289,7 +291,10 @@ static int __net_init ip6mr_rules_init(struct net *net)
 
 static void __net_exit ip6mr_rules_exit(struct net *net)
 {
+	rtnl_lock();
 	ip6mr_free_table(net->ipv6.mrt6);
+	net->ipv6.mrt6 = NULL;
+	rtnl_unlock();
 }
 #endif
 
295 300
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 24c03396e008..04d31c2fbef1 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1369,8 +1369,10 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
 	if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts))
 		return;
 
-	if (!ndopts.nd_opts_rh)
+	if (!ndopts.nd_opts_rh) {
+		ip6_redirect_no_header(skb, dev_net(skb->dev), 0, 0);
 		return;
+	}
 
 	hdr = (u8 *)ndopts.nd_opts_rh;
 	hdr += 8;
@@ -1576,7 +1578,7 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
 	switch (event) {
 	case NETDEV_CHANGEADDR:
 		neigh_changeaddr(&nd_tbl, dev);
-		fib6_run_gc(~0UL, net);
+		fib6_run_gc(0, net, false);
 		idev = in6_dev_get(dev);
 		if (!idev)
 			break;
@@ -1586,7 +1588,7 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
 		break;
 	case NETDEV_DOWN:
 		neigh_ifdown(&nd_tbl, dev);
-		fib6_run_gc(~0UL, net);
+		fib6_run_gc(0, net, false);
 		break;
 	case NETDEV_NOTIFY_PEERS:
 		ndisc_send_unsol_na(dev);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 790d9f4b8b0b..1aeb473b2cc6 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -490,6 +490,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	ipv6_hdr(head)->payload_len = htons(payload_len);
 	ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
 	IP6CB(head)->nhoff = nhoff;
+	IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
 
 	/* Yes, and fold redundant checksum back. 8) */
 	if (head->ip_summed == CHECKSUM_COMPLETE)
@@ -524,6 +525,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 	struct net *net = dev_net(skb_dst(skb)->dev);
 	int evicted;
 
+	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
+		goto fail_hdr;
+
 	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
 
 	/* Jumbo payload inhibits frag. header */
529 /* Jumbo payload inhibits frag. header */ 533 /* Jumbo payload inhibits frag. header */
@@ -544,6 +548,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 			 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
 
 		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
+		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
 		return 1;
 	}
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a8c891aa2464..8d9a93ed9c59 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1178,6 +1178,27 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
 }
 EXPORT_SYMBOL_GPL(ip6_redirect);
 
+void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
+			    u32 mark)
+{
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
+	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
+	struct dst_entry *dst;
+	struct flowi6 fl6;
+
+	memset(&fl6, 0, sizeof(fl6));
+	fl6.flowi6_oif = oif;
+	fl6.flowi6_mark = mark;
+	fl6.flowi6_flags = 0;
+	fl6.daddr = msg->dest;
+	fl6.saddr = iph->daddr;
+
+	dst = ip6_route_output(net, NULL, &fl6);
+	if (!dst->error)
+		rt6_do_redirect(dst, NULL, skb);
+	dst_release(dst);
+}
+
 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
 {
 	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
@@ -1311,7 +1332,6 @@ static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
 
 static int ip6_dst_gc(struct dst_ops *ops)
 {
-	unsigned long now = jiffies;
 	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
 	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
 	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
@@ -1321,13 +1341,12 @@ static int ip6_dst_gc(struct dst_ops *ops)
 	int entries;
 
 	entries = dst_entries_get_fast(ops);
-	if (time_after(rt_last_gc + rt_min_interval, now) &&
+	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
 	    entries <= rt_max_size)
 		goto out;
 
 	net->ipv6.ip6_rt_gc_expire++;
-	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
-	net->ipv6.ip6_rt_last_gc = now;
+	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, entries > rt_max_size);
 	entries = dst_entries_get_slow(ops);
 	if (entries < ops->gc_thresh)
 		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
@@ -2827,7 +2846,7 @@ int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
 	net = (struct net *)ctl->extra1;
 	delay = net->ipv6.sysctl.flush_delay;
 	proc_dointvec(ctl, write, buffer, lenp, ppos);
-	fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
+	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
 	return 0;
 }
 
2833 2852
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 9da862070dd8..ab8bd2cabfa0 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2081,6 +2081,7 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, const struct xfrm_policy *
 		pol->sadb_x_policy_type = IPSEC_POLICY_NONE;
 	}
 	pol->sadb_x_policy_dir = dir+1;
+	pol->sadb_x_policy_reserved = 0;
 	pol->sadb_x_policy_id = xp->index;
 	pol->sadb_x_policy_priority = xp->priority;
 
@@ -3137,7 +3138,9 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
 	pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
 	pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
 	pol->sadb_x_policy_dir = XFRM_POLICY_OUT + 1;
+	pol->sadb_x_policy_reserved = 0;
 	pol->sadb_x_policy_id = xp->index;
+	pol->sadb_x_policy_priority = xp->priority;
 
 	/* Set sadb_comb's. */
 	if (x->id.proto == IPPROTO_AH)
@@ -3525,6 +3528,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
 	pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
 	pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
 	pol->sadb_x_policy_dir = dir + 1;
+	pol->sadb_x_policy_reserved = 0;
 	pol->sadb_x_policy_id = 0;
 	pol->sadb_x_policy_priority = 0;
 
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 8184d121ff09..43dd7525bfcb 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -666,6 +666,8 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
 			if (sta->sdata->dev != dev)
 				continue;
 
+			sinfo.filled = 0;
+			sta_set_sinfo(sta, &sinfo);
 			i = 0;
 			ADD_STA_STATS(sta);
 		}
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
index 3b7bfc01ee36..22290a929b94 100644
--- a/net/mac80211/mesh_ps.c
+++ b/net/mac80211/mesh_ps.c
@@ -229,6 +229,10 @@ void ieee80211_mps_sta_status_update(struct sta_info *sta)
 	enum nl80211_mesh_power_mode pm;
 	bool do_buffer;
 
+	/* For non-assoc STA, prevent buffering or frame transmission */
+	if (sta->sta_state < IEEE80211_STA_ASSOC)
+		return;
+
 	/*
 	 * use peer-specific power mode if peering is established and the
 	 * peer's power mode is known
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index ae31968d42d3..cc9e02d79b55 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -31,10 +31,12 @@
31#include "led.h" 31#include "led.h"
32 32
33#define IEEE80211_AUTH_TIMEOUT (HZ / 5) 33#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
34#define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2)
34#define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10) 35#define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10)
35#define IEEE80211_AUTH_MAX_TRIES 3 36#define IEEE80211_AUTH_MAX_TRIES 3
36#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) 37#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5)
37#define IEEE80211_ASSOC_TIMEOUT (HZ / 5) 38#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
39#define IEEE80211_ASSOC_TIMEOUT_LONG (HZ / 2)
38#define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10) 40#define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10)
39#define IEEE80211_ASSOC_MAX_TRIES 3 41#define IEEE80211_ASSOC_MAX_TRIES 3
40 42
@@ -209,8 +211,9 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 			     struct ieee80211_channel *channel,
 			     const struct ieee80211_ht_operation *ht_oper,
 			     const struct ieee80211_vht_operation *vht_oper,
-			     struct cfg80211_chan_def *chandef, bool verbose)
+			     struct cfg80211_chan_def *chandef, bool tracking)
 {
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct cfg80211_chan_def vht_chandef;
 	u32 ht_cfreq, ret;
 
@@ -229,7 +232,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 	ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
 						  channel->band);
 	/* check that channel matches the right operating channel */
-	if (channel->center_freq != ht_cfreq) {
+	if (!tracking && channel->center_freq != ht_cfreq) {
 		/*
 		 * It's possible that some APs are confused here;
 		 * Netgear WNDR3700 sometimes reports 4 higher than
@@ -237,11 +240,10 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 		 * since we look at probe response/beacon data here
 		 * it should be OK.
 		 */
-		if (verbose)
-			sdata_info(sdata,
-				   "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
-				   channel->center_freq, ht_cfreq,
-				   ht_oper->primary_chan, channel->band);
+		sdata_info(sdata,
+			   "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
+			   channel->center_freq, ht_cfreq,
+			   ht_oper->primary_chan, channel->band);
 		ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
 		goto out;
 	}
@@ -295,7 +297,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 							       channel->band);
 		break;
 	default:
-		if (verbose)
+		if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
 			sdata_info(sdata,
 				   "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
 				   vht_oper->chan_width);
@@ -304,7 +306,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 	}
 
 	if (!cfg80211_chandef_valid(&vht_chandef)) {
-		if (verbose)
+		if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
 			sdata_info(sdata,
 				   "AP VHT information is invalid, disable VHT\n");
 		ret = IEEE80211_STA_DISABLE_VHT;
@@ -317,7 +319,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 	}
 
 	if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
-		if (verbose)
+		if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
 			sdata_info(sdata,
 				   "AP VHT information doesn't match HT, disable VHT\n");
 		ret = IEEE80211_STA_DISABLE_VHT;
@@ -333,18 +335,27 @@ out:
 	if (ret & IEEE80211_STA_DISABLE_VHT)
 		vht_chandef = *chandef;
 
+	/*
+	 * Ignore the DISABLED flag when we're already connected and only
+	 * tracking the APs beacon for bandwidth changes - otherwise we
+	 * might get disconnected here if we connect to an AP, update our
+	 * regulatory information based on the AP's country IE and the
+	 * information we have is wrong/outdated and disables the channel
+	 * that we're actually using for the connection to the AP.
+	 */
 	while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
-					IEEE80211_CHAN_DISABLED)) {
+					tracking ? 0 :
+						   IEEE80211_CHAN_DISABLED)) {
 		if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
 			ret = IEEE80211_STA_DISABLE_HT |
 			      IEEE80211_STA_DISABLE_VHT;
-			goto out;
+			break;
 		}
 
 		ret |= chandef_downgrade(chandef);
 	}
 
-	if (chandef->width != vht_chandef.width && verbose)
+	if (chandef->width != vht_chandef.width && !tracking)
 		sdata_info(sdata,
 			   "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
 
@@ -384,7 +395,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
 
 	/* calculate new channel (type) based on HT/VHT operation IEs */
 	flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper,
-					     vht_oper, &chandef, false);
+					     vht_oper, &chandef, true);
 
 	/*
 	 * Downgrade the new channel if we associated with restricted
@@ -3394,10 +3405,13 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
 
 	if (tx_flags == 0) {
 		auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
-		ifmgd->auth_data->timeout_started = true;
+		auth_data->timeout_started = true;
 		run_again(sdata, auth_data->timeout);
 	} else {
-		auth_data->timeout_started = false;
+		auth_data->timeout =
+			round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG);
+		auth_data->timeout_started = true;
+		run_again(sdata, auth_data->timeout);
 	}
 
 	return 0;
@@ -3434,7 +3448,11 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
 		assoc_data->timeout_started = true;
 		run_again(sdata, assoc_data->timeout);
 	} else {
-		assoc_data->timeout_started = false;
+		assoc_data->timeout =
+			round_jiffies_up(jiffies +
+					 IEEE80211_ASSOC_TIMEOUT_LONG);
+		assoc_data->timeout_started = true;
+		run_again(sdata, assoc_data->timeout);
 	}
 
 	return 0;
@@ -3829,7 +3847,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
 	ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
 						     cbss->channel,
 						     ht_oper, vht_oper,
-						     &chandef, true);
+						     &chandef, false);
 
 	sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
 				      local->rx_chains);
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 7fc5d0d8149a..340126204343 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -99,10 +99,13 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 	}
 	mutex_unlock(&local->sta_mtx);
 
-	/* remove all interfaces */
+	/* remove all interfaces that were created in the driver */
 	list_for_each_entry(sdata, &local->interfaces, list) {
-		if (!ieee80211_sdata_running(sdata))
+		if (!ieee80211_sdata_running(sdata) ||
+		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+		    sdata->vif.type == NL80211_IFTYPE_MONITOR)
 			continue;
+
 		drv_remove_interface(local, sdata);
 	}
 
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index ac7ef5414bde..e6512e2ffd20 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -290,7 +290,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 	struct minstrel_rate *msr, *mr;
 	unsigned int ndx;
 	bool mrr_capable;
-	bool prev_sample = mi->prev_sample;
+	bool prev_sample;
 	int delta;
 	int sampling_ratio;
 
@@ -314,6 +314,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 			(mi->sample_count + mi->sample_deferred / 2);
 
 	/* delta < 0: no sampling required */
+	prev_sample = mi->prev_sample;
 	mi->prev_sample = false;
 	if (delta < 0 || (!mrr_capable && prev_sample))
 		return;
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 5b2d3012b983..f5aed963b22e 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -804,10 +804,18 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
 
 	sample_group = &minstrel_mcs_groups[sample_idx / MCS_GROUP_RATES];
 	info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+	rate->count = 1;
+
+	if (sample_idx / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
+		int idx = sample_idx % ARRAY_SIZE(mp->cck_rates);
+		rate->idx = mp->cck_rates[idx];
+		rate->flags = 0;
+		return;
+	}
+
 	rate->idx = sample_idx % MCS_GROUP_RATES +
 		    (sample_group->streams - 1) * MCS_GROUP_RATES;
 	rate->flags = IEEE80211_TX_RC_MCS | sample_group->flags;
-	rate->count = 1;
 }
 
 static void
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 23dbcfc69b3b..2c5a79bd3777 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -936,8 +936,14 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
 
-	/* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
-	if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
+	/*
+	 * Drop duplicate 802.11 retransmissions
+	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
+	 */
+	if (rx->skb->len >= 24 && rx->sta &&
+	    !ieee80211_is_ctl(hdr->frame_control) &&
+	    !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
+	    !is_multicast_ether_addr(hdr->addr1)) {
 		if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
 			     rx->sta->last_seq_ctrl[rx->seqno_idx] ==
 			     hdr->seq_ctrl)) {
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index c63b618cd619..4fd1ca94fd4a 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -293,6 +293,11 @@ void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
 		       sizeof(exp->tuple.dst.u3) - len);
 
 	exp->tuple.dst.u.all = *dst;
+
+#ifdef CONFIG_NF_NAT_NEEDED
+	memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
+	memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
+#endif
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_init);
 
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 7dcc376eea5f..2f8010707d01 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
 	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
 	__u32 seq, ack, sack, end, win, swin;
 	s16 receiver_offset;
-	bool res;
+	bool res, in_recv_win;
 
 	/*
 	 * Get the required data from the packet.
@@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct,
 		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
 		 receiver->td_scale);
 
+	/* Is the ending sequence in the receive window (if available)? */
+	in_recv_win = !receiver->td_maxwin ||
+		      after(end, sender->td_end - receiver->td_maxwin - 1);
+
 	pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
 		 before(seq, sender->td_maxend + 1),
-		 after(end, sender->td_end - receiver->td_maxwin - 1),
+		 (in_recv_win ? 1 : 0),
 		 before(sack, receiver->td_end + 1),
 		 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
 
 	if (before(seq, sender->td_maxend + 1) &&
-	    after(end, sender->td_end - receiver->td_maxwin - 1) &&
+	    in_recv_win &&
 	    before(sack, receiver->td_end + 1) &&
 	    after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
 		/*
@@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
 			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
 			"nf_ct_tcp: %s ",
 			before(seq, sender->td_maxend + 1) ?
-			after(end, sender->td_end - receiver->td_maxwin - 1) ?
+			in_recv_win ?
 			before(sack, receiver->td_end + 1) ?
 			after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
 			: "ACK is under the lower bound (possible overly delayed ACK)"
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 962e9792e317..d92cc317bf8b 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log,
 	nfmsg->version = NFNETLINK_V0;
 	nfmsg->res_id = htons(inst->group_num);
 
+	memset(&pmsg, 0, sizeof(pmsg));
 	pmsg.hw_protocol	= skb->protocol;
 	pmsg.hook		= hooknum;
 
@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log,
 	if (indev && skb->dev &&
 	    skb->mac_header != skb->network_header) {
 		struct nfulnl_msg_packet_hw phw;
-		int len = dev_parse_header(skb, phw.hw_addr);
+		int len;
+
+		memset(&phw, 0, sizeof(phw));
+		len = dev_parse_header(skb, phw.hw_addr);
 		if (len > 0) {
 			phw.hw_addrlen = htons(len);
 			if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 971ea145ab3e..8a703c3dd318 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -463,7 +463,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 	if (indev && entskb->dev &&
 	    entskb->mac_header != entskb->network_header) {
 		struct nfqnl_msg_packet_hw phw;
-		int len = dev_parse_header(entskb, phw.hw_addr);
+		int len;
+
+		memset(&phw, 0, sizeof(phw));
+		len = dev_parse_header(entskb, phw.hw_addr);
 		if (len) {
 			phw.hw_addrlen = htons(len);
 			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 7011c71646f0..6113cc7efffc 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 {
 	const struct xt_tcpmss_info *info = par->targinfo;
 	struct tcphdr *tcph;
-	unsigned int tcplen, i;
+	int len, tcp_hdrlen;
+	unsigned int i;
 	__be16 oldval;
 	u16 newmss;
 	u8 *opt;
@@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 	if (!skb_make_writable(skb, skb->len))
 		return -1;
 
-	tcplen = skb->len - tcphoff;
+	len = skb->len - tcphoff;
+	if (len < (int)sizeof(struct tcphdr))
+		return -1;
+
 	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+	tcp_hdrlen = tcph->doff * 4;
 
-	/* Header cannot be larger than the packet */
-	if (tcplen < tcph->doff*4)
+	if (len < tcp_hdrlen)
 		return -1;
 
 	if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 		newmss = info->mss;
 
 	opt = (u_int8_t *)tcph;
-	for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
-		if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
-		    opt[i+1] == TCPOLEN_MSS) {
+	for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
+		if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
 			u_int16_t oldmss;
 
 			oldmss = (opt[i+2] << 8) | opt[i+3];
@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 	}
 
 	/* There is data after the header so the option can't be added
-	   without moving it, and doing so may make the SYN packet
-	   itself too large. Accept the packet unmodified instead. */
-	if (tcplen > tcph->doff*4)
+	 * without moving it, and doing so may make the SYN packet
+	 * itself too large. Accept the packet unmodified instead.
+	 */
+	if (len > tcp_hdrlen)
 		return 0;
 
 	/*
@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 	newmss = min(newmss, (u16)1220);
 
 	opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
-	memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
+	memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
 
 	inet_proto_csum_replace2(&tcph->check, skb,
-				 htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
+				 htons(len), htons(len + TCPOLEN_MSS), 1);
 	opt[0] = TCPOPT_MSS;
 	opt[1] = TCPOLEN_MSS;
 	opt[2] = (newmss & 0xff00) >> 8;
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index b68fa191710f..625fa1d636a0 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
 	struct tcphdr *tcph;
 	u_int16_t n, o;
 	u_int8_t *opt;
-	int len;
+	int len, tcp_hdrlen;
 
 	/* This is a fragment, no TCP header is available */
 	if (par->fragoff != 0)
@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
 		return NF_DROP;
 
 	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
-	if (tcph->doff * 4 > len)
+	tcp_hdrlen = tcph->doff * 4;
+
+	if (len < tcp_hdrlen)
 		return NF_DROP;
 
 	opt = (u_int8_t *)tcph;
@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
 	 * Walk through all TCP options - if we find some option to remove,
 	 * set all octets to %TCPOPT_NOP and adjust checksum.
 	 */
-	for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) {
+	for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) {
 		optl = optlen(opt, i);
 
-		if (i + optl > tcp_hdrlen(skb))
+		if (i + optl > tcp_hdrlen)
 			break;
 
 		if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index f8b71911037a..20b15916f403 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -172,7 +172,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
 
 		/* Ignore non-transparent sockets,
 		   if XT_SOCKET_TRANSPARENT is used */
-		if (info && info->flags & XT_SOCKET_TRANSPARENT)
+		if (info->flags & XT_SOCKET_TRANSPARENT)
 			transparent = ((sk->sk_state != TCP_TIME_WAIT &&
 					inet_sk(sk)->transparent) ||
 				       (sk->sk_state == TCP_TIME_WAIT &&
@@ -196,7 +196,11 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
 static bool
 socket_mt4_v0(const struct sk_buff *skb, struct xt_action_param *par)
 {
-	return socket_match(skb, par, NULL);
+	static struct xt_socket_mtinfo1 xt_info_v0 = {
+		.flags = 0,
+	};
+
+	return socket_match(skb, par, &xt_info_v0);
 }
 
 static bool
@@ -314,7 +318,7 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
 
 	/* Ignore non-transparent sockets,
 	   if XT_SOCKET_TRANSPARENT is used */
-	if (info && info->flags & XT_SOCKET_TRANSPARENT)
+	if (info->flags & XT_SOCKET_TRANSPARENT)
 		transparent = ((sk->sk_state != TCP_TIME_WAIT &&
 				inet_sk(sk)->transparent) ||
 			       (sk->sk_state == TCP_TIME_WAIT &&
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index c15042f987bd..a1100640495d 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -691,8 +691,8 @@ static int netlbl_cipsov4_remove_cb(struct netlbl_dom_map *entry, void *arg)
 {
 	struct netlbl_domhsh_walk_arg *cb_arg = arg;
 
-	if (entry->type == NETLBL_NLTYPE_CIPSOV4 &&
-	    entry->type_def.cipsov4->doi == cb_arg->doi)
+	if (entry->def.type == NETLBL_NLTYPE_CIPSOV4 &&
+	    entry->def.cipso->doi == cb_arg->doi)
 		return netlbl_domhsh_remove_entry(entry, cb_arg->audit_info);
 
 	return 0;
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 6bb1d42f0fac..85d842e6e431 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -84,15 +84,15 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
 #endif /* IPv6 */
 
 	ptr = container_of(entry, struct netlbl_dom_map, rcu);
-	if (ptr->type == NETLBL_NLTYPE_ADDRSELECT) {
+	if (ptr->def.type == NETLBL_NLTYPE_ADDRSELECT) {
 		netlbl_af4list_foreach_safe(iter4, tmp4,
-					    &ptr->type_def.addrsel->list4) {
+					    &ptr->def.addrsel->list4) {
 			netlbl_af4list_remove_entry(iter4);
 			kfree(netlbl_domhsh_addr4_entry(iter4));
 		}
 #if IS_ENABLED(CONFIG_IPV6)
 		netlbl_af6list_foreach_safe(iter6, tmp6,
-					    &ptr->type_def.addrsel->list6) {
+					    &ptr->def.addrsel->list6) {
 			netlbl_af6list_remove_entry(iter6);
 			kfree(netlbl_domhsh_addr6_entry(iter6));
 		}
@@ -213,21 +213,21 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry,
 	if (addr4 != NULL) {
 		struct netlbl_domaddr4_map *map4;
 		map4 = netlbl_domhsh_addr4_entry(addr4);
-		type = map4->type;
-		cipsov4 = map4->type_def.cipsov4;
+		type = map4->def.type;
+		cipsov4 = map4->def.cipso;
 		netlbl_af4list_audit_addr(audit_buf, 0, NULL,
 					  addr4->addr, addr4->mask);
 #if IS_ENABLED(CONFIG_IPV6)
 	} else if (addr6 != NULL) {
 		struct netlbl_domaddr6_map *map6;
 		map6 = netlbl_domhsh_addr6_entry(addr6);
-		type = map6->type;
+		type = map6->def.type;
 		netlbl_af6list_audit_addr(audit_buf, 0, NULL,
 					  &addr6->addr, &addr6->mask);
 #endif /* IPv6 */
 	} else {
-		type = entry->type;
-		cipsov4 = entry->type_def.cipsov4;
+		type = entry->def.type;
+		cipsov4 = entry->def.cipso;
 	}
 	switch (type) {
 	case NETLBL_NLTYPE_UNLABELED:
@@ -265,26 +265,25 @@ static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
 	if (entry == NULL)
 		return -EINVAL;
 
-	switch (entry->type) {
+	switch (entry->def.type) {
 	case NETLBL_NLTYPE_UNLABELED:
-		if (entry->type_def.cipsov4 != NULL ||
-		    entry->type_def.addrsel != NULL)
+		if (entry->def.cipso != NULL || entry->def.addrsel != NULL)
 			return -EINVAL;
 		break;
 	case NETLBL_NLTYPE_CIPSOV4:
-		if (entry->type_def.cipsov4 == NULL)
+		if (entry->def.cipso == NULL)
 			return -EINVAL;
 		break;
 	case NETLBL_NLTYPE_ADDRSELECT:
-		netlbl_af4list_foreach(iter4, &entry->type_def.addrsel->list4) {
+		netlbl_af4list_foreach(iter4, &entry->def.addrsel->list4) {
 			map4 = netlbl_domhsh_addr4_entry(iter4);
-			switch (map4->type) {
+			switch (map4->def.type) {
 			case NETLBL_NLTYPE_UNLABELED:
-				if (map4->type_def.cipsov4 != NULL)
+				if (map4->def.cipso != NULL)
 					return -EINVAL;
 				break;
 			case NETLBL_NLTYPE_CIPSOV4:
-				if (map4->type_def.cipsov4 == NULL)
+				if (map4->def.cipso == NULL)
 					return -EINVAL;
 				break;
 			default:
@@ -292,9 +291,9 @@ static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
 			}
 		}
 #if IS_ENABLED(CONFIG_IPV6)
-		netlbl_af6list_foreach(iter6, &entry->type_def.addrsel->list6) {
+		netlbl_af6list_foreach(iter6, &entry->def.addrsel->list6) {
 			map6 = netlbl_domhsh_addr6_entry(iter6);
-			switch (map6->type) {
+			switch (map6->def.type) {
 			case NETLBL_NLTYPE_UNLABELED:
 				break;
 			default:
@@ -402,32 +401,31 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
 			rcu_assign_pointer(netlbl_domhsh_def, entry);
 		}
 
-		if (entry->type == NETLBL_NLTYPE_ADDRSELECT) {
+		if (entry->def.type == NETLBL_NLTYPE_ADDRSELECT) {
 			netlbl_af4list_foreach_rcu(iter4,
-						   &entry->type_def.addrsel->list4)
+						   &entry->def.addrsel->list4)
 				netlbl_domhsh_audit_add(entry, iter4, NULL,
 							ret_val, audit_info);
 #if IS_ENABLED(CONFIG_IPV6)
 			netlbl_af6list_foreach_rcu(iter6,
-						   &entry->type_def.addrsel->list6)
+						   &entry->def.addrsel->list6)
 				netlbl_domhsh_audit_add(entry, NULL, iter6,
 							ret_val, audit_info);
 #endif /* IPv6 */
 		} else
 			netlbl_domhsh_audit_add(entry, NULL, NULL,
 						ret_val, audit_info);
-	} else if (entry_old->type == NETLBL_NLTYPE_ADDRSELECT &&
-		   entry->type == NETLBL_NLTYPE_ADDRSELECT) {
+	} else if (entry_old->def.type == NETLBL_NLTYPE_ADDRSELECT &&
+		   entry->def.type == NETLBL_NLTYPE_ADDRSELECT) {
 		struct list_head *old_list4;
 		struct list_head *old_list6;
 
-		old_list4 = &entry_old->type_def.addrsel->list4;
-		old_list6 = &entry_old->type_def.addrsel->list6;
+		old_list4 = &entry_old->def.addrsel->list4;
+		old_list6 = &entry_old->def.addrsel->list6;
 
 		/* we only allow the addition of address selectors if all of
 		 * the selectors do not exist in the existing domain map */
-		netlbl_af4list_foreach_rcu(iter4,
-					   &entry->type_def.addrsel->list4)
+		netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4)
 			if (netlbl_af4list_search_exact(iter4->addr,
 							iter4->mask,
 							old_list4)) {
@@ -435,8 +433,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
 				goto add_return;
 			}
 #if IS_ENABLED(CONFIG_IPV6)
-		netlbl_af6list_foreach_rcu(iter6,
-					   &entry->type_def.addrsel->list6)
+		netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6)
 			if (netlbl_af6list_search_exact(&iter6->addr,
 							&iter6->mask,
 							old_list6)) {
@@ -446,7 +443,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
 #endif /* IPv6 */
 
 		netlbl_af4list_foreach_safe(iter4, tmp4,
-					    &entry->type_def.addrsel->list4) {
+					    &entry->def.addrsel->list4) {
 			netlbl_af4list_remove_entry(iter4);
 			iter4->valid = 1;
 			ret_val = netlbl_af4list_add(iter4, old_list4);
@@ -457,7 +454,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
 		}
 #if IS_ENABLED(CONFIG_IPV6)
 		netlbl_af6list_foreach_safe(iter6, tmp6,
-					    &entry->type_def.addrsel->list6) {
+					    &entry->def.addrsel->list6) {
 			netlbl_af6list_remove_entry(iter6);
 			iter6->valid = 1;
 			ret_val = netlbl_af6list_add(iter6, old_list6);
@@ -538,18 +535,18 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
 		struct netlbl_af4list *iter4;
 		struct netlbl_domaddr4_map *map4;
 
-		switch (entry->type) {
+		switch (entry->def.type) {
 		case NETLBL_NLTYPE_ADDRSELECT:
 			netlbl_af4list_foreach_rcu(iter4,
-						   &entry->type_def.addrsel->list4) {
+						   &entry->def.addrsel->list4) {
 				map4 = netlbl_domhsh_addr4_entry(iter4);
-				cipso_v4_doi_putdef(map4->type_def.cipsov4);
+				cipso_v4_doi_putdef(map4->def.cipso);
 			}
 			/* no need to check the IPv6 list since we currently
 			 * support only unlabeled protocols for IPv6 */
 			break;
 		case NETLBL_NLTYPE_CIPSOV4:
-			cipso_v4_doi_putdef(entry->type_def.cipsov4);
+			cipso_v4_doi_putdef(entry->def.cipso);
 			break;
 		}
 		call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
@@ -590,20 +587,21 @@ int netlbl_domhsh_remove_af4(const char *domain,
 		entry_map = netlbl_domhsh_search(domain);
 	else
 		entry_map = netlbl_domhsh_search_def(domain);
-	if (entry_map == NULL || entry_map->type != NETLBL_NLTYPE_ADDRSELECT)
+	if (entry_map == NULL ||
+	    entry_map->def.type != NETLBL_NLTYPE_ADDRSELECT)
 		goto remove_af4_failure;
 
 	spin_lock(&netlbl_domhsh_lock);
 	entry_addr = netlbl_af4list_remove(addr->s_addr, mask->s_addr,
-					   &entry_map->type_def.addrsel->list4);
+					   &entry_map->def.addrsel->list4);
 	spin_unlock(&netlbl_domhsh_lock);
 
 	if (entry_addr == NULL)
 		goto remove_af4_failure;
-	netlbl_af4list_foreach_rcu(iter4, &entry_map->type_def.addrsel->list4)
+	netlbl_af4list_foreach_rcu(iter4, &entry_map->def.addrsel->list4)
 		goto remove_af4_single_addr;
 #if IS_ENABLED(CONFIG_IPV6)
-	netlbl_af6list_foreach_rcu(iter6, &entry_map->type_def.addrsel->list6)
+	netlbl_af6list_foreach_rcu(iter6, &entry_map->def.addrsel->list6)
 		goto remove_af4_single_addr;
 #endif /* IPv6 */
 	/* the domain mapping is empty so remove it from the mapping table */
@@ -616,7 +614,7 @@ remove_af4_single_addr:
616 * shouldn't be a problem */ 614 * shouldn't be a problem */
617 synchronize_rcu(); 615 synchronize_rcu();
618 entry = netlbl_domhsh_addr4_entry(entry_addr); 616 entry = netlbl_domhsh_addr4_entry(entry_addr);
619 cipso_v4_doi_putdef(entry->type_def.cipsov4); 617 cipso_v4_doi_putdef(entry->def.cipso);
620 kfree(entry); 618 kfree(entry);
621 return 0; 619 return 0;
622 620
@@ -693,8 +691,8 @@ struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain)
693 * responsible for ensuring that rcu_read_[un]lock() is called. 691 * responsible for ensuring that rcu_read_[un]lock() is called.
694 * 692 *
695 */ 693 */
696struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain, 694struct netlbl_dommap_def *netlbl_domhsh_getentry_af4(const char *domain,
697 __be32 addr) 695 __be32 addr)
698{ 696{
699 struct netlbl_dom_map *dom_iter; 697 struct netlbl_dom_map *dom_iter;
700 struct netlbl_af4list *addr_iter; 698 struct netlbl_af4list *addr_iter;
@@ -702,15 +700,13 @@ struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
702 dom_iter = netlbl_domhsh_search_def(domain); 700 dom_iter = netlbl_domhsh_search_def(domain);
703 if (dom_iter == NULL) 701 if (dom_iter == NULL)
704 return NULL; 702 return NULL;
705 if (dom_iter->type != NETLBL_NLTYPE_ADDRSELECT)
706 return NULL;
707 703
708 addr_iter = netlbl_af4list_search(addr, 704 if (dom_iter->def.type != NETLBL_NLTYPE_ADDRSELECT)
709 &dom_iter->type_def.addrsel->list4); 705 return &dom_iter->def;
706 addr_iter = netlbl_af4list_search(addr, &dom_iter->def.addrsel->list4);
710 if (addr_iter == NULL) 707 if (addr_iter == NULL)
711 return NULL; 708 return NULL;
712 709 return &(netlbl_domhsh_addr4_entry(addr_iter)->def);
713 return netlbl_domhsh_addr4_entry(addr_iter);
714} 710}
715 711
716#if IS_ENABLED(CONFIG_IPV6) 712#if IS_ENABLED(CONFIG_IPV6)
@@ -725,7 +721,7 @@ struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
725 * responsible for ensuring that rcu_read_[un]lock() is called. 721 * responsible for ensuring that rcu_read_[un]lock() is called.
726 * 722 *
727 */ 723 */
728struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain, 724struct netlbl_dommap_def *netlbl_domhsh_getentry_af6(const char *domain,
729 const struct in6_addr *addr) 725 const struct in6_addr *addr)
730{ 726{
731 struct netlbl_dom_map *dom_iter; 727 struct netlbl_dom_map *dom_iter;
@@ -734,15 +730,13 @@ struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
734 dom_iter = netlbl_domhsh_search_def(domain); 730 dom_iter = netlbl_domhsh_search_def(domain);
735 if (dom_iter == NULL) 731 if (dom_iter == NULL)
736 return NULL; 732 return NULL;
737 if (dom_iter->type != NETLBL_NLTYPE_ADDRSELECT)
738 return NULL;
739 733
740 addr_iter = netlbl_af6list_search(addr, 734 if (dom_iter->def.type != NETLBL_NLTYPE_ADDRSELECT)
741 &dom_iter->type_def.addrsel->list6); 735 return &dom_iter->def;
736 addr_iter = netlbl_af6list_search(addr, &dom_iter->def.addrsel->list6);
742 if (addr_iter == NULL) 737 if (addr_iter == NULL)
743 return NULL; 738 return NULL;
744 739 return &(netlbl_domhsh_addr6_entry(addr_iter)->def);
745 return netlbl_domhsh_addr6_entry(addr_iter);
746} 740}
747#endif /* IPv6 */ 741#endif /* IPv6 */
748 742
diff --git a/net/netlabel/netlabel_domainhash.h b/net/netlabel/netlabel_domainhash.h
index 90872c4ca30f..b9be0eed8980 100644
--- a/net/netlabel/netlabel_domainhash.h
+++ b/net/netlabel/netlabel_domainhash.h
@@ -43,37 +43,35 @@
43#define NETLBL_DOMHSH_BITSIZE 7 43#define NETLBL_DOMHSH_BITSIZE 7
44 44
45/* Domain mapping definition structures */ 45/* Domain mapping definition structures */
46struct netlbl_domaddr_map {
47 struct list_head list4;
48 struct list_head list6;
49};
50struct netlbl_dommap_def {
51 u32 type;
52 union {
53 struct netlbl_domaddr_map *addrsel;
54 struct cipso_v4_doi *cipso;
55 };
56};
46#define netlbl_domhsh_addr4_entry(iter) \ 57#define netlbl_domhsh_addr4_entry(iter) \
47 container_of(iter, struct netlbl_domaddr4_map, list) 58 container_of(iter, struct netlbl_domaddr4_map, list)
48struct netlbl_domaddr4_map { 59struct netlbl_domaddr4_map {
49 u32 type; 60 struct netlbl_dommap_def def;
50 union {
51 struct cipso_v4_doi *cipsov4;
52 } type_def;
53 61
54 struct netlbl_af4list list; 62 struct netlbl_af4list list;
55}; 63};
56#define netlbl_domhsh_addr6_entry(iter) \ 64#define netlbl_domhsh_addr6_entry(iter) \
57 container_of(iter, struct netlbl_domaddr6_map, list) 65 container_of(iter, struct netlbl_domaddr6_map, list)
58struct netlbl_domaddr6_map { 66struct netlbl_domaddr6_map {
59 u32 type; 67 struct netlbl_dommap_def def;
60
61 /* NOTE: no 'type_def' union needed at present since we don't currently
62 * support any IPv6 labeling protocols */
63 68
64 struct netlbl_af6list list; 69 struct netlbl_af6list list;
65}; 70};
66struct netlbl_domaddr_map { 71
67 struct list_head list4;
68 struct list_head list6;
69};
70struct netlbl_dom_map { 72struct netlbl_dom_map {
71 char *domain; 73 char *domain;
72 u32 type; 74 struct netlbl_dommap_def def;
73 union {
74 struct cipso_v4_doi *cipsov4;
75 struct netlbl_domaddr_map *addrsel;
76 } type_def;
77 75
78 u32 valid; 76 u32 valid;
79 struct list_head list; 77 struct list_head list;
@@ -97,16 +95,16 @@ int netlbl_domhsh_remove_af4(const char *domain,
97int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info); 95int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info);
98int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info); 96int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info);
99struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain); 97struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain);
100struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain, 98struct netlbl_dommap_def *netlbl_domhsh_getentry_af4(const char *domain,
101 __be32 addr); 99 __be32 addr);
100#if IS_ENABLED(CONFIG_IPV6)
101struct netlbl_dommap_def *netlbl_domhsh_getentry_af6(const char *domain,
102 const struct in6_addr *addr);
103#endif /* IPv6 */
104
102int netlbl_domhsh_walk(u32 *skip_bkt, 105int netlbl_domhsh_walk(u32 *skip_bkt,
103 u32 *skip_chain, 106 u32 *skip_chain,
104 int (*callback) (struct netlbl_dom_map *entry, void *arg), 107 int (*callback) (struct netlbl_dom_map *entry, void *arg),
105 void *cb_arg); 108 void *cb_arg);
106 109
107#if IS_ENABLED(CONFIG_IPV6)
108struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
109 const struct in6_addr *addr);
110#endif /* IPv6 */
111
112#endif 110#endif
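
The refactor above collapses the three per-struct `type` fields and `type_def` unions into one shared `struct netlbl_dommap_def`, which is what lets the af4/af6 lookup helpers return the definition directly and fall back to the domain-wide definition when the entry is not an address selector (see the netlabel_domainhash.c hunks earlier). A minimal userspace sketch of the shape, with simplified types and illustrative names rather than the real kernel API:

    #include <stdio.h>

    /* Simplified model of the consolidated mapping definition; the
     * struct and field names echo the patch, everything else is toy. */
    struct cipso_doi { unsigned int doi; };

    struct dommap_def {
        unsigned int type;              /* NETLBL_NLTYPE_* selector */
        union {
            struct cipso_doi *cipso;    /* protocol-specific definition... */
            void *addrsel;              /* ...or an address-selector list */
        };
    };

    struct dom_map {
        const char *domain;
        struct dommap_def def;          /* one shared definition sub-struct */
    };

    /* Lookup helpers can now hand back the definition itself instead of
     * a protocol-specific entry type. */
    static struct dommap_def *getentry(struct dom_map *entry)
    {
        return &entry->def;
    }

    int main(void)
    {
        struct cipso_doi doi = { .doi = 3 };
        struct dom_map map = { .domain = "example" };

        map.def.type = 1;               /* stand-in for NETLBL_NLTYPE_CIPSOV4 */
        map.def.cipso = &doi;
        printf("type=%u doi=%u\n", getentry(&map)->type, getentry(&map)->cipso->doi);
        return 0;
    }

With the selector and union in one struct, a caller only ever switches on def.type and reads the matching union member, regardless of which map level the definition came from.
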
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 7c94aedd0912..96a458e12f60 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -122,7 +122,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
122 } 122 }
123 123
124 if (addr == NULL && mask == NULL) 124 if (addr == NULL && mask == NULL)
125 entry->type = NETLBL_NLTYPE_UNLABELED; 125 entry->def.type = NETLBL_NLTYPE_UNLABELED;
126 else if (addr != NULL && mask != NULL) { 126 else if (addr != NULL && mask != NULL) {
127 addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC); 127 addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
128 if (addrmap == NULL) 128 if (addrmap == NULL)
@@ -137,7 +137,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
137 map4 = kzalloc(sizeof(*map4), GFP_ATOMIC); 137 map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
138 if (map4 == NULL) 138 if (map4 == NULL)
139 goto cfg_unlbl_map_add_failure; 139 goto cfg_unlbl_map_add_failure;
140 map4->type = NETLBL_NLTYPE_UNLABELED; 140 map4->def.type = NETLBL_NLTYPE_UNLABELED;
141 map4->list.addr = addr4->s_addr & mask4->s_addr; 141 map4->list.addr = addr4->s_addr & mask4->s_addr;
142 map4->list.mask = mask4->s_addr; 142 map4->list.mask = mask4->s_addr;
143 map4->list.valid = 1; 143 map4->list.valid = 1;
@@ -154,7 +154,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
154 map6 = kzalloc(sizeof(*map6), GFP_ATOMIC); 154 map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
155 if (map6 == NULL) 155 if (map6 == NULL)
156 goto cfg_unlbl_map_add_failure; 156 goto cfg_unlbl_map_add_failure;
157 map6->type = NETLBL_NLTYPE_UNLABELED; 157 map6->def.type = NETLBL_NLTYPE_UNLABELED;
158 map6->list.addr = *addr6; 158 map6->list.addr = *addr6;
159 map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0]; 159 map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0];
160 map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1]; 160 map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1];
@@ -174,8 +174,8 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
174 break; 174 break;
175 } 175 }
176 176
177 entry->type_def.addrsel = addrmap; 177 entry->def.addrsel = addrmap;
178 entry->type = NETLBL_NLTYPE_ADDRSELECT; 178 entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
179 } else { 179 } else {
180 ret_val = -EINVAL; 180 ret_val = -EINVAL;
181 goto cfg_unlbl_map_add_failure; 181 goto cfg_unlbl_map_add_failure;
@@ -355,8 +355,8 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
355 } 355 }
356 356
357 if (addr == NULL && mask == NULL) { 357 if (addr == NULL && mask == NULL) {
358 entry->type_def.cipsov4 = doi_def; 358 entry->def.cipso = doi_def;
359 entry->type = NETLBL_NLTYPE_CIPSOV4; 359 entry->def.type = NETLBL_NLTYPE_CIPSOV4;
360 } else if (addr != NULL && mask != NULL) { 360 } else if (addr != NULL && mask != NULL) {
361 addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC); 361 addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
362 if (addrmap == NULL) 362 if (addrmap == NULL)
@@ -367,8 +367,8 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
367 addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC); 367 addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC);
368 if (addrinfo == NULL) 368 if (addrinfo == NULL)
369 goto out_addrinfo; 369 goto out_addrinfo;
370 addrinfo->type_def.cipsov4 = doi_def; 370 addrinfo->def.cipso = doi_def;
371 addrinfo->type = NETLBL_NLTYPE_CIPSOV4; 371 addrinfo->def.type = NETLBL_NLTYPE_CIPSOV4;
372 addrinfo->list.addr = addr->s_addr & mask->s_addr; 372 addrinfo->list.addr = addr->s_addr & mask->s_addr;
373 addrinfo->list.mask = mask->s_addr; 373 addrinfo->list.mask = mask->s_addr;
374 addrinfo->list.valid = 1; 374 addrinfo->list.valid = 1;
@@ -376,8 +376,8 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
376 if (ret_val != 0) 376 if (ret_val != 0)
377 goto cfg_cipsov4_map_add_failure; 377 goto cfg_cipsov4_map_add_failure;
378 378
379 entry->type_def.addrsel = addrmap; 379 entry->def.addrsel = addrmap;
380 entry->type = NETLBL_NLTYPE_ADDRSELECT; 380 entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
381 } else { 381 } else {
382 ret_val = -EINVAL; 382 ret_val = -EINVAL;
383 goto out_addrmap; 383 goto out_addrmap;
@@ -657,14 +657,14 @@ int netlbl_sock_setattr(struct sock *sk,
657 } 657 }
658 switch (family) { 658 switch (family) {
659 case AF_INET: 659 case AF_INET:
660 switch (dom_entry->type) { 660 switch (dom_entry->def.type) {
661 case NETLBL_NLTYPE_ADDRSELECT: 661 case NETLBL_NLTYPE_ADDRSELECT:
662 ret_val = -EDESTADDRREQ; 662 ret_val = -EDESTADDRREQ;
663 break; 663 break;
664 case NETLBL_NLTYPE_CIPSOV4: 664 case NETLBL_NLTYPE_CIPSOV4:
665 ret_val = cipso_v4_sock_setattr(sk, 665 ret_val = cipso_v4_sock_setattr(sk,
666 dom_entry->type_def.cipsov4, 666 dom_entry->def.cipso,
667 secattr); 667 secattr);
668 break; 668 break;
669 case NETLBL_NLTYPE_UNLABELED: 669 case NETLBL_NLTYPE_UNLABELED:
670 ret_val = 0; 670 ret_val = 0;
@@ -754,23 +754,22 @@ int netlbl_conn_setattr(struct sock *sk,
754{ 754{
755 int ret_val; 755 int ret_val;
756 struct sockaddr_in *addr4; 756 struct sockaddr_in *addr4;
757 struct netlbl_domaddr4_map *af4_entry; 757 struct netlbl_dommap_def *entry;
758 758
759 rcu_read_lock(); 759 rcu_read_lock();
760 switch (addr->sa_family) { 760 switch (addr->sa_family) {
761 case AF_INET: 761 case AF_INET:
762 addr4 = (struct sockaddr_in *)addr; 762 addr4 = (struct sockaddr_in *)addr;
763 af4_entry = netlbl_domhsh_getentry_af4(secattr->domain, 763 entry = netlbl_domhsh_getentry_af4(secattr->domain,
764 addr4->sin_addr.s_addr); 764 addr4->sin_addr.s_addr);
765 if (af4_entry == NULL) { 765 if (entry == NULL) {
766 ret_val = -ENOENT; 766 ret_val = -ENOENT;
767 goto conn_setattr_return; 767 goto conn_setattr_return;
768 } 768 }
769 switch (af4_entry->type) { 769 switch (entry->type) {
770 case NETLBL_NLTYPE_CIPSOV4: 770 case NETLBL_NLTYPE_CIPSOV4:
771 ret_val = cipso_v4_sock_setattr(sk, 771 ret_val = cipso_v4_sock_setattr(sk,
772 af4_entry->type_def.cipsov4, 772 entry->cipso, secattr);
773 secattr);
774 break; 773 break;
775 case NETLBL_NLTYPE_UNLABELED: 774 case NETLBL_NLTYPE_UNLABELED:
776 /* just delete the protocols we support for right now 775 /* just delete the protocols we support for right now
@@ -812,36 +811,21 @@ int netlbl_req_setattr(struct request_sock *req,
812 const struct netlbl_lsm_secattr *secattr) 811 const struct netlbl_lsm_secattr *secattr)
813{ 812{
814 int ret_val; 813 int ret_val;
815 struct netlbl_dom_map *dom_entry; 814 struct netlbl_dommap_def *entry;
816 struct netlbl_domaddr4_map *af4_entry;
817 u32 proto_type;
818 struct cipso_v4_doi *proto_cv4;
819 815
820 rcu_read_lock(); 816 rcu_read_lock();
821 dom_entry = netlbl_domhsh_getentry(secattr->domain);
822 if (dom_entry == NULL) {
823 ret_val = -ENOENT;
824 goto req_setattr_return;
825 }
826 switch (req->rsk_ops->family) { 817 switch (req->rsk_ops->family) {
827 case AF_INET: 818 case AF_INET:
828 if (dom_entry->type == NETLBL_NLTYPE_ADDRSELECT) { 819 entry = netlbl_domhsh_getentry_af4(secattr->domain,
829 struct inet_request_sock *req_inet = inet_rsk(req); 820 inet_rsk(req)->rmt_addr);
830 af4_entry = netlbl_domhsh_getentry_af4(secattr->domain, 821 if (entry == NULL) {
831 req_inet->rmt_addr); 822 ret_val = -ENOENT;
832 if (af4_entry == NULL) { 823 goto req_setattr_return;
833 ret_val = -ENOENT;
834 goto req_setattr_return;
835 }
836 proto_type = af4_entry->type;
837 proto_cv4 = af4_entry->type_def.cipsov4;
838 } else {
839 proto_type = dom_entry->type;
840 proto_cv4 = dom_entry->type_def.cipsov4;
841 } 824 }
842 switch (proto_type) { 825 switch (entry->type) {
843 case NETLBL_NLTYPE_CIPSOV4: 826 case NETLBL_NLTYPE_CIPSOV4:
844 ret_val = cipso_v4_req_setattr(req, proto_cv4, secattr); 827 ret_val = cipso_v4_req_setattr(req,
828 entry->cipso, secattr);
845 break; 829 break;
846 case NETLBL_NLTYPE_UNLABELED: 830 case NETLBL_NLTYPE_UNLABELED:
847 /* just delete the protocols we support for right now 831 /* just delete the protocols we support for right now
@@ -899,23 +883,21 @@ int netlbl_skbuff_setattr(struct sk_buff *skb,
899{ 883{
900 int ret_val; 884 int ret_val;
901 struct iphdr *hdr4; 885 struct iphdr *hdr4;
902 struct netlbl_domaddr4_map *af4_entry; 886 struct netlbl_dommap_def *entry;
903 887
904 rcu_read_lock(); 888 rcu_read_lock();
905 switch (family) { 889 switch (family) {
906 case AF_INET: 890 case AF_INET:
907 hdr4 = ip_hdr(skb); 891 hdr4 = ip_hdr(skb);
908 af4_entry = netlbl_domhsh_getentry_af4(secattr->domain, 892 entry = netlbl_domhsh_getentry_af4(secattr->domain, hdr4->daddr);
909 hdr4->daddr); 893 if (entry == NULL) {
910 if (af4_entry == NULL) {
911 ret_val = -ENOENT; 894 ret_val = -ENOENT;
912 goto skbuff_setattr_return; 895 goto skbuff_setattr_return;
913 } 896 }
914 switch (af4_entry->type) { 897 switch (entry->type) {
915 case NETLBL_NLTYPE_CIPSOV4: 898 case NETLBL_NLTYPE_CIPSOV4:
916 ret_val = cipso_v4_skbuff_setattr(skb, 899 ret_val = cipso_v4_skbuff_setattr(skb, entry->cipso,
917 af4_entry->type_def.cipsov4, 900 secattr);
918 secattr);
919 break; 901 break;
920 case NETLBL_NLTYPE_UNLABELED: 902 case NETLBL_NLTYPE_UNLABELED:
921 /* just delete the protocols we support for right now 903 /* just delete the protocols we support for right now
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index c5384ffc6146..dd1c37d7acbc 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -104,7 +104,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
104 ret_val = -ENOMEM; 104 ret_val = -ENOMEM;
105 goto add_failure; 105 goto add_failure;
106 } 106 }
107 entry->type = nla_get_u32(info->attrs[NLBL_MGMT_A_PROTOCOL]); 107 entry->def.type = nla_get_u32(info->attrs[NLBL_MGMT_A_PROTOCOL]);
108 if (info->attrs[NLBL_MGMT_A_DOMAIN]) { 108 if (info->attrs[NLBL_MGMT_A_DOMAIN]) {
109 size_t tmp_size = nla_len(info->attrs[NLBL_MGMT_A_DOMAIN]); 109 size_t tmp_size = nla_len(info->attrs[NLBL_MGMT_A_DOMAIN]);
110 entry->domain = kmalloc(tmp_size, GFP_KERNEL); 110 entry->domain = kmalloc(tmp_size, GFP_KERNEL);
@@ -116,12 +116,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
116 info->attrs[NLBL_MGMT_A_DOMAIN], tmp_size); 116 info->attrs[NLBL_MGMT_A_DOMAIN], tmp_size);
117 } 117 }
118 118
119 /* NOTE: internally we allow/use an entry->type value of 119 /* NOTE: internally we allow/use an entry->def.type value of
120 * NETLBL_NLTYPE_ADDRSELECT but we don't currently allow users 120 * NETLBL_NLTYPE_ADDRSELECT but we don't currently allow users
121 * to pass that as a protocol value because we need to know the 121 * to pass that as a protocol value because we need to know the
122 * "real" protocol */ 122 * "real" protocol */
123 123
124 switch (entry->type) { 124 switch (entry->def.type) {
125 case NETLBL_NLTYPE_UNLABELED: 125 case NETLBL_NLTYPE_UNLABELED:
126 break; 126 break;
127 case NETLBL_NLTYPE_CIPSOV4: 127 case NETLBL_NLTYPE_CIPSOV4:
@@ -132,7 +132,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
132 cipsov4 = cipso_v4_doi_getdef(tmp_val); 132 cipsov4 = cipso_v4_doi_getdef(tmp_val);
133 if (cipsov4 == NULL) 133 if (cipsov4 == NULL)
134 goto add_failure; 134 goto add_failure;
135 entry->type_def.cipsov4 = cipsov4; 135 entry->def.cipso = cipsov4;
136 break; 136 break;
137 default: 137 default:
138 goto add_failure; 138 goto add_failure;
@@ -172,9 +172,9 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
172 map->list.addr = addr->s_addr & mask->s_addr; 172 map->list.addr = addr->s_addr & mask->s_addr;
173 map->list.mask = mask->s_addr; 173 map->list.mask = mask->s_addr;
174 map->list.valid = 1; 174 map->list.valid = 1;
175 map->type = entry->type; 175 map->def.type = entry->def.type;
176 if (cipsov4) 176 if (cipsov4)
177 map->type_def.cipsov4 = cipsov4; 177 map->def.cipso = cipsov4;
178 178
179 ret_val = netlbl_af4list_add(&map->list, &addrmap->list4); 179 ret_val = netlbl_af4list_add(&map->list, &addrmap->list4);
180 if (ret_val != 0) { 180 if (ret_val != 0) {
@@ -182,8 +182,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
182 goto add_failure; 182 goto add_failure;
183 } 183 }
184 184
185 entry->type = NETLBL_NLTYPE_ADDRSELECT; 185 entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
186 entry->type_def.addrsel = addrmap; 186 entry->def.addrsel = addrmap;
187#if IS_ENABLED(CONFIG_IPV6) 187#if IS_ENABLED(CONFIG_IPV6)
188 } else if (info->attrs[NLBL_MGMT_A_IPV6ADDR]) { 188 } else if (info->attrs[NLBL_MGMT_A_IPV6ADDR]) {
189 struct in6_addr *addr; 189 struct in6_addr *addr;
@@ -223,7 +223,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
223 map->list.addr.s6_addr32[3] &= mask->s6_addr32[3]; 223 map->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
224 map->list.mask = *mask; 224 map->list.mask = *mask;
225 map->list.valid = 1; 225 map->list.valid = 1;
226 map->type = entry->type; 226 map->def.type = entry->def.type;
227 227
228 ret_val = netlbl_af6list_add(&map->list, &addrmap->list6); 228 ret_val = netlbl_af6list_add(&map->list, &addrmap->list6);
229 if (ret_val != 0) { 229 if (ret_val != 0) {
@@ -231,8 +231,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
231 goto add_failure; 231 goto add_failure;
232 } 232 }
233 233
234 entry->type = NETLBL_NLTYPE_ADDRSELECT; 234 entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
235 entry->type_def.addrsel = addrmap; 235 entry->def.addrsel = addrmap;
236#endif /* IPv6 */ 236#endif /* IPv6 */
237 } 237 }
238 238
@@ -281,14 +281,13 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
281 return ret_val; 281 return ret_val;
282 } 282 }
283 283
284 switch (entry->type) { 284 switch (entry->def.type) {
285 case NETLBL_NLTYPE_ADDRSELECT: 285 case NETLBL_NLTYPE_ADDRSELECT:
286 nla_a = nla_nest_start(skb, NLBL_MGMT_A_SELECTORLIST); 286 nla_a = nla_nest_start(skb, NLBL_MGMT_A_SELECTORLIST);
287 if (nla_a == NULL) 287 if (nla_a == NULL)
288 return -ENOMEM; 288 return -ENOMEM;
289 289
290 netlbl_af4list_foreach_rcu(iter4, 290 netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) {
291 &entry->type_def.addrsel->list4) {
292 struct netlbl_domaddr4_map *map4; 291 struct netlbl_domaddr4_map *map4;
293 struct in_addr addr_struct; 292 struct in_addr addr_struct;
294 293
@@ -310,13 +309,13 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
310 return ret_val; 309 return ret_val;
311 map4 = netlbl_domhsh_addr4_entry(iter4); 310 map4 = netlbl_domhsh_addr4_entry(iter4);
312 ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, 311 ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL,
313 map4->type); 312 map4->def.type);
314 if (ret_val != 0) 313 if (ret_val != 0)
315 return ret_val; 314 return ret_val;
316 switch (map4->type) { 315 switch (map4->def.type) {
317 case NETLBL_NLTYPE_CIPSOV4: 316 case NETLBL_NLTYPE_CIPSOV4:
318 ret_val = nla_put_u32(skb, NLBL_MGMT_A_CV4DOI, 317 ret_val = nla_put_u32(skb, NLBL_MGMT_A_CV4DOI,
319 map4->type_def.cipsov4->doi); 318 map4->def.cipso->doi);
320 if (ret_val != 0) 319 if (ret_val != 0)
321 return ret_val; 320 return ret_val;
322 break; 321 break;
@@ -325,8 +324,7 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
325 nla_nest_end(skb, nla_b); 324 nla_nest_end(skb, nla_b);
326 } 325 }
327#if IS_ENABLED(CONFIG_IPV6) 326#if IS_ENABLED(CONFIG_IPV6)
328 netlbl_af6list_foreach_rcu(iter6, 327 netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) {
329 &entry->type_def.addrsel->list6) {
330 struct netlbl_domaddr6_map *map6; 328 struct netlbl_domaddr6_map *map6;
331 329
332 nla_b = nla_nest_start(skb, NLBL_MGMT_A_ADDRSELECTOR); 330 nla_b = nla_nest_start(skb, NLBL_MGMT_A_ADDRSELECTOR);
@@ -345,7 +343,7 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
345 return ret_val; 343 return ret_val;
346 map6 = netlbl_domhsh_addr6_entry(iter6); 344 map6 = netlbl_domhsh_addr6_entry(iter6);
347 ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, 345 ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL,
348 map6->type); 346 map6->def.type);
349 if (ret_val != 0) 347 if (ret_val != 0)
350 return ret_val; 348 return ret_val;
351 349
@@ -356,14 +354,14 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
356 nla_nest_end(skb, nla_a); 354 nla_nest_end(skb, nla_a);
357 break; 355 break;
358 case NETLBL_NLTYPE_UNLABELED: 356 case NETLBL_NLTYPE_UNLABELED:
359 ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, entry->type); 357 ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, entry->def.type);
360 break; 358 break;
361 case NETLBL_NLTYPE_CIPSOV4: 359 case NETLBL_NLTYPE_CIPSOV4:
362 ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, entry->type); 360 ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, entry->def.type);
363 if (ret_val != 0) 361 if (ret_val != 0)
364 return ret_val; 362 return ret_val;
365 ret_val = nla_put_u32(skb, NLBL_MGMT_A_CV4DOI, 363 ret_val = nla_put_u32(skb, NLBL_MGMT_A_CV4DOI,
366 entry->type_def.cipsov4->doi); 364 entry->def.cipso->doi);
367 break; 365 break;
368 } 366 }
369 367
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index af3531926ee0..8f0897407a2c 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1541,7 +1541,7 @@ int __init netlbl_unlabel_defconf(void)
1541 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1541 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1542 if (entry == NULL) 1542 if (entry == NULL)
1543 return -ENOMEM; 1543 return -ENOMEM;
1544 entry->type = NETLBL_NLTYPE_UNLABELED; 1544 entry->def.type = NETLBL_NLTYPE_UNLABELED;
1545 ret_val = netlbl_domhsh_add_default(entry, &audit_info); 1545 ret_val = netlbl_domhsh_add_default(entry, &audit_info);
1546 if (ret_val != 0) 1546 if (ret_val != 0)
1547 return ret_val; 1547 return ret_val;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 2fd6dbea327a..512718adb0d5 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -571,7 +571,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
571 !capable(CAP_NET_ADMIN)) 571 !capable(CAP_NET_ADMIN))
572 return -EPERM; 572 return -EPERM;
573 573
574 if (nlh->nlmsg_flags & NLM_F_DUMP) { 574 if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
575 struct netlink_dump_control c = { 575 struct netlink_dump_control c = {
576 .dump = ops->dumpit, 576 .dump = ops->dumpit,
577 .done = ops->done, 577 .done = ops->done,
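
NLM_F_DUMP is not a single bit: include/uapi/linux/netlink.h defines it as NLM_F_ROOT|NLM_F_MATCH, so the old `flags & NLM_F_DUMP` test fired when either bit was set and a request carrying only NLM_F_ROOT was misrouted into the dump path. The fixed comparison requires the full mask. A self-contained illustration (constants copied from the UAPI header):

    #include <stdio.h>

    #define NLM_F_ROOT  0x100
    #define NLM_F_MATCH 0x200
    #define NLM_F_DUMP  (NLM_F_ROOT | NLM_F_MATCH)

    int main(void)
    {
        unsigned int flags = NLM_F_ROOT;   /* only one of the two bits set */

        /* Old test: nonzero for any overlap, so this wrongly matches. */
        printf("loose:  %d\n", (flags & NLM_F_DUMP) != 0);

        /* Fixed test: true only when every bit of the mask is present. */
        printf("strict: %d\n", (flags & NLM_F_DUMP) == NLM_F_DUMP);
        return 0;
    }

This loose-AND-against-a-multi-bit-mask pattern is only correct when the mask is known to be a single flag.
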
@@ -877,8 +877,10 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
877#ifdef CONFIG_MODULES 877#ifdef CONFIG_MODULES
878 if (res == NULL) { 878 if (res == NULL) {
879 genl_unlock(); 879 genl_unlock();
880 up_read(&cb_lock);
880 request_module("net-pf-%d-proto-%d-family-%s", 881 request_module("net-pf-%d-proto-%d-family-%s",
881 PF_NETLINK, NETLINK_GENERIC, name); 882 PF_NETLINK, NETLINK_GENERIC, name);
883 down_read(&cb_lock);
882 genl_lock(); 884 genl_lock();
883 res = genl_family_find_byname(name); 885 res = genl_family_find_byname(name);
884 } 886 }
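
The second genetlink hunk is a lock-ordering fix: request_module() blocks while the freshly loaded module's init routine registers its family, which takes cb_lock for writing, so holding the read side across the call could deadlock. The fix drops both locks around the load and repeats the lookup afterwards. A small pthreads sketch of the same pattern (stub lookup/load functions, not the genetlink API):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t cb_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Stand-ins for genl_family_find_byname()/request_module(); the
     * locking pattern, not the API, is the point here. */
    static void *find_family(const char *name) { (void)name; return NULL; }
    static void load_module(const char *name) { (void)name; /* loader takes the write side */ }

    static void *lookup_or_load(const char *name)
    {
        void *res;

        pthread_rwlock_rdlock(&cb_lock);
        res = find_family(name);
        if (res == NULL) {
            /* Drop the read side before the blocking load: the new
             * module registers itself under the write side, which
             * would deadlock against a reader held across the call. */
            pthread_rwlock_unlock(&cb_lock);
            load_module(name);
            pthread_rwlock_rdlock(&cb_lock);
            res = find_family(name);   /* redo the lookup after relocking */
        }
        pthread_rwlock_unlock(&cb_lock);
        return res;
    }

    int main(void)
    {
        printf("%p\n", lookup_or_load("nlctrl"));
        return 0;
    }
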
diff --git a/net/nfc/core.c b/net/nfc/core.c
index dc96a83aa6ab..1d074dd1650f 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -44,7 +44,7 @@ DEFINE_MUTEX(nfc_devlist_mutex);
44/* NFC device ID bitmap */ 44/* NFC device ID bitmap */
45static DEFINE_IDA(nfc_index_ida); 45static DEFINE_IDA(nfc_index_ida);
46 46
47int nfc_fw_upload(struct nfc_dev *dev, const char *firmware_name) 47int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name)
48{ 48{
49 int rc = 0; 49 int rc = 0;
50 50
@@ -62,28 +62,28 @@ int nfc_fw_upload(struct nfc_dev *dev, const char *firmware_name)
62 goto error; 62 goto error;
63 } 63 }
64 64
65 if (!dev->ops->fw_upload) { 65 if (!dev->ops->fw_download) {
66 rc = -EOPNOTSUPP; 66 rc = -EOPNOTSUPP;
67 goto error; 67 goto error;
68 } 68 }
69 69
70 dev->fw_upload_in_progress = true; 70 dev->fw_download_in_progress = true;
71 rc = dev->ops->fw_upload(dev, firmware_name); 71 rc = dev->ops->fw_download(dev, firmware_name);
72 if (rc) 72 if (rc)
73 dev->fw_upload_in_progress = false; 73 dev->fw_download_in_progress = false;
74 74
75error: 75error:
76 device_unlock(&dev->dev); 76 device_unlock(&dev->dev);
77 return rc; 77 return rc;
78} 78}
79 79
80int nfc_fw_upload_done(struct nfc_dev *dev, const char *firmware_name) 80int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name)
81{ 81{
82 dev->fw_upload_in_progress = false; 82 dev->fw_download_in_progress = false;
83 83
84 return nfc_genl_fw_upload_done(dev, firmware_name); 84 return nfc_genl_fw_download_done(dev, firmware_name);
85} 85}
86EXPORT_SYMBOL(nfc_fw_upload_done); 86EXPORT_SYMBOL(nfc_fw_download_done);
87 87
88/** 88/**
89 * nfc_dev_up - turn on the NFC device 89 * nfc_dev_up - turn on the NFC device
@@ -110,7 +110,7 @@ int nfc_dev_up(struct nfc_dev *dev)
110 goto error; 110 goto error;
111 } 111 }
112 112
113 if (dev->fw_upload_in_progress) { 113 if (dev->fw_download_in_progress) {
114 rc = -EBUSY; 114 rc = -EBUSY;
115 goto error; 115 goto error;
116 } 116 }
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 7b1c186736eb..fe66908401f5 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -809,14 +809,14 @@ static void nfc_hci_recv_from_llc(struct nfc_hci_dev *hdev, struct sk_buff *skb)
809 } 809 }
810} 810}
811 811
812static int hci_fw_upload(struct nfc_dev *nfc_dev, const char *firmware_name) 812static int hci_fw_download(struct nfc_dev *nfc_dev, const char *firmware_name)
813{ 813{
814 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); 814 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
815 815
816 if (!hdev->ops->fw_upload) 816 if (!hdev->ops->fw_download)
817 return -ENOTSUPP; 817 return -ENOTSUPP;
818 818
819 return hdev->ops->fw_upload(hdev, firmware_name); 819 return hdev->ops->fw_download(hdev, firmware_name);
820} 820}
821 821
822static struct nfc_ops hci_nfc_ops = { 822static struct nfc_ops hci_nfc_ops = {
@@ -831,7 +831,7 @@ static struct nfc_ops hci_nfc_ops = {
831 .im_transceive = hci_transceive, 831 .im_transceive = hci_transceive,
832 .tm_send = hci_tm_send, 832 .tm_send = hci_tm_send,
833 .check_presence = hci_check_presence, 833 .check_presence = hci_check_presence,
834 .fw_upload = hci_fw_upload, 834 .fw_download = hci_fw_download,
835 .discover_se = hci_discover_se, 835 .discover_se = hci_discover_se,
836 .enable_se = hci_enable_se, 836 .enable_se = hci_enable_se,
837 .disable_se = hci_disable_se, 837 .disable_se = hci_disable_se,
diff --git a/net/nfc/nci/Kconfig b/net/nfc/nci/Kconfig
index 2a2416080b4f..a4f1e42e3481 100644
--- a/net/nfc/nci/Kconfig
+++ b/net/nfc/nci/Kconfig
@@ -11,6 +11,7 @@ config NFC_NCI
11 11
12config NFC_NCI_SPI 12config NFC_NCI_SPI
13 depends on NFC_NCI && SPI 13 depends on NFC_NCI && SPI
14 select CRC_CCITT
14 bool "NCI over SPI protocol support" 15 bool "NCI over SPI protocol support"
15 default n 16 default n
16 help 17 help
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index b05ad909778f..f16fd59d4160 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -1089,7 +1089,7 @@ exit:
1089 return rc; 1089 return rc;
1090} 1090}
1091 1091
1092static int nfc_genl_fw_upload(struct sk_buff *skb, struct genl_info *info) 1092static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info)
1093{ 1093{
1094 struct nfc_dev *dev; 1094 struct nfc_dev *dev;
1095 int rc; 1095 int rc;
@@ -1108,13 +1108,13 @@ static int nfc_genl_fw_upload(struct sk_buff *skb, struct genl_info *info)
1108 nla_strlcpy(firmware_name, info->attrs[NFC_ATTR_FIRMWARE_NAME], 1108 nla_strlcpy(firmware_name, info->attrs[NFC_ATTR_FIRMWARE_NAME],
1109 sizeof(firmware_name)); 1109 sizeof(firmware_name));
1110 1110
1111 rc = nfc_fw_upload(dev, firmware_name); 1111 rc = nfc_fw_download(dev, firmware_name);
1112 1112
1113 nfc_put_device(dev); 1113 nfc_put_device(dev);
1114 return rc; 1114 return rc;
1115} 1115}
1116 1116
1117int nfc_genl_fw_upload_done(struct nfc_dev *dev, const char *firmware_name) 1117int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name)
1118{ 1118{
1119 struct sk_buff *msg; 1119 struct sk_buff *msg;
1120 void *hdr; 1120 void *hdr;
@@ -1124,7 +1124,7 @@ int nfc_genl_fw_upload_done(struct nfc_dev *dev, const char *firmware_name)
1124 return -ENOMEM; 1124 return -ENOMEM;
1125 1125
1126 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, 1126 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
1127 NFC_CMD_FW_UPLOAD); 1127 NFC_CMD_FW_DOWNLOAD);
1128 if (!hdr) 1128 if (!hdr)
1129 goto free_msg; 1129 goto free_msg;
1130 1130
@@ -1251,8 +1251,8 @@ static struct genl_ops nfc_genl_ops[] = {
1251 .policy = nfc_genl_policy, 1251 .policy = nfc_genl_policy,
1252 }, 1252 },
1253 { 1253 {
1254 .cmd = NFC_CMD_FW_UPLOAD, 1254 .cmd = NFC_CMD_FW_DOWNLOAD,
1255 .doit = nfc_genl_fw_upload, 1255 .doit = nfc_genl_fw_download,
1256 .policy = nfc_genl_policy, 1256 .policy = nfc_genl_policy,
1257 }, 1257 },
1258 { 1258 {
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index ee85a1fc1b24..820a7850c36a 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -123,10 +123,10 @@ static inline void nfc_device_iter_exit(struct class_dev_iter *iter)
123 class_dev_iter_exit(iter); 123 class_dev_iter_exit(iter);
124} 124}
125 125
126int nfc_fw_upload(struct nfc_dev *dev, const char *firmware_name); 126int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name);
127int nfc_genl_fw_upload_done(struct nfc_dev *dev, const char *firmware_name); 127int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name);
128 128
129int nfc_fw_upload_done(struct nfc_dev *dev, const char *firmware_name); 129int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name);
130 130
131int nfc_dev_up(struct nfc_dev *dev); 131int nfc_dev_up(struct nfc_dev *dev);
132 132
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 22c5f399f1cf..ab101f715447 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -535,6 +535,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
535{ 535{
536 struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); 536 struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
537 537
538 OVS_CB(skb)->tun_key = NULL;
538 return do_execute_actions(dp, skb, acts->actions, 539 return do_execute_actions(dp, skb, acts->actions,
539 acts->actions_len, false); 540 acts->actions_len, false);
540} 541}
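
OVS_CB(skb) is per-packet scratch space layered over skb->cb, so a packet entering ovs_execute_actions() through a path that never set a tunnel key would otherwise be processed with whatever stale pointer the previous user of the area left behind. Clearing it at the entry point makes the field trustworthy. A toy model of the idea (simplified structs, not the real skb layout):

    #include <stdio.h>
    #include <string.h>

    /* Per-packet scratch area, modelled on skb->cb: reused by every
     * layer that handles the packet, so it arrives holding old data. */
    struct pkt_cb { const void *tun_key; };
    struct pkt { struct pkt_cb cb; char data[64]; };

    static void execute_actions(struct pkt *p)
    {
        /* Reset the field on entry; a packet that did not come in
         * through a tunnel port would otherwise carry a stale pointer. */
        p->cb.tun_key = NULL;
        printf("tun_key=%p\n", (void *)p->cb.tun_key);
    }

    int main(void)
    {
        struct pkt p;
        memset(&p, 0x5a, sizeof(p));    /* simulate stale scratch contents */
        execute_actions(&p);
        return 0;
    }
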
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f7e3a0d84c40..f2ed7600084e 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -2076,9 +2076,6 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
2076 ovs_notify(reply, info, &ovs_dp_vport_multicast_group); 2076 ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
2077 return 0; 2077 return 0;
2078 2078
2079 rtnl_unlock();
2080 return 0;
2081
2082exit_free: 2079exit_free:
2083 kfree_skb(reply); 2080 kfree_skb(reply);
2084exit_unlock: 2081exit_unlock:
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 5c519b121e1b..1aa84dc58777 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -240,7 +240,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
240 struct flex_array *buckets; 240 struct flex_array *buckets;
241 int i, err; 241 int i, err;
242 242
243 buckets = flex_array_alloc(sizeof(struct hlist_head *), 243 buckets = flex_array_alloc(sizeof(struct hlist_head),
244 n_buckets, GFP_KERNEL); 244 n_buckets, GFP_KERNEL);
245 if (!buckets) 245 if (!buckets)
246 return NULL; 246 return NULL;
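
The flow-table fix above is the classic sizeof-of-pointer bug: the bucket array stores struct hlist_head objects, not pointers to them, so the allocation must be sized by the element type. The two sizes happen to coincide today because hlist_head holds exactly one pointer, which is why the bug was silent, but the old form is only correct by accident and breaks the moment the element type changes. A compilable demonstration:

    #include <stdio.h>

    struct hlist_node { struct hlist_node *next, **pprev; };
    struct hlist_head { struct hlist_node *first; };

    int main(void)
    {
        /* The array element type is struct hlist_head itself, so that
         * is what must size the allocation; sizing by the pointer type
         * only works while the two sizes coincide. */
        printf("sizeof(struct hlist_head)   = %zu\n", sizeof(struct hlist_head));
        printf("sizeof(struct hlist_head *) = %zu\n", sizeof(struct hlist_head *));
        return 0;
    }
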
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4b66c752eae5..75c8bbf598c8 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3259,9 +3259,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3259 3259
3260 if (po->tp_version == TPACKET_V3) { 3260 if (po->tp_version == TPACKET_V3) {
3261 lv = sizeof(struct tpacket_stats_v3); 3261 lv = sizeof(struct tpacket_stats_v3);
3262 st.stats3.tp_packets += st.stats3.tp_drops;
3262 data = &st.stats3; 3263 data = &st.stats3;
3263 } else { 3264 } else {
3264 lv = sizeof(struct tpacket_stats); 3265 lv = sizeof(struct tpacket_stats);
3266 st.stats1.tp_packets += st.stats1.tp_drops;
3265 data = &st.stats1; 3267 data = &st.stats1;
3266 } 3268 }
3267 3269
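
The af_packet change restores the historical meaning of tp_packets as reported by getsockopt(PACKET_STATISTICS): the kernel counts delivered frames and drops separately, and folds the drops back in at reporting time so userspace sees "everything that arrived". A toy sketch of the reporting step (field names mirror the UAPI struct, values invented):

    #include <stdio.h>

    struct tpacket_stats { unsigned int tp_packets, tp_drops; };

    int main(void)
    {
        /* As kept internally: tp_packets counts only delivered frames,
         * tp_drops the ones that did not fit in the ring. */
        struct tpacket_stats st = { .tp_packets = 90, .tp_drops = 10 };

        /* Reporting step from the patch: the value handed to userspace
         * includes drops, i.e. every frame the socket saw. */
        st.tp_packets += st.tp_drops;
        printf("reported: packets=%u drops=%u\n", st.tp_packets, st.tp_drops);
        return 0;
    }
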
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 281c1bded1f6..51b968d3febb 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -285,6 +285,45 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
285 return q; 285 return q;
286} 286}
287 287
288/* The linklayer setting was not transferred from iproute2 in older
289 * versions, and the rate table lookup system has been dropped from
290 * the kernel. To stay backward compatible with older iproute2 tc
291 * utils, we detect the linklayer setting by checking whether the rate
292 * table was modified.
293 *
294 * For linklayer ATM table entries, the rate table will be aligned to
295 * 48 bytes, thus some table entries will contain the same value. The
296 * mpu (min packet unit) is also encoded into the old rate table, thus
297 * starting from the mpu, we find the low and high table entries for
298 * mapping this cell. If these entries contain the same value, then
299 * the rate table has been modified for linklayer ATM.
300 *
301 * This is done by rounding the mpu up to the nearest 48-byte cell/entry,
302 * then rounding up to the next cell, computing the table entry one
303 * below it, and comparing the two.
304 */
305static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
306{
307 int low = roundup(r->mpu, 48);
308 int high = roundup(low+1, 48);
309 int cell_low = low >> r->cell_log;
310 int cell_high = (high >> r->cell_log) - 1;
311
312 /* rtab is too inaccurate at rates > 100Mbit/s */
313 if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
314 pr_debug("TC linklayer: Giving up ATM detection\n");
315 return TC_LINKLAYER_ETHERNET;
316 }
317
318 if ((cell_high > cell_low) && (cell_high < 256)
319 && (rtab[cell_low] == rtab[cell_high])) {
320 pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
321 cell_low, cell_high, rtab[cell_high]);
322 return TC_LINKLAYER_ATM;
323 }
324 return TC_LINKLAYER_ETHERNET;
325}
326
288static struct qdisc_rate_table *qdisc_rtab_list; 327static struct qdisc_rate_table *qdisc_rtab_list;
289 328
290struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab) 329struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
@@ -308,6 +347,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
308 rtab->rate = *r; 347 rtab->rate = *r;
309 rtab->refcnt = 1; 348 rtab->refcnt = 1;
310 memcpy(rtab->data, nla_data(tab), 1024); 349 memcpy(rtab->data, nla_data(tab), 1024);
350 if (r->linklayer == TC_LINKLAYER_UNAWARE)
351 r->linklayer = __detect_linklayer(r, rtab->data);
311 rtab->next = qdisc_rtab_list; 352 rtab->next = qdisc_rtab_list;
312 qdisc_rtab_list = rtab; 353 qdisc_rtab_list = rtab;
313 } 354 }
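
The detection comment above is easiest to follow with concrete numbers. Rate tables have 256 slots indexed by size >> cell_log; an ATM-shaped table charges every size within one 48-byte cell the same cost, so two slots falling inside the same cell must hold equal values, while an Ethernet-shaped table keeps growing between them. A standalone walk-through of the index arithmetic (the mpu and cell_log values are invented for illustration):

    #include <stdio.h>

    #define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
        /* Illustrative numbers only: mpu = 0 and cell_log = 3 mean each
         * rate-table slot covers 8 bytes of packet size. */
        int mpu = 0, cell_log = 3;

        int low  = ROUNDUP(mpu, 48);       /* 0  -> first ATM cell boundary */
        int high = ROUNDUP(low + 1, 48);   /* 48 -> next cell boundary */
        int cell_low  = low >> cell_log;          /* table slot 0 */
        int cell_high = (high >> cell_log) - 1;   /* table slot 5 */

        /* On an ATM-aligned table rtab[0] == rtab[5], since both slots
         * describe sizes inside the same 48-byte cell; an Ethernet
         * table grows between the two. */
        printf("compare rtab[%d] against rtab[%d]\n", cell_low, cell_high);
        return 0;
    }

With mpu = 0 and cell_log = 3 the function ends up comparing rtab[0] against rtab[5], both of which cover sizes within the first 48-byte cell on an ATM-aligned table.
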
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index ca8e0a57d945..1f9c31411f19 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -605,6 +605,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
605 struct sockaddr_atmpvc pvc; 605 struct sockaddr_atmpvc pvc;
606 int state; 606 int state;
607 607
608 memset(&pvc, 0, sizeof(pvc));
608 pvc.sap_family = AF_ATMPVC; 609 pvc.sap_family = AF_ATMPVC;
609 pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1; 610 pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
610 pvc.sap_addr.vpi = flow->vcc->vpi; 611 pvc.sap_addr.vpi = flow->vcc->vpi;
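
Both this memset() and the matching one added to sch_cbq below close the same hole: a struct built on the kernel stack and then copied out wholesale carries its padding bytes with it, and uninitialized padding leaks stack contents to userspace. Zeroing the whole struct first is the standard cure. A compact illustration (hypothetical struct, stub output function):

    #include <string.h>

    struct opt { char flags; /* 3 padding bytes here on common ABIs */ int allot; };

    /* Stand-in for nla_put()/copy_to_user(): anything that copies the
     * raw bytes of the struct, padding included, out of the kernel. */
    static void put_bytes(const void *p, unsigned long len) { (void)p; (void)len; }

    static void dump(void)
    {
        struct opt o;

        /* Without this, the padding between .flags and .allot holds
         * whatever was on the stack and is copied out verbatim. */
        memset(&o, 0, sizeof(o));
        o.flags = 0;
        o.allot = 1514;
        put_bytes(&o, sizeof(o));
    }

    int main(void) { dump(); return 0; }
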
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 71a568862557..7a42c81a19eb 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1465,6 +1465,7 @@ static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
1465 unsigned char *b = skb_tail_pointer(skb); 1465 unsigned char *b = skb_tail_pointer(skb);
1466 struct tc_cbq_wrropt opt; 1466 struct tc_cbq_wrropt opt;
1467 1467
1468 memset(&opt, 0, sizeof(opt));
1468 opt.flags = 0; 1469 opt.flags = 0;
1469 opt.allot = cl->allot; 1470 opt.allot = cl->allot;
1470 opt.priority = cl->priority + 1; 1471 opt.priority = cl->priority + 1;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4626cef4b76e..48be3d5c0d92 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -25,6 +25,7 @@
25#include <linux/rcupdate.h> 25#include <linux/rcupdate.h>
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/if_vlan.h>
28#include <net/sch_generic.h> 29#include <net/sch_generic.h>
29#include <net/pkt_sched.h> 30#include <net/pkt_sched.h>
30#include <net/dst.h> 31#include <net/dst.h>
@@ -207,15 +208,19 @@ void __qdisc_run(struct Qdisc *q)
207 208
208unsigned long dev_trans_start(struct net_device *dev) 209unsigned long dev_trans_start(struct net_device *dev)
209{ 210{
210 unsigned long val, res = dev->trans_start; 211 unsigned long val, res;
211 unsigned int i; 212 unsigned int i;
212 213
214 if (is_vlan_dev(dev))
215 dev = vlan_dev_real_dev(dev);
216 res = dev->trans_start;
213 for (i = 0; i < dev->num_tx_queues; i++) { 217 for (i = 0; i < dev->num_tx_queues; i++) {
214 val = netdev_get_tx_queue(dev, i)->trans_start; 218 val = netdev_get_tx_queue(dev, i)->trans_start;
215 if (val && time_after(val, res)) 219 if (val && time_after(val, res))
216 res = val; 220 res = val;
217 } 221 }
218 dev->trans_start = res; 222 dev->trans_start = res;
223
219 return res; 224 return res;
220} 225}
221EXPORT_SYMBOL(dev_trans_start); 226EXPORT_SYMBOL(dev_trans_start);
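
The dev_trans_start() hunk: VLAN devices transmit through their lower device and never update their own trans_start/queue timestamps, so a watchdog querying the VLAN netdev would see a stale value. Redirecting the query to the real device fixes that. A toy model:

    #include <stdio.h>

    struct dev { int is_vlan; struct dev *real; unsigned long trans_start; };

    static unsigned long dev_trans_start(struct dev *d)
    {
        /* A VLAN device never updates its own timestamp; ask the lower
         * device it transmits through instead. */
        if (d->is_vlan)
            d = d->real;
        return d->trans_start;
    }

    int main(void)
    {
        struct dev eth  = { 0, NULL, 1000 };
        struct dev vlan = { 1, &eth, 0 };
        printf("%lu\n", dev_trans_start(&vlan));
        return 0;
    }
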
@@ -904,6 +909,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r,
904 memset(r, 0, sizeof(*r)); 909 memset(r, 0, sizeof(*r));
905 r->overhead = conf->overhead; 910 r->overhead = conf->overhead;
906 r->rate_bytes_ps = conf->rate; 911 r->rate_bytes_ps = conf->rate;
912 r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
907 r->mult = 1; 913 r->mult = 1;
908 /* 914 /*
909 * The deal here is to replace a divide by a reciprocal one 915 * The deal here is to replace a divide by a reciprocal one
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index c2124ea29f45..c2178b15ca6e 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -100,7 +100,7 @@ struct htb_class {
100 struct psched_ratecfg ceil; 100 struct psched_ratecfg ceil;
101 s64 buffer, cbuffer;/* token bucket depth/rate */ 101 s64 buffer, cbuffer;/* token bucket depth/rate */
102 s64 mbuffer; /* max wait time */ 102 s64 mbuffer; /* max wait time */
103 int prio; /* these two are used only by leaves... */ 103 u32 prio; /* these two are used only by leaves... */
104 int quantum; /* but stored for parent-to-leaf return */ 104 int quantum; /* but stored for parent-to-leaf return */
105 105
106 struct tcf_proto *filter_list; /* class attached filters */ 106 struct tcf_proto *filter_list; /* class attached filters */
@@ -1329,6 +1329,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1329 struct htb_sched *q = qdisc_priv(sch); 1329 struct htb_sched *q = qdisc_priv(sch);
1330 struct htb_class *cl = (struct htb_class *)*arg, *parent; 1330 struct htb_class *cl = (struct htb_class *)*arg, *parent;
1331 struct nlattr *opt = tca[TCA_OPTIONS]; 1331 struct nlattr *opt = tca[TCA_OPTIONS];
1332 struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1332 struct nlattr *tb[TCA_HTB_MAX + 1]; 1333 struct nlattr *tb[TCA_HTB_MAX + 1];
1333 struct tc_htb_opt *hopt; 1334 struct tc_htb_opt *hopt;
1334 1335
@@ -1350,6 +1351,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1350 if (!hopt->rate.rate || !hopt->ceil.rate) 1351 if (!hopt->rate.rate || !hopt->ceil.rate)
1351 goto failure; 1352 goto failure;
1352 1353
1354 /* Keep backward compatibility with rate_table based iproute2 tc */
1355 if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) {
1356 rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
1357 if (rtab)
1358 qdisc_put_rtab(rtab);
1359 }
1360 if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) {
1361 ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
1362 if (ctab)
1363 qdisc_put_rtab(ctab);
1364 }
1365
1353 if (!cl) { /* new class */ 1366 if (!cl) { /* new class */
1354 struct Qdisc *new_q; 1367 struct Qdisc *new_q;
1355 int prio; 1368 int prio;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index bce5b79662a6..ab67efc64b24 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -846,12 +846,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
846 else 846 else
847 spc_state = SCTP_ADDR_AVAILABLE; 847 spc_state = SCTP_ADDR_AVAILABLE;
848 /* Don't inform ULP about transition from PF to 848 /* Don't inform ULP about transition from PF to
849 * active state and set cwnd to 1, see SCTP 849 * active state and set cwnd to 1 MTU, see SCTP
850 * Quick failover draft section 5.1, point 5 850 * Quick failover draft section 5.1, point 5
851 */ 851 */
852 if (transport->state == SCTP_PF) { 852 if (transport->state == SCTP_PF) {
853 ulp_notify = false; 853 ulp_notify = false;
854 transport->cwnd = 1; 854 transport->cwnd = asoc->pathmtu;
855 } 855 }
856 transport->state = SCTP_ACTIVE; 856 transport->state = SCTP_ACTIVE;
857 break; 857 break;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index bdbbc3fd7c14..8fdd16046d66 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -181,12 +181,12 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
181 return; 181 return;
182 } 182 }
183 183
184 call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
185
186 sctp_packet_free(&transport->packet); 184 sctp_packet_free(&transport->packet);
187 185
188 if (transport->asoc) 186 if (transport->asoc)
189 sctp_association_put(transport->asoc); 187 sctp_association_put(transport->asoc);
188
189 call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
190} 190}
191 191
192/* Start T3_rtx timer if it is not already running and update the heartbeat 192/* Start T3_rtx timer if it is not already running and update the heartbeat
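
The sctp_transport hunk is purely an ordering fix: call_rcu() may fire its callback, which frees the transport, as soon as a grace period elapses, so the old code raced that free against the sctp_packet_free()/sctp_association_put() calls that still dereference the object. Teardown must finish before the object is handed to the deferred-free machinery. A userspace sketch of the rule (stub functions in place of the SCTP/RCU APIs):

    #include <stdlib.h>

    struct transport { void *packet; void *asoc; };

    /* Stubs for sctp_packet_free(), sctp_association_put() and
     * call_rcu(); only the ordering rule is being illustrated. */
    static void free_packet(void *p) { free(p); }
    static void put_asoc(void *a)    { (void)a; }
    static void defer_free(struct transport *t) { free(t); }  /* models call_rcu() */

    static void destroy(struct transport *t)
    {
        /* Release everything the object still owns first... */
        free_packet(t->packet);
        if (t->asoc)
            put_asoc(t->asoc);

        /* ...and only then schedule the object's own free. In the old
         * order the callback could free the transport on another CPU
         * while the two calls above were still using it. */
        defer_free(t);
    }

    int main(void)
    {
        struct transport *t = calloc(1, sizeof(*t));

        if (t == NULL)
            return 1;
        t->packet = malloc(16);
        destroy(t);
        return 0;
    }
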
diff --git a/net/socket.c b/net/socket.c
index 829b460acb87..b2d7c629eeb9 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -106,7 +106,7 @@
106#include <linux/atalk.h> 106#include <linux/atalk.h>
107#include <net/busy_poll.h> 107#include <net/busy_poll.h>
108 108
109#ifdef CONFIG_NET_LL_RX_POLL 109#ifdef CONFIG_NET_RX_BUSY_POLL
110unsigned int sysctl_net_busy_read __read_mostly; 110unsigned int sysctl_net_busy_read __read_mostly;
111unsigned int sysctl_net_busy_poll __read_mostly; 111unsigned int sysctl_net_busy_poll __read_mostly;
112#endif 112#endif
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
index d304f41260f2..af7ffd447fee 100644
--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
@@ -120,7 +120,7 @@ static int gssp_rpc_create(struct net *net, struct rpc_clnt **_clnt)
120 if (IS_ERR(clnt)) { 120 if (IS_ERR(clnt)) {
121 dprintk("RPC: failed to create AF_LOCAL gssproxy " 121 dprintk("RPC: failed to create AF_LOCAL gssproxy "
122 "client (errno %ld).\n", PTR_ERR(clnt)); 122 "client (errno %ld).\n", PTR_ERR(clnt));
123 result = -PTR_ERR(clnt); 123 result = PTR_ERR(clnt);
124 *_clnt = NULL; 124 *_clnt = NULL;
125 goto out; 125 goto out;
126 } 126 }
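
PTR_ERR() already returns the negative errno encoded in an error pointer, so the old `result = -PTR_ERR(clnt)` flipped the sign and handed callers a positive value where a negative errno was expected. A self-contained model of the encoding:

    #include <stdio.h>

    /* Userspace model of the kernel's ERR_PTR/PTR_ERR encoding. */
    static void *ERR_PTR(long err)       { return (void *)err; }
    static long  PTR_ERR(const void *p)  { return (long)p; }

    int main(void)
    {
        void *clnt = ERR_PTR(-111 /* -ECONNREFUSED */);

        printf("-PTR_ERR() = %ld  (wrong: positive, not an errno)\n", -PTR_ERR(clnt));
        printf(" PTR_ERR() = %ld  (right: negative errno as-is)\n", PTR_ERR(clnt));
        return 0;
    }
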
@@ -328,7 +328,6 @@ void gssp_free_upcall_data(struct gssp_upcall_data *data)
328 kfree(data->in_handle.data); 328 kfree(data->in_handle.data);
329 kfree(data->out_handle.data); 329 kfree(data->out_handle.data);
330 kfree(data->out_token.data); 330 kfree(data->out_token.data);
331 kfree(data->mech_oid.data);
332 free_svc_cred(&data->creds); 331 free_svc_cred(&data->creds);
333} 332}
334 333
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index 357f613df7ff..3c85d1c8a028 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -430,7 +430,7 @@ static int dummy_enc_nameattr_array(struct xdr_stream *xdr,
430static int dummy_dec_nameattr_array(struct xdr_stream *xdr, 430static int dummy_dec_nameattr_array(struct xdr_stream *xdr,
431 struct gssx_name_attr_array *naa) 431 struct gssx_name_attr_array *naa)
432{ 432{
433 struct gssx_name_attr dummy; 433 struct gssx_name_attr dummy = { .attr = {.len = 0} };
434 u32 count, i; 434 u32 count, i;
435 __be32 *p; 435 __be32 *p;
436 436
@@ -493,12 +493,13 @@ static int gssx_enc_name(struct xdr_stream *xdr,
493 return err; 493 return err;
494} 494}
495 495
496
496static int gssx_dec_name(struct xdr_stream *xdr, 497static int gssx_dec_name(struct xdr_stream *xdr,
497 struct gssx_name *name) 498 struct gssx_name *name)
498{ 499{
499 struct xdr_netobj dummy_netobj; 500 struct xdr_netobj dummy_netobj = { .len = 0 };
500 struct gssx_name_attr_array dummy_name_attr_array; 501 struct gssx_name_attr_array dummy_name_attr_array = { .count = 0 };
501 struct gssx_option_array dummy_option_array; 502 struct gssx_option_array dummy_option_array = { .count = 0 };
502 int err; 503 int err;
503 504
504 /* name->display_name */ 505 /* name->display_name */
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index d0347d148b34..09fb638bcaa4 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1180,6 +1180,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
1180 gm = gss_mech_get_by_OID(&ud->mech_oid); 1180 gm = gss_mech_get_by_OID(&ud->mech_oid);
1181 if (!gm) 1181 if (!gm)
1182 goto out; 1182 goto out;
1183 rsci.cred.cr_gss_mech = gm;
1183 1184
1184 status = -EINVAL; 1185 status = -EINVAL;
1185 /* mech-specific data: */ 1186 /* mech-specific data: */
@@ -1195,7 +1196,6 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
1195 rscp = rsc_update(cd, &rsci, rscp); 1196 rscp = rsc_update(cd, &rsci, rscp);
1196 status = 0; 1197 status = 0;
1197out: 1198out:
1198 gss_mech_put(gm);
1199 rsc_free(&rsci); 1199 rsc_free(&rsci);
1200 if (rscp) 1200 if (rscp)
1201 cache_put(&rscp->h, cd); 1201 cache_put(&rscp->h, cd);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 74f6a704e374..ecbc4e3d83ad 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1660,6 +1660,10 @@ call_connect(struct rpc_task *task)
1660 task->tk_action = call_connect_status; 1660 task->tk_action = call_connect_status;
1661 if (task->tk_status < 0) 1661 if (task->tk_status < 0)
1662 return; 1662 return;
1663 if (task->tk_flags & RPC_TASK_NOCONNECT) {
1664 rpc_exit(task, -ENOTCONN);
1665 return;
1666 }
1663 xprt_connect(task); 1667 xprt_connect(task);
1664 } 1668 }
1665} 1669}
diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h
index 74d948f5d5a1..779742cfc1ff 100644
--- a/net/sunrpc/netns.h
+++ b/net/sunrpc/netns.h
@@ -23,6 +23,7 @@ struct sunrpc_net {
23 struct rpc_clnt *rpcb_local_clnt4; 23 struct rpc_clnt *rpcb_local_clnt4;
24 spinlock_t rpcb_clnt_lock; 24 spinlock_t rpcb_clnt_lock;
25 unsigned int rpcb_users; 25 unsigned int rpcb_users;
26 unsigned int rpcb_is_af_local : 1;
26 27
27 struct mutex gssp_lock; 28 struct mutex gssp_lock;
28 wait_queue_head_t gssp_wq; 29 wait_queue_head_t gssp_wq;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 3df764dc330c..1891a1022c17 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -204,13 +204,15 @@ void rpcb_put_local(struct net *net)
204} 204}
205 205
206static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt, 206static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt,
207 struct rpc_clnt *clnt4) 207 struct rpc_clnt *clnt4,
208 bool is_af_local)
208{ 209{
209 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 210 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
210 211
211 /* Protected by rpcb_create_local_mutex */ 212 /* Protected by rpcb_create_local_mutex */
212 sn->rpcb_local_clnt = clnt; 213 sn->rpcb_local_clnt = clnt;
213 sn->rpcb_local_clnt4 = clnt4; 214 sn->rpcb_local_clnt4 = clnt4;
215 sn->rpcb_is_af_local = is_af_local ? 1 : 0;
214 smp_wmb(); 216 smp_wmb();
215 sn->rpcb_users = 1; 217 sn->rpcb_users = 1;
216 dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: " 218 dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: "
@@ -238,6 +240,14 @@ static int rpcb_create_local_unix(struct net *net)
238 .program = &rpcb_program, 240 .program = &rpcb_program,
239 .version = RPCBVERS_2, 241 .version = RPCBVERS_2,
240 .authflavor = RPC_AUTH_NULL, 242 .authflavor = RPC_AUTH_NULL,
243 /*
244 * We turn off the idle timeout to prevent the kernel
245 * from automatically disconnecting the socket.
246 * Otherwise, we'd have to cache the mount namespace
247 * of the caller and somehow pass that to the socket
248 * reconnect code.
249 */
250 .flags = RPC_CLNT_CREATE_NO_IDLE_TIMEOUT,
241 }; 251 };
242 struct rpc_clnt *clnt, *clnt4; 252 struct rpc_clnt *clnt, *clnt4;
243 int result = 0; 253 int result = 0;
@@ -263,7 +273,7 @@ static int rpcb_create_local_unix(struct net *net)
263 clnt4 = NULL; 273 clnt4 = NULL;
264 } 274 }
265 275
266 rpcb_set_local(net, clnt, clnt4); 276 rpcb_set_local(net, clnt, clnt4, true);
267 277
268out: 278out:
269 return result; 279 return result;
@@ -315,7 +325,7 @@ static int rpcb_create_local_net(struct net *net)
315 clnt4 = NULL; 325 clnt4 = NULL;
316 } 326 }
317 327
318 rpcb_set_local(net, clnt, clnt4); 328 rpcb_set_local(net, clnt, clnt4, false);
319 329
320out: 330out:
321 return result; 331 return result;
@@ -376,13 +386,16 @@ static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname,
376 return rpc_create(&args); 386 return rpc_create(&args);
377} 387}
378 388
379static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg) 389static int rpcb_register_call(struct sunrpc_net *sn, struct rpc_clnt *clnt, struct rpc_message *msg, bool is_set)
380{ 390{
381 int result, error = 0; 391 int flags = RPC_TASK_NOCONNECT;
392 int error, result = 0;
382 393
394 if (is_set || !sn->rpcb_is_af_local)
395 flags = RPC_TASK_SOFTCONN;
383 msg->rpc_resp = &result; 396 msg->rpc_resp = &result;
384 397
385 error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN); 398 error = rpc_call_sync(clnt, msg, flags);
386 if (error < 0) { 399 if (error < 0) {
387 dprintk("RPC: failed to contact local rpcbind " 400 dprintk("RPC: failed to contact local rpcbind "
388 "server (errno %d).\n", -error); 401 "server (errno %d).\n", -error);
@@ -439,16 +452,19 @@ int rpcb_register(struct net *net, u32 prog, u32 vers, int prot, unsigned short
439 .rpc_argp = &map, 452 .rpc_argp = &map,
440 }; 453 };
441 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 454 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
455 bool is_set = false;
442 456
443 dprintk("RPC: %sregistering (%u, %u, %d, %u) with local " 457 dprintk("RPC: %sregistering (%u, %u, %d, %u) with local "
444 "rpcbind\n", (port ? "" : "un"), 458 "rpcbind\n", (port ? "" : "un"),
445 prog, vers, prot, port); 459 prog, vers, prot, port);
446 460
447 msg.rpc_proc = &rpcb_procedures2[RPCBPROC_UNSET]; 461 msg.rpc_proc = &rpcb_procedures2[RPCBPROC_UNSET];
448 if (port) 462 if (port != 0) {
449 msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET]; 463 msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET];
464 is_set = true;
465 }
450 466
451 return rpcb_register_call(sn->rpcb_local_clnt, &msg); 467 return rpcb_register_call(sn, sn->rpcb_local_clnt, &msg, is_set);
452} 468}
453 469
454/* 470/*
@@ -461,6 +477,7 @@ static int rpcb_register_inet4(struct sunrpc_net *sn,
461 const struct sockaddr_in *sin = (const struct sockaddr_in *)sap; 477 const struct sockaddr_in *sin = (const struct sockaddr_in *)sap;
462 struct rpcbind_args *map = msg->rpc_argp; 478 struct rpcbind_args *map = msg->rpc_argp;
463 unsigned short port = ntohs(sin->sin_port); 479 unsigned short port = ntohs(sin->sin_port);
480 bool is_set = false;
464 int result; 481 int result;
465 482
466 map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL); 483 map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
@@ -471,10 +488,12 @@ static int rpcb_register_inet4(struct sunrpc_net *sn,
471 map->r_addr, map->r_netid); 488 map->r_addr, map->r_netid);
472 489
473 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; 490 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
474 if (port) 491 if (port != 0) {
475 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; 492 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
493 is_set = true;
494 }
476 495
477 result = rpcb_register_call(sn->rpcb_local_clnt4, msg); 496 result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
478 kfree(map->r_addr); 497 kfree(map->r_addr);
479 return result; 498 return result;
480} 499}
@@ -489,6 +508,7 @@ static int rpcb_register_inet6(struct sunrpc_net *sn,
489 const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap; 508 const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap;
490 struct rpcbind_args *map = msg->rpc_argp; 509 struct rpcbind_args *map = msg->rpc_argp;
491 unsigned short port = ntohs(sin6->sin6_port); 510 unsigned short port = ntohs(sin6->sin6_port);
511 bool is_set = false;
492 int result; 512 int result;
493 513
494 map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL); 514 map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
@@ -499,10 +519,12 @@ static int rpcb_register_inet6(struct sunrpc_net *sn,
499 map->r_addr, map->r_netid); 519 map->r_addr, map->r_netid);
500 520
501 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; 521 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
502 if (port) 522 if (port != 0) {
503 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; 523 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
524 is_set = true;
525 }
504 526
505 result = rpcb_register_call(sn->rpcb_local_clnt4, msg); 527 result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
506 kfree(map->r_addr); 528 kfree(map->r_addr);
507 return result; 529 return result;
508} 530}
@@ -519,7 +541,7 @@ static int rpcb_unregister_all_protofamilies(struct sunrpc_net *sn,
519 map->r_addr = ""; 541 map->r_addr = "";
520 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; 542 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
521 543
522 return rpcb_register_call(sn->rpcb_local_clnt4, msg); 544 return rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, false);
523} 545}
524 546
525/** 547/**
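
The rpcbind changes thread one decision through every register call: a SET request, or any request on a non-AF_LOCAL transport, may (re)connect the socket (RPC_TASK_SOFTCONN), while an UNSET over AF_LOCAL must not connect at all (RPC_TASK_NOCONNECT, which the new call_connect() check in clnt.c turns into -ENOTCONN). Per the comment added above, reconnecting during teardown could bind in the wrong mount namespace. A small sketch of the flag selection (the flag values are invented; only the predicate mirrors the patch):

    #include <stdio.h>

    #define RPC_TASK_SOFTCONN  0x1   /* fail quickly if not connected */
    #define RPC_TASK_NOCONNECT 0x2   /* never initiate a connect */

    /* Decision from the patch: only SET requests, or any request on a
     * non-AF_LOCAL rpcbind transport, may (re)connect the socket. */
    static int rpcb_call_flags(int is_set, int is_af_local)
    {
        return (is_set || !is_af_local) ? RPC_TASK_SOFTCONN : RPC_TASK_NOCONNECT;
    }

    int main(void)
    {
        printf("unset over AF_LOCAL -> %s\n",
               rpcb_call_flags(0, 1) == RPC_TASK_NOCONNECT ? "NOCONNECT" : "SOFTCONN");
        return 0;
    }
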
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 305374d4fb98..7762b9f8a8b7 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1193,7 +1193,9 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt)
1193 if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) 1193 if (test_bit(XPT_LISTENER, &xprt->xpt_flags))
1194 return 1; 1194 return 1;
1195 required = atomic_read(&xprt->xpt_reserved) + serv->sv_max_mesg; 1195 required = atomic_read(&xprt->xpt_reserved) + serv->sv_max_mesg;
1196 if (sk_stream_wspace(svsk->sk_sk) >= required) 1196 if (sk_stream_wspace(svsk->sk_sk) >= required ||
1197 (sk_stream_min_wspace(svsk->sk_sk) == 0 &&
1198 atomic_read(&xprt->xpt_reserved) == 0))
1197 return 1; 1199 return 1;
1198 set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); 1200 set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
1199 return 0; 1201 return 0;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index cb29ef7ba2f0..609c30c80816 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -460,6 +460,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
460{ 460{
461 struct tipc_link *l_ptr; 461 struct tipc_link *l_ptr;
462 struct tipc_link *temp_l_ptr; 462 struct tipc_link *temp_l_ptr;
463 struct tipc_link_req *temp_req;
463 464
464 pr_info("Disabling bearer <%s>\n", b_ptr->name); 465 pr_info("Disabling bearer <%s>\n", b_ptr->name);
465 spin_lock_bh(&b_ptr->lock); 466 spin_lock_bh(&b_ptr->lock);
@@ -468,9 +469,13 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
468 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { 469 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
469 tipc_link_delete(l_ptr); 470 tipc_link_delete(l_ptr);
470 } 471 }
471 if (b_ptr->link_req) 472 temp_req = b_ptr->link_req;
472 tipc_disc_delete(b_ptr->link_req); 473 b_ptr->link_req = NULL;
473 spin_unlock_bh(&b_ptr->lock); 474 spin_unlock_bh(&b_ptr->lock);
475
476 if (temp_req)
477 tipc_disc_delete(temp_req);
478
474 memset(b_ptr, 0, sizeof(struct tipc_bearer)); 479 memset(b_ptr, 0, sizeof(struct tipc_bearer));
475} 480}
476 481
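
Note: the bearer_disable() change above is the classic detach-then-free pattern. The link request is unhooked from the bearer while the spinlock is held, but tipc_disc_delete(), which may sleep, runs only after the lock is dropped. A rough userspace analogue with a pthread mutex standing in for the spinlock; the struct and field names are invented for the sketch:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct bearer {
	pthread_mutex_t lock;
	char *link_req;		/* stand-in for the discovery request */
};

static void bearer_disable(struct bearer *b)
{
	char *req;

	pthread_mutex_lock(&b->lock);
	req = b->link_req;	/* detach under the lock... */
	b->link_req = NULL;
	pthread_mutex_unlock(&b->lock);

	free(req);		/* ...free (or sleep) outside it */
}

int main(void)
{
	struct bearer b = { PTHREAD_MUTEX_INITIALIZER, strdup("req") };

	bearer_disable(&b);
	printf("link_req is now %p\n", (void *)b.link_req);
	return 0;
}
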
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 19da5abe0fa6..fd3fa57a410e 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -355,8 +355,12 @@ static int tipc_open_listening_sock(struct tipc_server *s)
355 return PTR_ERR(con); 355 return PTR_ERR(con);
356 356
357 sock = tipc_create_listen_sock(con); 357 sock = tipc_create_listen_sock(con);
358 if (!sock) 358 if (!sock) {
359 idr_remove(&s->conn_idr, con->conid);
360 s->idr_in_use--;
361 kfree(con);
359 return -EINVAL; 362 return -EINVAL;
363 }
360 364
361 tipc_register_callbacks(sock, con); 365 tipc_register_callbacks(sock, con);
362 return 0; 366 return 0;
@@ -563,9 +567,14 @@ int tipc_server_start(struct tipc_server *s)
563 kmem_cache_destroy(s->rcvbuf_cache); 567 kmem_cache_destroy(s->rcvbuf_cache);
564 return ret; 568 return ret;
565 } 569 }
570 ret = tipc_open_listening_sock(s);
571 if (ret < 0) {
572 tipc_work_stop(s);
573 kmem_cache_destroy(s->rcvbuf_cache);
574 return ret;
575 }
566 s->enabled = 1; 576 s->enabled = 1;
567 577 return ret;
568 return tipc_open_listening_sock(s);
569} 578}
570 579
571void tipc_server_stop(struct tipc_server *s) 580void tipc_server_stop(struct tipc_server *s)
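
Note: tipc_server_start() now checks the result of tipc_open_listening_sock() and unwinds the two steps that had already succeeded before reporting the error. The same unwind-in-reverse-order idiom, sketched in plain C with invented step names:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical three-step bring-up; each step can fail. */
static int step_cache(void)  { return 0; }
static int step_work(void)   { return 0; }
static int step_listen(void) { return -1; }	/* force the error path */

static int server_start(void)
{
	int ret;

	ret = step_cache();
	if (ret)
		return ret;

	ret = step_work();
	if (ret)
		goto undo_cache;

	ret = step_listen();
	if (ret)
		goto undo_work;

	return 0;

undo_work:			/* undo in reverse order of setup */
	puts("stopping work");
undo_cache:
	puts("destroying cache");
	return ret;
}

int main(void)
{
	return server_start() ? EXIT_FAILURE : EXIT_SUCCESS;
}
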
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 593071dabd1c..4d9334683f84 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -347,7 +347,7 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
347 for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { 347 for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
348 struct vsock_sock *vsk; 348 struct vsock_sock *vsk;
349 list_for_each_entry(vsk, &vsock_connected_table[i], 349 list_for_each_entry(vsk, &vsock_connected_table[i],
350 connected_table); 350 connected_table)
351 fn(sk_vsock(vsk)); 351 fn(sk_vsock(vsk));
352 } 352 }
353 353
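
Note: the af_vsock fix above is a one-character bug worth dwelling on. The semicolon after list_for_each_entry() gave the loop an empty body, so fn() ran once with whatever vsk pointed at after the walk, instead of once per connected socket. The same trap in ordinary C:

#include <stdio.h>

int main(void)
{
	int i;

	/* Buggy: the trailing ';' is the loop body, so the printf
	 * below runs exactly once, with i == 3. */
	for (i = 0; i < 3; i++);
		printf("buggy: i = %d\n", i);

	/* Fixed: the statement is the loop body. */
	for (i = 0; i < 3; i++)
		printf("fixed: i = %d\n", i);

	return 0;
}
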
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 4f9f216665e9..a8c29fa4f1b3 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -765,6 +765,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
765 cfg80211_leave_mesh(rdev, dev); 765 cfg80211_leave_mesh(rdev, dev);
766 break; 766 break;
767 case NL80211_IFTYPE_AP: 767 case NL80211_IFTYPE_AP:
768 case NL80211_IFTYPE_P2P_GO:
768 cfg80211_stop_ap(rdev, dev); 769 cfg80211_stop_ap(rdev, dev);
769 break; 770 break;
770 default: 771 default:
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 1cc47aca7f05..5f6e982cdcf4 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -441,10 +441,12 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
441 goto out_unlock; 441 goto out_unlock;
442 } 442 }
443 *rdev = wiphy_to_dev((*wdev)->wiphy); 443 *rdev = wiphy_to_dev((*wdev)->wiphy);
444 cb->args[0] = (*rdev)->wiphy_idx; 444 /* 0 is the first index - add 1 to parse only once */
445 cb->args[0] = (*rdev)->wiphy_idx + 1;
445 cb->args[1] = (*wdev)->identifier; 446 cb->args[1] = (*wdev)->identifier;
446 } else { 447 } else {
447 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0]); 448 /* subtract the 1 again here */
449 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
448 struct wireless_dev *tmp; 450 struct wireless_dev *tmp;
449 451
450 if (!wiphy) { 452 if (!wiphy) {
@@ -2620,8 +2622,8 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
2620 2622
2621 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 2623 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
2622 NL80211_CMD_NEW_KEY); 2624 NL80211_CMD_NEW_KEY);
2623 if (IS_ERR(hdr)) 2625 if (!hdr)
2624 return PTR_ERR(hdr); 2626 return -ENOBUFS;
2625 2627
2626 cookie.msg = msg; 2628 cookie.msg = msg;
2627 cookie.idx = key_idx; 2629 cookie.idx = key_idx;
@@ -4770,9 +4772,9 @@ do { \
4770 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, 0, 1, 4772 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, 0, 1,
4771 mask, NL80211_MESHCONF_FORWARDING, 4773 mask, NL80211_MESHCONF_FORWARDING,
4772 nla_get_u8); 4774 nla_get_u8);
4773 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, 1, 255, 4775 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0,
4774 mask, NL80211_MESHCONF_RSSI_THRESHOLD, 4776 mask, NL80211_MESHCONF_RSSI_THRESHOLD,
4775 nla_get_u32); 4777 nla_get_s32);
4776 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, 0, 16, 4778 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, 0, 16,
4777 mask, NL80211_MESHCONF_HT_OPMODE, 4779 mask, NL80211_MESHCONF_HT_OPMODE,
4778 nla_get_u16); 4780 nla_get_u16);
@@ -6505,6 +6507,9 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
6505 NL80211_CMD_TESTMODE); 6507 NL80211_CMD_TESTMODE);
6506 struct nlattr *tmdata; 6508 struct nlattr *tmdata;
6507 6509
6510 if (!hdr)
6511 break;
6512
6508 if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) { 6513 if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
6509 genlmsg_cancel(skb, hdr); 6514 genlmsg_cancel(skb, hdr);
6510 break; 6515 break;
@@ -6613,12 +6618,14 @@ EXPORT_SYMBOL(cfg80211_testmode_alloc_event_skb);
6613 6618
6614void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp) 6619void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
6615{ 6620{
6621 struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
6616 void *hdr = ((void **)skb->cb)[1]; 6622 void *hdr = ((void **)skb->cb)[1];
6617 struct nlattr *data = ((void **)skb->cb)[2]; 6623 struct nlattr *data = ((void **)skb->cb)[2];
6618 6624
6619 nla_nest_end(skb, data); 6625 nla_nest_end(skb, data);
6620 genlmsg_end(skb, hdr); 6626 genlmsg_end(skb, hdr);
6621 genlmsg_multicast(skb, 0, nl80211_testmode_mcgrp.id, gfp); 6627 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0,
6628 nl80211_testmode_mcgrp.id, gfp);
6622} 6629}
6623EXPORT_SYMBOL(cfg80211_testmode_event); 6630EXPORT_SYMBOL(cfg80211_testmode_event);
6624#endif 6631#endif
@@ -6947,9 +6954,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
6947 6954
6948 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 6955 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
6949 NL80211_CMD_REMAIN_ON_CHANNEL); 6956 NL80211_CMD_REMAIN_ON_CHANNEL);
6950 6957 if (!hdr) {
6951 if (IS_ERR(hdr)) { 6958 err = -ENOBUFS;
6952 err = PTR_ERR(hdr);
6953 goto free_msg; 6959 goto free_msg;
6954 } 6960 }
6955 6961
@@ -7247,9 +7253,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
7247 7253
7248 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 7254 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
7249 NL80211_CMD_FRAME); 7255 NL80211_CMD_FRAME);
7250 7256 if (!hdr) {
7251 if (IS_ERR(hdr)) { 7257 err = -ENOBUFS;
7252 err = PTR_ERR(hdr);
7253 goto free_msg; 7258 goto free_msg;
7254 } 7259 }
7255 } 7260 }
@@ -8128,9 +8133,8 @@ static int nl80211_probe_client(struct sk_buff *skb,
8128 8133
8129 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 8134 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
8130 NL80211_CMD_PROBE_CLIENT); 8135 NL80211_CMD_PROBE_CLIENT);
8131 8136 if (!hdr) {
8132 if (IS_ERR(hdr)) { 8137 err = -ENOBUFS;
8133 err = PTR_ERR(hdr);
8134 goto free_msg; 8138 goto free_msg;
8135 } 8139 }
8136 8140
@@ -10064,7 +10068,8 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
10064 10068
10065 genlmsg_end(msg, hdr); 10069 genlmsg_end(msg, hdr);
10066 10070
10067 genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp); 10071 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
10072 nl80211_mlme_mcgrp.id, gfp);
10068 return; 10073 return;
10069 10074
10070 nla_put_failure: 10075 nla_put_failure:
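
Note: two recurring fixes in the nl80211.c hunks deserve comment. First, cb->args[0] now stores wiphy_idx + 1 because the args array starts out zeroed, so a raw index of 0 would be indistinguishable from "no index recorded yet". Second, nl80211hdr_put() signals failure by returning NULL, not an ERR_PTR-encoded value, so IS_ERR() on its result could never fire; each call site now tests for NULL and supplies -ENOBUFS itself. A userspace re-implementation of the ERR_PTR convention shows why the two failure styles must not be mixed; the helpers mirror include/linux/err.h but are re-typed here for portability:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)(intptr_t)error; }
static inline long PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Fails the ERR_PTR way: the error code is encoded in the pointer. */
static void *alloc_err_ptr(void) { return ERR_PTR(-ENOMEM); }

/* Fails the NULL way, like nl80211hdr_put(): no code in the pointer. */
static void *alloc_null(void) { return NULL; }

int main(void)
{
	void *p = alloc_err_ptr();

	if (IS_ERR(p))
		printf("ERR_PTR style: error %ld\n", PTR_ERR(p));

	p = alloc_null();
	/* IS_ERR(NULL) is false - testing it here would miss the failure. */
	if (!p)
		printf("NULL style: caller supplies its own code (-ENOBUFS)\n");
	return 0;
}
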
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 5a24c986f34b..de06d5d1287f 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2247,10 +2247,13 @@ int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
2247 2247
2248void wiphy_regulatory_register(struct wiphy *wiphy) 2248void wiphy_regulatory_register(struct wiphy *wiphy)
2249{ 2249{
2250 struct regulatory_request *lr;
2251
2250 if (!reg_dev_ignore_cell_hint(wiphy)) 2252 if (!reg_dev_ignore_cell_hint(wiphy))
2251 reg_num_devs_support_basehint++; 2253 reg_num_devs_support_basehint++;
2252 2254
2253 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); 2255 lr = get_last_request();
2256 wiphy_update_regulatory(wiphy, lr->initiator);
2254} 2257}
2255 2258
2256void wiphy_regulatory_deregister(struct wiphy *wiphy) 2259void wiphy_regulatory_deregister(struct wiphy *wiphy)
@@ -2279,7 +2282,9 @@ void wiphy_regulatory_deregister(struct wiphy *wiphy)
2279static void reg_timeout_work(struct work_struct *work) 2282static void reg_timeout_work(struct work_struct *work)
2280{ 2283{
2281 REG_DBG_PRINT("Timeout while waiting for CRDA to reply, restoring regulatory settings\n"); 2284 REG_DBG_PRINT("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
2285 rtnl_lock();
2282 restore_regulatory_settings(true); 2286 restore_regulatory_settings(true);
2287 rtnl_unlock();
2283} 2288}
2284 2289
2285int __init regulatory_init(void) 2290int __init regulatory_init(void)
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 1d3cfb1a3f28..20e86a95dc4e 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -34,8 +34,10 @@ struct cfg80211_conn {
34 CFG80211_CONN_SCAN_AGAIN, 34 CFG80211_CONN_SCAN_AGAIN,
35 CFG80211_CONN_AUTHENTICATE_NEXT, 35 CFG80211_CONN_AUTHENTICATE_NEXT,
36 CFG80211_CONN_AUTHENTICATING, 36 CFG80211_CONN_AUTHENTICATING,
37 CFG80211_CONN_AUTH_FAILED,
37 CFG80211_CONN_ASSOCIATE_NEXT, 38 CFG80211_CONN_ASSOCIATE_NEXT,
38 CFG80211_CONN_ASSOCIATING, 39 CFG80211_CONN_ASSOCIATING,
40 CFG80211_CONN_ASSOC_FAILED,
39 CFG80211_CONN_DEAUTH, 41 CFG80211_CONN_DEAUTH,
40 CFG80211_CONN_CONNECTED, 42 CFG80211_CONN_CONNECTED,
41 } state; 43 } state;
@@ -164,6 +166,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
164 NULL, 0, 166 NULL, 0,
165 params->key, params->key_len, 167 params->key, params->key_len,
166 params->key_idx, NULL, 0); 168 params->key_idx, NULL, 0);
169 case CFG80211_CONN_AUTH_FAILED:
170 return -ENOTCONN;
167 case CFG80211_CONN_ASSOCIATE_NEXT: 171 case CFG80211_CONN_ASSOCIATE_NEXT:
168 BUG_ON(!rdev->ops->assoc); 172 BUG_ON(!rdev->ops->assoc);
169 wdev->conn->state = CFG80211_CONN_ASSOCIATING; 173 wdev->conn->state = CFG80211_CONN_ASSOCIATING;
@@ -188,10 +192,17 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
188 WLAN_REASON_DEAUTH_LEAVING, 192 WLAN_REASON_DEAUTH_LEAVING,
189 false); 193 false);
190 return err; 194 return err;
195 case CFG80211_CONN_ASSOC_FAILED:
196 cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
197 NULL, 0,
198 WLAN_REASON_DEAUTH_LEAVING, false);
199 return -ENOTCONN;
191 case CFG80211_CONN_DEAUTH: 200 case CFG80211_CONN_DEAUTH:
192 cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, 201 cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
193 NULL, 0, 202 NULL, 0,
194 WLAN_REASON_DEAUTH_LEAVING, false); 203 WLAN_REASON_DEAUTH_LEAVING, false);
204 /* free directly, disconnected event already sent */
205 cfg80211_sme_free(wdev);
195 return 0; 206 return 0;
196 default: 207 default:
197 return 0; 208 return 0;
@@ -371,7 +382,7 @@ bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status)
371 return true; 382 return true;
372 } 383 }
373 384
374 wdev->conn->state = CFG80211_CONN_DEAUTH; 385 wdev->conn->state = CFG80211_CONN_ASSOC_FAILED;
375 schedule_work(&rdev->conn_work); 386 schedule_work(&rdev->conn_work);
376 return false; 387 return false;
377} 388}
@@ -383,7 +394,13 @@ void cfg80211_sme_deauth(struct wireless_dev *wdev)
383 394
384void cfg80211_sme_auth_timeout(struct wireless_dev *wdev) 395void cfg80211_sme_auth_timeout(struct wireless_dev *wdev)
385{ 396{
386 cfg80211_sme_free(wdev); 397 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
398
399 if (!wdev->conn)
400 return;
401
402 wdev->conn->state = CFG80211_CONN_AUTH_FAILED;
403 schedule_work(&rdev->conn_work);
387} 404}
388 405
389void cfg80211_sme_disassoc(struct wireless_dev *wdev) 406void cfg80211_sme_disassoc(struct wireless_dev *wdev)
@@ -399,7 +416,13 @@ void cfg80211_sme_disassoc(struct wireless_dev *wdev)
399 416
400void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev) 417void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev)
401{ 418{
402 cfg80211_sme_disassoc(wdev); 419 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
420
421 if (!wdev->conn)
422 return;
423
424 wdev->conn->state = CFG80211_CONN_ASSOC_FAILED;
425 schedule_work(&rdev->conn_work);
403} 426}
404 427
405static int cfg80211_sme_connect(struct wireless_dev *wdev, 428static int cfg80211_sme_connect(struct wireless_dev *wdev,
@@ -953,21 +976,19 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
953 struct net_device *dev, u16 reason, bool wextev) 976 struct net_device *dev, u16 reason, bool wextev)
954{ 977{
955 struct wireless_dev *wdev = dev->ieee80211_ptr; 978 struct wireless_dev *wdev = dev->ieee80211_ptr;
956 int err; 979 int err = 0;
957 980
958 ASSERT_WDEV_LOCK(wdev); 981 ASSERT_WDEV_LOCK(wdev);
959 982
960 kfree(wdev->connect_keys); 983 kfree(wdev->connect_keys);
961 wdev->connect_keys = NULL; 984 wdev->connect_keys = NULL;
962 985
963 if (wdev->conn) { 986 if (wdev->conn)
964 err = cfg80211_sme_disconnect(wdev, reason); 987 err = cfg80211_sme_disconnect(wdev, reason);
965 } else if (!rdev->ops->disconnect) { 988 else if (!rdev->ops->disconnect)
966 cfg80211_mlme_down(rdev, dev); 989 cfg80211_mlme_down(rdev, dev);
967 err = 0; 990 else if (wdev->current_bss)
968 } else {
969 err = rdev_disconnect(rdev, dev, reason); 991 err = rdev_disconnect(rdev, dev, reason);
970 }
971 992
972 return err; 993 return err;
973} 994}
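
Note: the sme.c changes extend the connect state machine rather than tearing state down inline. Auth and assoc timeouts now set CFG80211_CONN_AUTH_FAILED or CFG80211_CONN_ASSOC_FAILED and schedule the existing conn_work, so failure handling, deauth, and freeing all live in cfg80211_conn_do_work(). A compressed sketch of that shape; the states and handler here are illustrative, not the cfg80211 API:

#include <stdio.h>

enum conn_state {
	CONN_AUTHENTICATING,
	CONN_AUTH_FAILED,
	CONN_ASSOCIATING,
	CONN_ASSOC_FAILED,
	CONN_CONNECTED,
};

static enum conn_state state;

/* Event handlers only record the new state and kick the worker... */
static void auth_timeout(void)  { state = CONN_AUTH_FAILED; }
static void assoc_timeout(void) { state = CONN_ASSOC_FAILED; }

/* ...so every teardown decision lives in a single work function. */
static int conn_do_work(void)
{
	switch (state) {
	case CONN_AUTH_FAILED:
		puts("auth failed: report -ENOTCONN");
		return -1;
	case CONN_ASSOC_FAILED:
		puts("assoc failed: deauth, then report -ENOTCONN");
		return -1;
	default:
		return 0;
	}
}

int main(void)
{
	auth_timeout();
	conn_do_work();
	assoc_timeout();
	return conn_do_work() ? 1 : 0;
}
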
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 3f7682a387b7..eefbd10e408f 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1998,12 +1998,11 @@ static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address)
1998 * 1998 *
1999 * Create or update the port list entry 1999 * Create or update the port list entry
2000 */ 2000 */
2001static int smk_ipv6_port_check(struct sock *sk, struct sockaddr *address, 2001static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address,
2002 int act) 2002 int act)
2003{ 2003{
2004 __be16 *bep; 2004 __be16 *bep;
2005 __be32 *be32p; 2005 __be32 *be32p;
2006 struct sockaddr_in6 *addr6;
2007 struct smk_port_label *spp; 2006 struct smk_port_label *spp;
2008 struct socket_smack *ssp = sk->sk_security; 2007 struct socket_smack *ssp = sk->sk_security;
2009 struct smack_known *skp; 2008 struct smack_known *skp;
@@ -2025,10 +2024,9 @@ static int smk_ipv6_port_check(struct sock *sk, struct sockaddr *address,
2025 /* 2024 /*
2026 * Get the IP address and port from the address. 2025 * Get the IP address and port from the address.
2027 */ 2026 */
2028 addr6 = (struct sockaddr_in6 *)address; 2027 port = ntohs(address->sin6_port);
2029 port = ntohs(addr6->sin6_port); 2028 bep = (__be16 *)(&address->sin6_addr);
2030 bep = (__be16 *)(&addr6->sin6_addr); 2029 be32p = (__be32 *)(&address->sin6_addr);
2031 be32p = (__be32 *)(&addr6->sin6_addr);
2032 2030
2033 /* 2031 /*
2034 * It's remote, so port lookup does no good. 2032 * It's remote, so port lookup does no good.
@@ -2060,9 +2058,9 @@ auditout:
2060 ad.a.u.net->family = sk->sk_family; 2058 ad.a.u.net->family = sk->sk_family;
2061 ad.a.u.net->dport = port; 2059 ad.a.u.net->dport = port;
2062 if (act == SMK_RECEIVING) 2060 if (act == SMK_RECEIVING)
2063 ad.a.u.net->v6info.saddr = addr6->sin6_addr; 2061 ad.a.u.net->v6info.saddr = address->sin6_addr;
2064 else 2062 else
2065 ad.a.u.net->v6info.daddr = addr6->sin6_addr; 2063 ad.a.u.net->v6info.daddr = address->sin6_addr;
2066#endif 2064#endif
2067 return smk_access(skp, object, MAY_WRITE, &ad); 2065 return smk_access(skp, object, MAY_WRITE, &ad);
2068} 2066}
@@ -2201,7 +2199,8 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
2201 case PF_INET6: 2199 case PF_INET6:
2202 if (addrlen < sizeof(struct sockaddr_in6)) 2200 if (addrlen < sizeof(struct sockaddr_in6))
2203 return -EINVAL; 2201 return -EINVAL;
2204 rc = smk_ipv6_port_check(sock->sk, sap, SMK_CONNECTING); 2202 rc = smk_ipv6_port_check(sock->sk, (struct sockaddr_in6 *)sap,
2203 SMK_CONNECTING);
2205 break; 2204 break;
2206 } 2205 }
2207 return rc; 2206 return rc;
@@ -3034,7 +3033,7 @@ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
3034 int size) 3033 int size)
3035{ 3034{
3036 struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name; 3035 struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name;
3037 struct sockaddr *sap = (struct sockaddr *) msg->msg_name; 3036 struct sockaddr_in6 *sap = (struct sockaddr_in6 *) msg->msg_name;
3038 int rc = 0; 3037 int rc = 0;
3039 3038
3040 /* 3039 /*
@@ -3121,9 +3120,8 @@ static struct smack_known *smack_from_secattr(struct netlbl_lsm_secattr *sap,
3121 return smack_net_ambient; 3120 return smack_net_ambient;
3122} 3121}
3123 3122
3124static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr *sap) 3123static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip)
3125{ 3124{
3126 struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
3127 u8 nexthdr; 3125 u8 nexthdr;
3128 int offset; 3126 int offset;
3129 int proto = -EINVAL; 3127 int proto = -EINVAL;
@@ -3181,7 +3179,7 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
3181 struct netlbl_lsm_secattr secattr; 3179 struct netlbl_lsm_secattr secattr;
3182 struct socket_smack *ssp = sk->sk_security; 3180 struct socket_smack *ssp = sk->sk_security;
3183 struct smack_known *skp; 3181 struct smack_known *skp;
3184 struct sockaddr sadd; 3182 struct sockaddr_in6 sadd;
3185 int rc = 0; 3183 int rc = 0;
3186 struct smk_audit_info ad; 3184 struct smk_audit_info ad;
3187#ifdef CONFIG_AUDIT 3185#ifdef CONFIG_AUDIT
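
Note: the Smack changes above are more than cosmetic typing. smack_socket_sock_rcv_skb() used to place a struct sockaddr (16 bytes) on the stack and let smk_skb_to_addr_ipv6() write a struct sockaddr_in6 (28 bytes on common ABIs) through it, overrunning the buffer. Passing sockaddr_in6 end to end removes both the casts and the overflow. The size mismatch is easy to demonstrate:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	/* On Linux/glibc this typically prints 16 vs 28: writing an
	 * IPv6 address through a sockaddr-sized buffer overruns it. */
	printf("sizeof(struct sockaddr)     = %zu\n",
	       sizeof(struct sockaddr));
	printf("sizeof(struct sockaddr_in6) = %zu\n",
	       sizeof(struct sockaddr_in6));
	return 0;
}
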
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 99db892d7299..98969541cbcc 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -743,7 +743,7 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
743 mutex_lock(&stream->device->lock); 743 mutex_lock(&stream->device->lock);
744 switch (_IOC_NR(cmd)) { 744 switch (_IOC_NR(cmd)) {
745 case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION): 745 case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
746 put_user(SNDRV_COMPRESS_VERSION, 746 retval = put_user(SNDRV_COMPRESS_VERSION,
747 (int __user *)arg) ? -EFAULT : 0; 747 (int __user *)arg) ? -EFAULT : 0;
748 break; 748 break;
749 case _IOC_NR(SNDRV_COMPRESS_GET_CAPS): 749 case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 7c11d46b84d3..48a9d004d6d9 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -860,7 +860,7 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
860 } 860 }
861 } 861 }
862 if (id < 0 && quirk) { 862 if (id < 0 && quirk) {
863 for (q = quirk; q->subvendor; q++) { 863 for (q = quirk; q->subvendor || q->subdevice; q++) {
864 unsigned int vendorid = 864 unsigned int vendorid =
865 q->subdevice | (q->subvendor << 16); 865 q->subdevice | (q->subvendor << 16);
866 unsigned int mask = 0xffff0000 | q->subdevice_mask; 866 unsigned int mask = 0xffff0000 | q->subdevice_mask;
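
Note: snd_hda_pick_fixup() previously treated subvendor == 0 as the table terminator, which made legitimate entries with a zero subvendor unreachable - exactly what the Mac Mini quirk added later in patch_sigmatel.c (SSID 0x0000:0x0100) needs. Scanning until both fields are zero keeps an all-zero sentinel while allowing zero in either field alone:

#include <stdio.h>

struct quirk {
	unsigned short subvendor, subdevice;
	const char *name;
};

static const struct quirk table[] = {
	{ 0x0000, 0x0100, "Mac Mini" },	/* valid entry, subvendor 0 */
	{ 0x106b, 0x0800, "Mac" },
	{ 0, 0, NULL },			/* terminator: both fields zero */
};

int main(void)
{
	const struct quirk *q;

	/* Testing 'q->subvendor' alone would stop at the very first
	 * entry; requiring both fields to be zero scans the table. */
	for (q = table; q->subvendor || q->subdevice; q++)
		printf("%04x:%04x %s\n", q->subvendor, q->subdevice, q->name);
	return 0;
}
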
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 8e77cbbad871..e3c7ba8d7582 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -522,7 +522,7 @@ static bool same_amp_caps(struct hda_codec *codec, hda_nid_t nid1,
522} 522}
523 523
524#define nid_has_mute(codec, nid, dir) \ 524#define nid_has_mute(codec, nid, dir) \
525 check_amp_caps(codec, nid, dir, AC_AMPCAP_MUTE) 525 check_amp_caps(codec, nid, dir, (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE))
526#define nid_has_volume(codec, nid, dir) \ 526#define nid_has_volume(codec, nid, dir) \
527 check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS) 527 check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS)
528 528
@@ -624,7 +624,7 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
624 if (enable) 624 if (enable)
625 val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT; 625 val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT;
626 } 626 }
627 if (caps & AC_AMPCAP_MUTE) { 627 if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
628 if (!enable) 628 if (!enable)
629 val |= HDA_AMP_MUTE; 629 val |= HDA_AMP_MUTE;
630 } 630 }
@@ -648,7 +648,7 @@ static unsigned int get_amp_mask_to_modify(struct hda_codec *codec,
648{ 648{
649 unsigned int mask = 0xff; 649 unsigned int mask = 0xff;
650 650
651 if (caps & AC_AMPCAP_MUTE) { 651 if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
652 if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL)) 652 if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL))
653 mask &= ~0x80; 653 mask &= ~0x80;
654 } 654 }
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 8860dd529520..bf5e58ec1efe 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -555,6 +555,9 @@ struct azx {
555#ifdef CONFIG_SND_HDA_DSP_LOADER 555#ifdef CONFIG_SND_HDA_DSP_LOADER
556 struct azx_dev saved_azx_dev; 556 struct azx_dev saved_azx_dev;
557#endif 557#endif
558
559 /* secondary power domain for hdmi audio under vga device */
560 struct dev_pm_domain hdmi_pm_domain;
558}; 561};
559 562
560#define CREATE_TRACE_POINTS 563#define CREATE_TRACE_POINTS
@@ -1397,8 +1400,9 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
1397 int i, ok; 1400 int i, ok;
1398 1401
1399#ifdef CONFIG_PM_RUNTIME 1402#ifdef CONFIG_PM_RUNTIME
1400 if (chip->pci->dev.power.runtime_status != RPM_ACTIVE) 1403 if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
1401 return IRQ_NONE; 1404 if (chip->pci->dev.power.runtime_status != RPM_ACTIVE)
1405 return IRQ_NONE;
1402#endif 1406#endif
1403 1407
1404 spin_lock(&chip->reg_lock); 1408 spin_lock(&chip->reg_lock);
@@ -1409,7 +1413,7 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
1409 } 1413 }
1410 1414
1411 status = azx_readl(chip, INTSTS); 1415 status = azx_readl(chip, INTSTS);
1412 if (status == 0) { 1416 if (status == 0 || status == 0xffffffff) {
1413 spin_unlock(&chip->reg_lock); 1417 spin_unlock(&chip->reg_lock);
1414 return IRQ_NONE; 1418 return IRQ_NONE;
1415 } 1419 }
@@ -2971,6 +2975,12 @@ static int azx_runtime_suspend(struct device *dev)
2971 struct snd_card *card = dev_get_drvdata(dev); 2975 struct snd_card *card = dev_get_drvdata(dev);
2972 struct azx *chip = card->private_data; 2976 struct azx *chip = card->private_data;
2973 2977
2978 if (chip->disabled)
2979 return 0;
2980
2981 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
2982 return 0;
2983
2974 azx_stop_chip(chip); 2984 azx_stop_chip(chip);
2975 azx_enter_link_reset(chip); 2985 azx_enter_link_reset(chip);
2976 azx_clear_irq_pending(chip); 2986 azx_clear_irq_pending(chip);
@@ -2984,6 +2994,12 @@ static int azx_runtime_resume(struct device *dev)
2984 struct snd_card *card = dev_get_drvdata(dev); 2994 struct snd_card *card = dev_get_drvdata(dev);
2985 struct azx *chip = card->private_data; 2995 struct azx *chip = card->private_data;
2986 2996
2997 if (chip->disabled)
2998 return 0;
2999
3000 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
3001 return 0;
3002
2987 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) 3003 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
2988 hda_display_power(true); 3004 hda_display_power(true);
2989 azx_init_pci(chip); 3005 azx_init_pci(chip);
@@ -2996,6 +3012,9 @@ static int azx_runtime_idle(struct device *dev)
2996 struct snd_card *card = dev_get_drvdata(dev); 3012 struct snd_card *card = dev_get_drvdata(dev);
2997 struct azx *chip = card->private_data; 3013 struct azx *chip = card->private_data;
2998 3014
3015 if (chip->disabled)
3016 return 0;
3017
2999 if (!power_save_controller || 3018 if (!power_save_controller ||
3000 !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) 3019 !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
3001 return -EBUSY; 3020 return -EBUSY;
@@ -3078,13 +3097,19 @@ static void azx_vs_set_state(struct pci_dev *pci,
3078 "%s: %s via VGA-switcheroo\n", pci_name(chip->pci), 3097 "%s: %s via VGA-switcheroo\n", pci_name(chip->pci),
3079 disabled ? "Disabling" : "Enabling"); 3098 disabled ? "Disabling" : "Enabling");
3080 if (disabled) { 3099 if (disabled) {
3100 pm_runtime_put_sync_suspend(&pci->dev);
3081 azx_suspend(&pci->dev); 3101 azx_suspend(&pci->dev);
3102 /* when we get suspended by vga switcheroo we end up in D3cold,
3103 * however we have no ACPI handle, so pci/acpi can't put us there,
3104 * put ourselves there */
3105 pci->current_state = PCI_D3cold;
3082 chip->disabled = true; 3106 chip->disabled = true;
3083 if (snd_hda_lock_devices(chip->bus)) 3107 if (snd_hda_lock_devices(chip->bus))
3084 snd_printk(KERN_WARNING SFX "%s: Cannot lock devices!\n", 3108 snd_printk(KERN_WARNING SFX "%s: Cannot lock devices!\n",
3085 pci_name(chip->pci)); 3109 pci_name(chip->pci));
3086 } else { 3110 } else {
3087 snd_hda_unlock_devices(chip->bus); 3111 snd_hda_unlock_devices(chip->bus);
3112 pm_runtime_get_noresume(&pci->dev);
3088 chip->disabled = false; 3113 chip->disabled = false;
3089 azx_resume(&pci->dev); 3114 azx_resume(&pci->dev);
3090 } 3115 }
@@ -3139,6 +3164,9 @@ static int register_vga_switcheroo(struct azx *chip)
3139 if (err < 0) 3164 if (err < 0)
3140 return err; 3165 return err;
3141 chip->vga_switcheroo_registered = 1; 3166 chip->vga_switcheroo_registered = 1;
3167
3168 /* register as an optimus hdmi audio power domain */
3169 vga_switcheroo_init_domain_pm_optimus_hdmi_audio(&chip->pci->dev, &chip->hdmi_pm_domain);
3142 return 0; 3170 return 0;
3143} 3171}
3144#else 3172#else
@@ -3887,7 +3915,7 @@ static int azx_probe_continue(struct azx *chip)
3887 power_down_all_codecs(chip); 3915 power_down_all_codecs(chip);
3888 azx_notifier_register(chip); 3916 azx_notifier_register(chip);
3889 azx_add_card_list(chip); 3917 azx_add_card_list(chip);
3890 if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME) 3918 if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME) || chip->use_vga_switcheroo)
3891 pm_runtime_put_noidle(&pci->dev); 3919 pm_runtime_put_noidle(&pci->dev);
3892 3920
3893 return 0; 3921 return 0;
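
Note: among the hda_intel.c fixes, the interrupt-handler hunk is the most broadly applicable. A PCI device that has been powered off or unplugged reads back all ones, so INTSTS == 0xffffffff means "device gone", not "every interrupt pending", and the handler now bails out with IRQ_NONE for both 0 and ~0. A guard of the same shape:

#include <stdint.h>
#include <stdio.h>

/* Returns 1 when a 32-bit MMIO status value can be acted on.  All
 * ones is what a read from an absent or powered-down PCI device
 * returns, so it is treated like "nothing pending", not live bits. */
static int status_is_usable(uint32_t status)
{
	return status != 0 && status != 0xffffffff;
}

int main(void)
{
	printf("%d %d %d\n",
	       status_is_usable(0),		/* nothing pending */
	       status_is_usable(0xffffffff),	/* device gone */
	       status_is_usable(0x4));		/* real status */
	return 0;
}
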
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8bd226149868..f303cd898515 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1031,6 +1031,7 @@ enum {
1031 ALC880_FIXUP_GPIO2, 1031 ALC880_FIXUP_GPIO2,
1032 ALC880_FIXUP_MEDION_RIM, 1032 ALC880_FIXUP_MEDION_RIM,
1033 ALC880_FIXUP_LG, 1033 ALC880_FIXUP_LG,
1034 ALC880_FIXUP_LG_LW25,
1034 ALC880_FIXUP_W810, 1035 ALC880_FIXUP_W810,
1035 ALC880_FIXUP_EAPD_COEF, 1036 ALC880_FIXUP_EAPD_COEF,
1036 ALC880_FIXUP_TCL_S700, 1037 ALC880_FIXUP_TCL_S700,
@@ -1089,6 +1090,14 @@ static const struct hda_fixup alc880_fixups[] = {
1089 { } 1090 { }
1090 } 1091 }
1091 }, 1092 },
1093 [ALC880_FIXUP_LG_LW25] = {
1094 .type = HDA_FIXUP_PINS,
1095 .v.pins = (const struct hda_pintbl[]) {
1096 { 0x1a, 0x0181344f }, /* line-in */
1097 { 0x1b, 0x0321403f }, /* headphone */
1098 { }
1099 }
1100 },
1092 [ALC880_FIXUP_W810] = { 1101 [ALC880_FIXUP_W810] = {
1093 .type = HDA_FIXUP_PINS, 1102 .type = HDA_FIXUP_PINS,
1094 .v.pins = (const struct hda_pintbl[]) { 1103 .v.pins = (const struct hda_pintbl[]) {
@@ -1341,6 +1350,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
1341 SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG), 1350 SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG),
1342 SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG), 1351 SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG),
1343 SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG), 1352 SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG),
1353 SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_FIXUP_LG_LW25),
1344 SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700), 1354 SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700),
1345 1355
1346 /* Below is the copied entries from alc880_quirks.c. 1356 /* Below is the copied entries from alc880_quirks.c.
@@ -4329,6 +4339,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
4329 SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), 4339 SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
4330 SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE), 4340 SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
4331 SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC), 4341 SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
4342 SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
4332 SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), 4343 SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
4333 SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 4344 SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
4334 SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 4345 SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index e2f83591161b..6d1924c19abf 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -417,9 +417,11 @@ static void stac_update_outputs(struct hda_codec *codec)
417 val &= ~spec->eapd_mask; 417 val &= ~spec->eapd_mask;
418 else 418 else
419 val |= spec->eapd_mask; 419 val |= spec->eapd_mask;
420 if (spec->gpio_data != val) 420 if (spec->gpio_data != val) {
421 spec->gpio_data = val;
421 stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, 422 stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir,
422 val); 423 val);
424 }
423 } 425 }
424} 426}
425 427
@@ -2817,6 +2819,7 @@ static const struct hda_pintbl ecs202_pin_configs[] = {
2817 2819
2818/* codec SSIDs for Intel Mac sharing the same PCI SSID 8384:7680 */ 2820/* codec SSIDs for Intel Mac sharing the same PCI SSID 8384:7680 */
2819static const struct snd_pci_quirk stac922x_intel_mac_fixup_tbl[] = { 2821static const struct snd_pci_quirk stac922x_intel_mac_fixup_tbl[] = {
2822 SND_PCI_QUIRK(0x0000, 0x0100, "Mac Mini", STAC_INTEL_MAC_V3),
2820 SND_PCI_QUIRK(0x106b, 0x0800, "Mac", STAC_INTEL_MAC_V1), 2823 SND_PCI_QUIRK(0x106b, 0x0800, "Mac", STAC_INTEL_MAC_V1),
2821 SND_PCI_QUIRK(0x106b, 0x0600, "Mac", STAC_INTEL_MAC_V2), 2824 SND_PCI_QUIRK(0x106b, 0x0600, "Mac", STAC_INTEL_MAC_V2),
2822 SND_PCI_QUIRK(0x106b, 0x0700, "Mac", STAC_INTEL_MAC_V2), 2825 SND_PCI_QUIRK(0x106b, 0x0700, "Mac", STAC_INTEL_MAC_V2),
@@ -3231,7 +3234,7 @@ static const struct hda_fixup stac927x_fixups[] = {
3231 /* configure the analog microphone on some laptops */ 3234 /* configure the analog microphone on some laptops */
3232 { 0x0c, 0x90a79130 }, 3235 { 0x0c, 0x90a79130 },
3233 /* correct the front output jack as a hp out */ 3236 /* correct the front output jack as a hp out */
3234 { 0x0f, 0x0227011f }, 3237 { 0x0f, 0x0221101f },
3235 /* correct the front input jack as a mic */ 3238 /* correct the front input jack as a mic */
3236 { 0x0e, 0x02a79130 }, 3239 { 0x0e, 0x02a79130 },
3237 {} 3240 {}
@@ -3612,20 +3615,18 @@ static int stac_parse_auto_config(struct hda_codec *codec)
3612static int stac_init(struct hda_codec *codec) 3615static int stac_init(struct hda_codec *codec)
3613{ 3616{
3614 struct sigmatel_spec *spec = codec->spec; 3617 struct sigmatel_spec *spec = codec->spec;
3615 unsigned int gpio;
3616 int i; 3618 int i;
3617 3619
3618 /* override some hints */ 3620 /* override some hints */
3619 stac_store_hints(codec); 3621 stac_store_hints(codec);
3620 3622
3621 /* set up GPIO */ 3623 /* set up GPIO */
3622 gpio = spec->gpio_data;
3623 /* turn on EAPD statically when spec->eapd_switch isn't set. 3624 /* turn on EAPD statically when spec->eapd_switch isn't set.
3624 * otherwise, unsol event will turn it on/off dynamically 3625 * otherwise, unsol event will turn it on/off dynamically
3625 */ 3626 */
3626 if (!spec->eapd_switch) 3627 if (!spec->eapd_switch)
3627 gpio |= spec->eapd_mask; 3628 spec->gpio_data |= spec->eapd_mask;
3628 stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, gpio); 3629 stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data);
3629 3630
3630 snd_hda_gen_init(codec); 3631 snd_hda_gen_init(codec);
3631 3632
@@ -3915,6 +3916,7 @@ static void stac_setup_gpio(struct hda_codec *codec)
3915{ 3916{
3916 struct sigmatel_spec *spec = codec->spec; 3917 struct sigmatel_spec *spec = codec->spec;
3917 3918
3919 spec->gpio_mask |= spec->eapd_mask;
3918 if (spec->gpio_led) { 3920 if (spec->gpio_led) {
3919 if (!spec->vref_mute_led_nid) { 3921 if (!spec->vref_mute_led_nid) {
3920 spec->gpio_mask |= spec->gpio_led; 3922 spec->gpio_mask |= spec->gpio_led;
diff --git a/sound/soc/au1x/ac97c.c b/sound/soc/au1x/ac97c.c
index d6f7694fcad4..c8a2de103c5f 100644
--- a/sound/soc/au1x/ac97c.c
+++ b/sound/soc/au1x/ac97c.c
@@ -341,7 +341,7 @@ static struct platform_driver au1xac97c_driver = {
341 .remove = au1xac97c_drvremove, 341 .remove = au1xac97c_drvremove,
342}; 342};
343 343
344module_platform_driver(&au1xac97c_driver); 344module_platform_driver(au1xac97c_driver);
345 345
346MODULE_LICENSE("GPL"); 346MODULE_LICENSE("GPL");
347MODULE_DESCRIPTION("Au1000/1500/1100 AC97C ASoC driver"); 347MODULE_DESCRIPTION("Au1000/1500/1100 AC97C ASoC driver");
diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c
index efb1daecd0dd..e82eb373a731 100644
--- a/sound/soc/blackfin/bf5xx-ac97.c
+++ b/sound/soc/blackfin/bf5xx-ac97.c
@@ -294,11 +294,12 @@ static int asoc_bfin_ac97_probe(struct platform_device *pdev)
294 /* Request PB3 as reset pin */ 294 /* Request PB3 as reset pin */
295 ret = devm_gpio_request_one(&pdev->dev, 295 ret = devm_gpio_request_one(&pdev->dev,
296 CONFIG_SND_BF5XX_RESET_GPIO_NUM, 296 CONFIG_SND_BF5XX_RESET_GPIO_NUM,
297 GPIOF_OUT_INIT_HIGH, "SND_AD198x RESET") { 297 GPIOF_OUT_INIT_HIGH, "SND_AD198x RESET");
298 if (ret) {
298 dev_err(&pdev->dev, 299 dev_err(&pdev->dev,
299 "Failed to request GPIO_%d for reset: %d\n", 300 "Failed to request GPIO_%d for reset: %d\n",
300 CONFIG_SND_BF5XX_RESET_GPIO_NUM, ret); 301 CONFIG_SND_BF5XX_RESET_GPIO_NUM, ret);
301 goto gpio_err; 302 return ret;
302 } 303 }
303#endif 304#endif
304 305
diff --git a/sound/soc/blackfin/bf5xx-ac97.h b/sound/soc/blackfin/bf5xx-ac97.h
index 15c635e33f4d..0c3e22d90a8d 100644
--- a/sound/soc/blackfin/bf5xx-ac97.h
+++ b/sound/soc/blackfin/bf5xx-ac97.h
@@ -9,7 +9,6 @@
9#ifndef _BF5XX_AC97_H 9#ifndef _BF5XX_AC97_H
10#define _BF5XX_AC97_H 10#define _BF5XX_AC97_H
11 11
12extern struct snd_ac97_bus_ops bf5xx_ac97_ops;
13extern struct snd_ac97 *ac97; 12extern struct snd_ac97 *ac97;
14/* Frame format in memory, only support stereo currently */ 13/* Frame format in memory, only support stereo currently */
15struct ac97_frame { 14struct ac97_frame {
diff --git a/sound/soc/cirrus/ep93xx-ac97.c b/sound/soc/cirrus/ep93xx-ac97.c
index ac73c607410a..04491f0e8d1b 100644
--- a/sound/soc/cirrus/ep93xx-ac97.c
+++ b/sound/soc/cirrus/ep93xx-ac97.c
@@ -102,13 +102,13 @@ static struct ep93xx_ac97_info *ep93xx_ac97_info;
102 102
103static struct ep93xx_dma_data ep93xx_ac97_pcm_out = { 103static struct ep93xx_dma_data ep93xx_ac97_pcm_out = {
104 .name = "ac97-pcm-out", 104 .name = "ac97-pcm-out",
105 .dma_port = EP93XX_DMA_AAC1, 105 .port = EP93XX_DMA_AAC1,
106 .direction = DMA_MEM_TO_DEV, 106 .direction = DMA_MEM_TO_DEV,
107}; 107};
108 108
109static struct ep93xx_dma_data ep93xx_ac97_pcm_in = { 109static struct ep93xx_dma_data ep93xx_ac97_pcm_in = {
110 .name = "ac97-pcm-in", 110 .name = "ac97-pcm-in",
111 .dma_port = EP93XX_DMA_AAC1, 111 .port = EP93XX_DMA_AAC1,
112 .direction = DMA_DEV_TO_MEM, 112 .direction = DMA_DEV_TO_MEM,
113}; 113};
114 114
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 987f728718c5..be2ba1b6fe4a 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -195,6 +195,8 @@ static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
195 195
196static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0); 196static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0);
197 197
198static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0);
199
198static const unsigned int limiter_tlv[] = { 200static const unsigned int limiter_tlv[] = {
199 TLV_DB_RANGE_HEAD(2), 201 TLV_DB_RANGE_HEAD(2),
200 0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0), 202 0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
@@ -451,7 +453,8 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
451 SOC_ENUM("Beep Pitch", beep_pitch_enum), 453 SOC_ENUM("Beep Pitch", beep_pitch_enum),
452 SOC_ENUM("Beep on Time", beep_ontime_enum), 454 SOC_ENUM("Beep on Time", beep_ontime_enum),
453 SOC_ENUM("Beep off Time", beep_offtime_enum), 455 SOC_ENUM("Beep off Time", beep_offtime_enum),
454 SOC_SINGLE_TLV("Beep Volume", CS42L52_BEEP_VOL, 0, 0x1f, 0x07, hl_tlv), 456 SOC_SINGLE_SX_TLV("Beep Volume", CS42L52_BEEP_VOL,
457 0, 0x07, 0x1f, beep_tlv),
455 SOC_SINGLE("Beep Mixer Switch", CS42L52_BEEP_TONE_CTL, 5, 1, 1), 458 SOC_SINGLE("Beep Mixer Switch", CS42L52_BEEP_TONE_CTL, 5, 1, 1),
456 SOC_ENUM("Beep Treble Corner Freq", beep_treble_enum), 459 SOC_ENUM("Beep Treble Corner Freq", beep_treble_enum),
457 SOC_ENUM("Beep Bass Corner Freq", beep_bass_enum), 460 SOC_ENUM("Beep Bass Corner Freq", beep_bass_enum),
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 3eeada57e87d..566a367c94fa 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -1612,7 +1612,7 @@ static int max98088_dai2_digital_mute(struct snd_soc_dai *codec_dai, int mute)
1612 1612
1613static void max98088_sync_cache(struct snd_soc_codec *codec) 1613static void max98088_sync_cache(struct snd_soc_codec *codec)
1614{ 1614{
1615 u16 *reg_cache = codec->reg_cache; 1615 u8 *reg_cache = codec->reg_cache;
1616 int i; 1616 int i;
1617 1617
1618 if (!codec->cache_sync) 1618 if (!codec->cache_sync)
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index d659d3adcfb3..760e8bfeacaa 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -153,6 +153,8 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
153static int power_vag_event(struct snd_soc_dapm_widget *w, 153static int power_vag_event(struct snd_soc_dapm_widget *w,
154 struct snd_kcontrol *kcontrol, int event) 154 struct snd_kcontrol *kcontrol, int event)
155{ 155{
156 const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
157
156 switch (event) { 158 switch (event) {
157 case SND_SOC_DAPM_POST_PMU: 159 case SND_SOC_DAPM_POST_PMU:
158 snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, 160 snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
@@ -160,9 +162,17 @@ static int power_vag_event(struct snd_soc_dapm_widget *w,
160 break; 162 break;
161 163
162 case SND_SOC_DAPM_PRE_PMD: 164 case SND_SOC_DAPM_PRE_PMD:
163 snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, 165 /*
164 SGTL5000_VAG_POWERUP, 0); 166 * Don't clear VAG_POWERUP, when both DAC and ADC are
165 msleep(400); 167 * operational to prevent inadvertently starving the
168 * other one of them.
169 */
170 if ((snd_soc_read(w->codec, SGTL5000_CHIP_ANA_POWER) &
171 mask) != mask) {
172 snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
173 SGTL5000_VAG_POWERUP, 0);
174 msleep(400);
175 }
166 break; 176 break;
167 default: 177 default:
168 break; 178 break;
@@ -388,7 +398,7 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
388 SOC_DOUBLE("Capture Volume", SGTL5000_CHIP_ANA_ADC_CTRL, 0, 4, 0xf, 0), 398 SOC_DOUBLE("Capture Volume", SGTL5000_CHIP_ANA_ADC_CTRL, 0, 4, 0xf, 0),
389 SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)", 399 SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)",
390 SGTL5000_CHIP_ANA_ADC_CTRL, 400 SGTL5000_CHIP_ANA_ADC_CTRL,
391 8, 2, 0, capture_6db_attenuate), 401 8, 1, 0, capture_6db_attenuate),
392 SOC_SINGLE("Capture ZC Switch", SGTL5000_CHIP_ANA_CTRL, 1, 1, 0), 402 SOC_SINGLE("Capture ZC Switch", SGTL5000_CHIP_ANA_CTRL, 1, 1, 0),
393 403
394 SOC_DOUBLE_TLV("Headphone Playback Volume", 404 SOC_DOUBLE_TLV("Headphone Playback Volume",
@@ -1527,6 +1537,9 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
1527 if (IS_ERR(sgtl5000->mclk)) { 1537 if (IS_ERR(sgtl5000->mclk)) {
1528 ret = PTR_ERR(sgtl5000->mclk); 1538 ret = PTR_ERR(sgtl5000->mclk);
1529 dev_err(&client->dev, "Failed to get mclock: %d\n", ret); 1539 dev_err(&client->dev, "Failed to get mclock: %d\n", ret);
1540 /* Defer the probe to see if the clk will be provided later */
1541 if (ret == -ENOENT)
1542 return -EPROBE_DEFER;
1530 return ret; 1543 return ret;
1531 } 1544 }
1532 1545
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index f5e835662cdc..10adc4145d46 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -410,6 +410,16 @@ static int wm0010_firmware_load(const char *name, struct snd_soc_codec *codec)
410 rec->command, rec->length); 410 rec->command, rec->length);
411 len = rec->length + 8; 411 len = rec->length + 8;
412 412
413 xfer = kzalloc(sizeof(*xfer), GFP_KERNEL);
414 if (!xfer) {
415 dev_err(codec->dev, "Failed to allocate xfer\n");
416 ret = -ENOMEM;
417 goto abort;
418 }
419
420 xfer->codec = codec;
421 list_add_tail(&xfer->list, &xfer_list);
422
413 out = kzalloc(len, GFP_KERNEL); 423 out = kzalloc(len, GFP_KERNEL);
414 if (!out) { 424 if (!out) {
415 dev_err(codec->dev, 425 dev_err(codec->dev,
@@ -417,6 +427,7 @@ static int wm0010_firmware_load(const char *name, struct snd_soc_codec *codec)
417 ret = -ENOMEM; 427 ret = -ENOMEM;
418 goto abort1; 428 goto abort1;
419 } 429 }
430 xfer->t.rx_buf = out;
420 431
421 img = kzalloc(len, GFP_KERNEL); 432 img = kzalloc(len, GFP_KERNEL);
422 if (!img) { 433 if (!img) {
@@ -425,24 +436,13 @@ static int wm0010_firmware_load(const char *name, struct snd_soc_codec *codec)
425 ret = -ENOMEM; 436 ret = -ENOMEM;
426 goto abort1; 437 goto abort1;
427 } 438 }
439 xfer->t.tx_buf = img;
428 440
429 byte_swap_64((u64 *)&rec->command, img, len); 441 byte_swap_64((u64 *)&rec->command, img, len);
430 442
431 xfer = kzalloc(sizeof(*xfer), GFP_KERNEL);
432 if (!xfer) {
433 dev_err(codec->dev, "Failed to allocate xfer\n");
434 ret = -ENOMEM;
435 goto abort1;
436 }
437
438 xfer->codec = codec;
439 list_add_tail(&xfer->list, &xfer_list);
440
441 spi_message_init(&xfer->m); 443 spi_message_init(&xfer->m);
442 xfer->m.complete = wm0010_boot_xfer_complete; 444 xfer->m.complete = wm0010_boot_xfer_complete;
443 xfer->m.context = xfer; 445 xfer->m.context = xfer;
444 xfer->t.tx_buf = img;
445 xfer->t.rx_buf = out;
446 xfer->t.len = len; 446 xfer->t.len = len;
447 xfer->t.bits_per_word = 8; 447 xfer->t.bits_per_word = 8;
448 448
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 0ec070cf7231..d82ee386eab5 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3908,10 +3908,8 @@ int snd_soc_add_platform(struct device *dev, struct snd_soc_platform *platform,
3908{ 3908{
3909 /* create platform component name */ 3909 /* create platform component name */
3910 platform->name = fmt_single_name(dev, &platform->id); 3910 platform->name = fmt_single_name(dev, &platform->id);
3911 if (platform->name == NULL) { 3911 if (platform->name == NULL)
3912 kfree(platform);
3913 return -ENOMEM; 3912 return -ENOMEM;
3914 }
3915 3913
3916 platform->dev = dev; 3914 platform->dev = dev;
3917 platform->driver = platform_drv; 3915 platform->driver = platform_drv;
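
Note: the soc-core.c change is an ownership fix. snd_soc_add_platform() receives 'platform' from its caller and must not kfree() it on the error path, or the caller's own cleanup frees it a second time. A userspace illustration of the rule that whoever allocates, frees; the names are invented:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct platform { char *name; };

/* The callee may fail, but it never frees memory it did not allocate. */
static int add_platform(struct platform *p, const char *name)
{
	p->name = strdup(name);
	if (!p->name)
		return -1;	/* report failure; ownership of p stays put */
	return 0;
}

int main(void)
{
	struct platform *p = calloc(1, sizeof(*p));

	if (!p)
		return 1;
	if (add_platform(p, "demo") < 0) {
		free(p);	/* the allocator cleans up - exactly once */
		return 1;
	}
	printf("registered %s\n", p->name);
	free(p->name);
	free(p);
	return 0;
}
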
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index b94190820e8c..4375c9f2b791 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -679,13 +679,14 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w)
679 return -EINVAL; 679 return -EINVAL;
680 } 680 }
681 681
682 path = list_first_entry(&w->sources, struct snd_soc_dapm_path, 682 if (list_empty(&w->sources)) {
683 list_sink);
684 if (!path) {
685 dev_err(dapm->dev, "ASoC: mux %s has no paths\n", w->name); 683 dev_err(dapm->dev, "ASoC: mux %s has no paths\n", w->name);
686 return -EINVAL; 684 return -EINVAL;
687 } 685 }
688 686
687 path = list_first_entry(&w->sources, struct snd_soc_dapm_path,
688 list_sink);
689
689 ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path); 690 ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path);
690 if (ret < 0) 691 if (ret < 0)
691 return ret; 692 return ret;
@@ -2733,7 +2734,7 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
2733 } 2734 }
2734 2735
2735 mutex_unlock(&card->dapm_mutex); 2736 mutex_unlock(&card->dapm_mutex);
2736 return 0; 2737 return change;
2737} 2738}
2738EXPORT_SYMBOL_GPL(snd_soc_dapm_put_volsw); 2739EXPORT_SYMBOL_GPL(snd_soc_dapm_put_volsw);
2739 2740
@@ -2861,7 +2862,6 @@ int snd_soc_dapm_put_enum_virt(struct snd_kcontrol *kcontrol,
2861 struct soc_enum *e = 2862 struct soc_enum *e =
2862 (struct soc_enum *)kcontrol->private_value; 2863 (struct soc_enum *)kcontrol->private_value;
2863 int change; 2864 int change;
2864 int ret = 0;
2865 int wi; 2865 int wi;
2866 2866
2867 if (ucontrol->value.enumerated.item[0] >= e->max) 2867 if (ucontrol->value.enumerated.item[0] >= e->max)
@@ -2881,7 +2881,7 @@ int snd_soc_dapm_put_enum_virt(struct snd_kcontrol *kcontrol,
2881 } 2881 }
2882 2882
2883 mutex_unlock(&card->dapm_mutex); 2883 mutex_unlock(&card->dapm_mutex);
2884 return ret; 2884 return change;
2885} 2885}
2886EXPORT_SYMBOL_GPL(snd_soc_dapm_put_enum_virt); 2886EXPORT_SYMBOL_GPL(snd_soc_dapm_put_enum_virt);
2887 2887
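
Note: dapm_new_mux() checked the result of list_first_entry() for NULL, but list_first_entry() never returns NULL - on an empty list it hands back the head itself reinterpreted as an entry. The emptiness test has to come first. The hazard, reproduced with a minimal intrusive list:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(head, type, member) \
	container_of((head)->next, type, member)

struct path { int id; struct list_head list; };

int main(void)
{
	struct list_head sources = { &sources, &sources };	/* empty */

	/* On an empty list, list_first_entry() yields the head recast
	 * as a struct path - a garbage pointer, never NULL.  Check
	 * emptiness first, as the fixed dapm_new_mux() does. */
	if (sources.next == &sources) {		/* list_empty() */
		fprintf(stderr, "mux has no paths\n");
		return 1;
	}

	struct path *p = list_first_entry(&sources, struct path, list);
	printf("first path id %d\n", p->id);
	return 0;
}
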
diff --git a/sound/soc/tegra/tegra20_ac97.c b/sound/soc/tegra/tegra20_ac97.c
index e58233f7df61..6c486625321b 100644
--- a/sound/soc/tegra/tegra20_ac97.c
+++ b/sound/soc/tegra/tegra20_ac97.c
@@ -389,9 +389,9 @@ static int tegra20_ac97_platform_probe(struct platform_device *pdev)
389 ac97->capture_dma_data.slave_id = of_dma[1]; 389 ac97->capture_dma_data.slave_id = of_dma[1];
390 390
391 ac97->playback_dma_data.addr = mem->start + TEGRA20_AC97_FIFO_TX1; 391 ac97->playback_dma_data.addr = mem->start + TEGRA20_AC97_FIFO_TX1;
392 ac97->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 392 ac97->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
393 ac97->capture_dma_data.maxburst = 4; 393 ac97->playback_dma_data.maxburst = 4;
394 ac97->capture_dma_data.slave_id = of_dma[0]; 394 ac97->playback_dma_data.slave_id = of_dma[1];
395 395
396 ret = tegra_asoc_utils_init(&ac97->util_data, &pdev->dev); 396 ret = tegra_asoc_utils_init(&ac97->util_data, &pdev->dev);
397 if (ret) 397 if (ret)
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index 5eaa12cdc6eb..551b3c93ce93 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -323,8 +323,8 @@ static int tegra20_spdif_platform_probe(struct platform_device *pdev)
323 } 323 }
324 324
325 spdif->playback_dma_data.addr = mem->start + TEGRA20_SPDIF_DATA_OUT; 325 spdif->playback_dma_data.addr = mem->start + TEGRA20_SPDIF_DATA_OUT;
326 spdif->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 326 spdif->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
327 spdif->capture_dma_data.maxburst = 4; 327 spdif->playback_dma_data.maxburst = 4;
328 spdif->playback_dma_data.slave_id = dmareq->start; 328 spdif->playback_dma_data.slave_id = dmareq->start;
329 329
330 pm_runtime_enable(&pdev->dev); 330 pm_runtime_enable(&pdev->dev);
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index d04146cad61f..47565fd04505 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -228,7 +228,7 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
228 reg = TEGRA30_I2S_CIF_RX_CTRL; 228 reg = TEGRA30_I2S_CIF_RX_CTRL;
229 } else { 229 } else {
230 val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX; 230 val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX;
231 reg = TEGRA30_I2S_CIF_RX_CTRL; 231 reg = TEGRA30_I2S_CIF_TX_CTRL;
232 } 232 }
233 233
234 regmap_write(i2s->regmap, reg, val); 234 regmap_write(i2s->regmap, reg, val);
diff --git a/sound/usb/6fire/comm.c b/sound/usb/6fire/comm.c
index 9e6e3ffd86bb..23452ee617e1 100644
--- a/sound/usb/6fire/comm.c
+++ b/sound/usb/6fire/comm.c
@@ -110,19 +110,37 @@ static int usb6fire_comm_send_buffer(u8 *buffer, struct usb_device *dev)
110static int usb6fire_comm_write8(struct comm_runtime *rt, u8 request, 110static int usb6fire_comm_write8(struct comm_runtime *rt, u8 request,
111 u8 reg, u8 value) 111 u8 reg, u8 value)
112{ 112{
113 u8 buffer[13]; /* 13: maximum length of message */ 113 u8 *buffer;
114 int ret;
115
116 /* 13: maximum length of message */
117 buffer = kmalloc(13, GFP_KERNEL);
118 if (!buffer)
119 return -ENOMEM;
114 120
115 usb6fire_comm_init_buffer(buffer, 0x00, request, reg, value, 0x00); 121 usb6fire_comm_init_buffer(buffer, 0x00, request, reg, value, 0x00);
116 return usb6fire_comm_send_buffer(buffer, rt->chip->dev); 122 ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
123
124 kfree(buffer);
125 return ret;
117} 126}
118 127
119static int usb6fire_comm_write16(struct comm_runtime *rt, u8 request, 128static int usb6fire_comm_write16(struct comm_runtime *rt, u8 request,
120 u8 reg, u8 vl, u8 vh) 129 u8 reg, u8 vl, u8 vh)
121{ 130{
122 u8 buffer[13]; /* 13: maximum length of message */ 131 u8 *buffer;
132 int ret;
133
134 /* 13: maximum length of message */
135 buffer = kmalloc(13, GFP_KERNEL);
136 if (!buffer)
137 return -ENOMEM;
123 138
124 usb6fire_comm_init_buffer(buffer, 0x00, request, reg, vl, vh); 139 usb6fire_comm_init_buffer(buffer, 0x00, request, reg, vl, vh);
125 return usb6fire_comm_send_buffer(buffer, rt->chip->dev); 140 ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
141
142 kfree(buffer);
143 return ret;
126} 144}
127 145
128int usb6fire_comm_init(struct sfire_chip *chip) 146int usb6fire_comm_init(struct sfire_chip *chip)
@@ -135,6 +153,12 @@ int usb6fire_comm_init(struct sfire_chip *chip)
135 if (!rt) 153 if (!rt)
136 return -ENOMEM; 154 return -ENOMEM;
137 155
156 rt->receiver_buffer = kzalloc(COMM_RECEIVER_BUFSIZE, GFP_KERNEL);
157 if (!rt->receiver_buffer) {
158 kfree(rt);
159 return -ENOMEM;
160 }
161
138 urb = &rt->receiver; 162 urb = &rt->receiver;
139 rt->serial = 1; 163 rt->serial = 1;
140 rt->chip = chip; 164 rt->chip = chip;
@@ -153,6 +177,7 @@ int usb6fire_comm_init(struct sfire_chip *chip)
153 urb->interval = 1; 177 urb->interval = 1;
154 ret = usb_submit_urb(urb, GFP_KERNEL); 178 ret = usb_submit_urb(urb, GFP_KERNEL);
155 if (ret < 0) { 179 if (ret < 0) {
180 kfree(rt->receiver_buffer);
156 kfree(rt); 181 kfree(rt);
157 snd_printk(KERN_ERR PREFIX "cannot create comm data receiver."); 182 snd_printk(KERN_ERR PREFIX "cannot create comm data receiver.");
158 return ret; 183 return ret;
@@ -171,6 +196,9 @@ void usb6fire_comm_abort(struct sfire_chip *chip)
171 196
172void usb6fire_comm_destroy(struct sfire_chip *chip) 197void usb6fire_comm_destroy(struct sfire_chip *chip)
173{ 198{
174 kfree(chip->comm); 199 struct comm_runtime *rt = chip->comm;
200
201 kfree(rt->receiver_buffer);
202 kfree(rt);
175 chip->comm = NULL; 203 chip->comm = NULL;
176} 204}
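
Note: the usb6fire changes in comm.c, midi.c and pcm.c all cure the same defect. USB transfer buffers lived on the stack or embedded inside larger structures, but buffers handed to the USB core must be DMA-able, which rules out the stack. Each buffer becomes a separate kmalloc() with a matching kfree() on every exit path. The reworked write helper, reduced to its allocation skeleton, with userspace malloc standing in for kmalloc and send_buffer() a stub for the USB submission:

#include <stdlib.h>

#define MSG_LEN 13	/* maximum length of a 6fire control message */

/* Stand-in for the USB submission; real code hands 'buf' to hardware,
 * which is why it must not live on the caller's stack. */
static int send_buffer(const unsigned char *buf)
{
	return buf[0] == 0 ? 0 : -1;
}

static int comm_write8(unsigned char request, unsigned char reg,
		       unsigned char value)
{
	unsigned char *buffer;
	int ret;

	buffer = calloc(1, MSG_LEN);	/* heap, not "u8 buffer[13]" */
	if (!buffer)
		return -1;

	buffer[1] = request;
	buffer[2] = reg;
	buffer[3] = value;
	ret = send_buffer(buffer);

	free(buffer);			/* freed on every path */
	return ret;
}

int main(void)
{
	return comm_write8(0x02, 0x10, 0x7f) ? 1 : 0;
}
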
diff --git a/sound/usb/6fire/comm.h b/sound/usb/6fire/comm.h
index 6a0840b0dcff..780d5ed8e5d8 100644
--- a/sound/usb/6fire/comm.h
+++ b/sound/usb/6fire/comm.h
@@ -24,7 +24,7 @@ struct comm_runtime {
24 struct sfire_chip *chip; 24 struct sfire_chip *chip;
25 25
26 struct urb receiver; 26 struct urb receiver;
27 u8 receiver_buffer[COMM_RECEIVER_BUFSIZE]; 27 u8 *receiver_buffer;
28 28
29 u8 serial; /* urb serial */ 29 u8 serial; /* urb serial */
30 30
diff --git a/sound/usb/6fire/midi.c b/sound/usb/6fire/midi.c
index 26722423330d..f3dd7266c391 100644
--- a/sound/usb/6fire/midi.c
+++ b/sound/usb/6fire/midi.c
@@ -19,6 +19,10 @@
19#include "chip.h" 19#include "chip.h"
20#include "comm.h" 20#include "comm.h"
21 21
22enum {
23 MIDI_BUFSIZE = 64
24};
25
22static void usb6fire_midi_out_handler(struct urb *urb) 26static void usb6fire_midi_out_handler(struct urb *urb)
23{ 27{
24 struct midi_runtime *rt = urb->context; 28 struct midi_runtime *rt = urb->context;
@@ -156,6 +160,12 @@ int usb6fire_midi_init(struct sfire_chip *chip)
156 if (!rt) 160 if (!rt)
157 return -ENOMEM; 161 return -ENOMEM;
158 162
163 rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL);
164 if (!rt->out_buffer) {
165 kfree(rt);
166 return -ENOMEM;
167 }
168
159 rt->chip = chip; 169 rt->chip = chip;
160 rt->in_received = usb6fire_midi_in_received; 170 rt->in_received = usb6fire_midi_in_received;
161 rt->out_buffer[0] = 0x80; /* 'send midi' command */ 171 rt->out_buffer[0] = 0x80; /* 'send midi' command */
@@ -169,6 +179,7 @@ int usb6fire_midi_init(struct sfire_chip *chip)
169 179
170 ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance); 180 ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance);
171 if (ret < 0) { 181 if (ret < 0) {
182 kfree(rt->out_buffer);
172 kfree(rt); 183 kfree(rt);
173 snd_printk(KERN_ERR PREFIX "unable to create midi.\n"); 184 snd_printk(KERN_ERR PREFIX "unable to create midi.\n");
174 return ret; 185 return ret;
@@ -197,6 +208,9 @@ void usb6fire_midi_abort(struct sfire_chip *chip)
197 208
198void usb6fire_midi_destroy(struct sfire_chip *chip) 209void usb6fire_midi_destroy(struct sfire_chip *chip)
199{ 210{
200 kfree(chip->midi); 211 struct midi_runtime *rt = chip->midi;
212
213 kfree(rt->out_buffer);
214 kfree(rt);
201 chip->midi = NULL; 215 chip->midi = NULL;
202} 216}
diff --git a/sound/usb/6fire/midi.h b/sound/usb/6fire/midi.h
index c321006e5430..84851b9f5559 100644
--- a/sound/usb/6fire/midi.h
+++ b/sound/usb/6fire/midi.h
@@ -16,10 +16,6 @@
16 16
17#include "common.h" 17#include "common.h"
18 18
19enum {
20 MIDI_BUFSIZE = 64
21};
22
23struct midi_runtime { 19struct midi_runtime {
24 struct sfire_chip *chip; 20 struct sfire_chip *chip;
25 struct snd_rawmidi *instance; 21 struct snd_rawmidi *instance;
@@ -32,7 +28,7 @@ struct midi_runtime {
32 struct snd_rawmidi_substream *out; 28 struct snd_rawmidi_substream *out;
33 struct urb out_urb; 29 struct urb out_urb;
34 u8 out_serial; /* serial number of out packet */ 30 u8 out_serial; /* serial number of out packet */
35 u8 out_buffer[MIDI_BUFSIZE]; 31 u8 *out_buffer;
36 int buffer_offset; 32 int buffer_offset;
37 33
38 void (*in_received)(struct midi_runtime *rt, u8 *data, int length); 34 void (*in_received)(struct midi_runtime *rt, u8 *data, int length);
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index 2aa4e13063a8..b5eb97fdc842 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -543,7 +543,7 @@ static snd_pcm_uframes_t usb6fire_pcm_pointer(
 	snd_pcm_uframes_t ret;
 
 	if (rt->panic || !sub)
-		return SNDRV_PCM_STATE_XRUN;
+		return SNDRV_PCM_POS_XRUN;
 
 	spin_lock_irqsave(&sub->lock, flags);
 	ret = sub->dma_off;
@@ -582,6 +582,33 @@ static void usb6fire_pcm_init_urb(struct pcm_urb *urb,
 	urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB;
 }
 
+static int usb6fire_pcm_buffers_init(struct pcm_runtime *rt)
+{
+	int i;
+
+	for (i = 0; i < PCM_N_URBS; i++) {
+		rt->out_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
+				* PCM_MAX_PACKET_SIZE, GFP_KERNEL);
+		if (!rt->out_urbs[i].buffer)
+			return -ENOMEM;
+		rt->in_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
+				* PCM_MAX_PACKET_SIZE, GFP_KERNEL);
+		if (!rt->in_urbs[i].buffer)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static void usb6fire_pcm_buffers_destroy(struct pcm_runtime *rt)
+{
+	int i;
+
+	for (i = 0; i < PCM_N_URBS; i++) {
+		kfree(rt->out_urbs[i].buffer);
+		kfree(rt->in_urbs[i].buffer);
+	}
+}
+
 int usb6fire_pcm_init(struct sfire_chip *chip)
 {
 	int i;
@@ -593,6 +620,13 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
 	if (!rt)
 		return -ENOMEM;
 
+	ret = usb6fire_pcm_buffers_init(rt);
+	if (ret) {
+		usb6fire_pcm_buffers_destroy(rt);
+		kfree(rt);
+		return ret;
+	}
+
 	rt->chip = chip;
 	rt->stream_state = STREAM_DISABLED;
 	rt->rate = ARRAY_SIZE(rates);
@@ -614,6 +648,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
 
 	ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm);
 	if (ret < 0) {
+		usb6fire_pcm_buffers_destroy(rt);
 		kfree(rt);
 		snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n");
 		return ret;
@@ -625,6 +660,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_ops);
 
 	if (ret) {
+		usb6fire_pcm_buffers_destroy(rt);
 		kfree(rt);
 		snd_printk(KERN_ERR PREFIX
 			"error preallocating pcm buffers.\n");
@@ -669,6 +705,9 @@ void usb6fire_pcm_abort(struct sfire_chip *chip)
 
 void usb6fire_pcm_destroy(struct sfire_chip *chip)
 {
-	kfree(chip->pcm);
+	struct pcm_runtime *rt = chip->pcm;
+
+	usb6fire_pcm_buffers_destroy(rt);
+	kfree(rt);
 	chip->pcm = NULL;
 }
diff --git a/sound/usb/6fire/pcm.h b/sound/usb/6fire/pcm.h
index 9b01133ee3fe..f5779d6182c6 100644
--- a/sound/usb/6fire/pcm.h
+++ b/sound/usb/6fire/pcm.h
@@ -32,7 +32,7 @@ struct pcm_urb {
 	struct urb instance;
 	struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB];
 	/* END DO NOT SEPARATE */
-	u8 buffer[PCM_N_PACKETS_PER_URB * PCM_MAX_PACKET_SIZE];
+	u8 *buffer;
 
 	struct pcm_urb *peer;
 };
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 7a444b5501d9..659950e5b94f 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -591,17 +591,16 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep,
 	ep->stride = frame_bits >> 3;
 	ep->silence_value = pcm_format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0;
 
-	/* calculate max. frequency */
-	if (ep->maxpacksize) {
+	/* assume max. frequency is 25% higher than nominal */
+	ep->freqmax = ep->freqn + (ep->freqn >> 2);
+	maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
+			>> (16 - ep->datainterval);
+	/* but wMaxPacketSize might reduce this */
+	if (ep->maxpacksize && ep->maxpacksize < maxsize) {
 		/* whatever fits into a max. size packet */
 		maxsize = ep->maxpacksize;
 		ep->freqmax = (maxsize / (frame_bits >> 3))
 			<< (16 - ep->datainterval);
-	} else {
-		/* no max. packet size: just take 25% higher than nominal */
-		ep->freqmax = ep->freqn + (ep->freqn >> 2);
-		maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
-			>> (16 - ep->datainterval);
 	}
 
 	if (ep->fill_max)
diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
index 6430ed2a9f65..c21a3df9a0df 100644
--- a/sound/usb/hiface/pcm.c
+++ b/sound/usb/hiface/pcm.c
@@ -503,7 +503,7 @@ static snd_pcm_uframes_t hiface_pcm_pointer(struct snd_pcm_substream *alsa_sub)
 	snd_pcm_uframes_t dma_offset;
 
 	if (rt->panic || !sub)
-		return SNDRV_PCM_STATE_XRUN;
+		return SNDRV_PCM_POS_XRUN;
 
 	spin_lock_irqsave(&sub->lock, flags);
 	dma_offset = sub->dma_off;
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index d5438083fd6a..95558ef4a7a0 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -888,6 +888,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
 	case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
 	case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
 	case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
+	case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
 	case USB_ID(0x046d, 0x0991):
 	/* Most audio usb devices lie about volume resolution.
 	 * Most Logitech webcams have res = 384.
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 1bc45e71f1fe..0df9ede99dfd 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -319,19 +319,19 @@ static int create_auto_midi_quirk(struct snd_usb_audio *chip,
 	if (altsd->bNumEndpoints < 1)
 		return -ENODEV;
 	epd = get_endpoint(alts, 0);
-	if (!usb_endpoint_xfer_bulk(epd) ||
+	if (!usb_endpoint_xfer_bulk(epd) &&
 	    !usb_endpoint_xfer_int(epd))
 		return -ENODEV;
 
 	switch (USB_ID_VENDOR(chip->usb_id)) {
 	case 0x0499: /* Yamaha */
 		err = create_yamaha_midi_quirk(chip, iface, driver, alts);
-		if (err < 0 && err != -ENODEV)
+		if (err != -ENODEV)
 			return err;
 		break;
 	case 0x0582: /* Roland */
 		err = create_roland_midi_quirk(chip, iface, driver, alts);
-		if (err < 0 && err != -ENODEV)
+		if (err != -ENODEV)
 			return err;
 		break;
 	}
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index ca9fa4d32e07..07819bfa7dba 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -1026,9 +1026,10 @@ kvp_get_ip_info(int family, char *if_name, int op,
 
 		if (sn_offset == 0)
 			strcpy(sn_str, cidr_mask);
-		else
+		else {
+			strcat((char *)ip_buffer->sub_net, ";");
 			strcat(sn_str, cidr_mask);
-		strcat((char *)ip_buffer->sub_net, ";");
+		}
 		sn_offset += strlen(sn_str) + 1;
 	}
 